diff -u --recursive --new-file v2.1.113/linux/Makefile linux/Makefile
--- v2.1.113/linux/Makefile	Mon Aug  3 12:45:43 1998
+++ linux/Makefile	Sat Aug  1 11:33:59 1998
@@ -1,6 +1,6 @@
 VERSION = 2
 PATCHLEVEL = 1
-SUBLEVEL = 113
+SUBLEVEL = 114
 
 ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
 
diff -u --recursive --new-file v2.1.113/linux/arch/i386/kernel/bios32.c linux/arch/i386/kernel/bios32.c
--- v2.1.113/linux/arch/i386/kernel/bios32.c	Tue Jul 28 14:21:07 1998
+++ linux/arch/i386/kernel/bios32.c	Mon Aug  3 11:17:02 1998
@@ -1,7 +1,7 @@
 /*
  *	bios32.c - Low-Level PCI Access
  *
- *	$Id: bios32.c,v 1.42 1998/07/26 09:33:07 mj Exp $
+ *	$Id: bios32.c,v 1.43 1998/08/03 15:59:20 mj Exp $
  *
  *	Copyright 1993, 1994 Drew Eckhardt
  *	Visionary Computing
@@ -920,6 +920,13 @@
 	struct pci_bus *b = &pci_root;
 	int i;
 
+	/*
+	 * Don't search for peer host bridges if we use config type 2
+	 * since it reads bogus values for non-existent busses and
+	 * chipsets supporting multiple primary busses use conf1 anyway.
+	 */
+	if (access_pci == &pci_direct_conf2)
+		return;
 	do {
 		int n = b->subordinate+1;
 		u16 l;
@@ -972,8 +979,13 @@
 		/*
 		 * Don't enable VGA-compatible cards since they have
 		 * fixed I/O and memory space.
+		 *
+		 * Don't enable disabled IDE interfaces either because
+		 * some BIOSes may reallocate the same address when they
+		 * find that no devices are attached.
 		 */
-		if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
+		if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) &&
+		    ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)) {
			pci_read_config_word(dev, PCI_COMMAND, &cmd);
			if (has_io && !(cmd & PCI_COMMAND_IO)) {
				printk("PCI: Enabling I/O for device %02x:%02x\n",
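
The class-code check this bios32.c hunk adds is easy to model outside the kernel. Below is a minimal standalone C sketch of the same policy: decoding is only switched on for devices that are neither VGA nor IDE class. The struct fake_pci_dev and may_enable() names are invented for illustration; the class-code values match the PCI spec constants used in the hunk.

    #include <stdio.h>
    #include <stdint.h>

    /* PCI class codes as used by the kernel's PCI_CLASS_* constants. */
    #define PCI_CLASS_DISPLAY_VGA	0x0300
    #define PCI_CLASS_STORAGE_IDE	0x0101

    /* Hypothetical model of one device: the kernel stores the class word
     * as (class << 8) | prog-if, which is why the hunk shifts by 8. */
    struct fake_pci_dev {
    	uint32_t class;
    };

    /* Mirrors the hunk's policy: never flip decoding on for VGA or IDE
     * devices; the BIOS may have disabled them on purpose. */
    static int may_enable(const struct fake_pci_dev *dev)
    {
    	uint16_t class = dev->class >> 8;
    	return class != PCI_CLASS_DISPLAY_VGA && class != PCI_CLASS_STORAGE_IDE;
    }

    int main(void)
    {
    	struct fake_pci_dev ide = { PCI_CLASS_STORAGE_IDE << 8 };
    	struct fake_pci_dev net = { 0x0200 << 8 };	/* an ethernet card */

    	printf("ide: %s\n", may_enable(&ide) ? "enable" : "leave alone");
    	printf("net: %s\n", may_enable(&net) ? "enable" : "leave alone");
    	return 0;
    }
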
diff -u --recursive --new-file v2.1.113/linux/arch/i386/kernel/io_apic.c linux/arch/i386/kernel/io_apic.c
--- v2.1.113/linux/arch/i386/kernel/io_apic.c	Tue Jul 28 14:21:07 1998
+++ linux/arch/i386/kernel/io_apic.c	Mon Aug  3 11:14:41 1998
@@ -7,49 +7,24 @@
  *	patches and reporting/debugging problems patiently!
  */
 
-#include
-#include
-#include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
-#include
 #include
-#include
-#include
-#include
-#include
+#include
 #include
 
 #include "irq.h"
 
 /*
- * volatile is justified in this case, it might change
- * spontaneously, GCC should not cache it
+ * volatile is justified in this case, IO-APIC register contents
+ * might change spontaneously, GCC should not cache it
  */
 #define IO_APIC_BASE ((volatile int *)fix_to_virt(FIX_IO_APIC_BASE))
 
-enum mp_irq_source_types {
-	mp_INT = 0,
-	mp_NMI = 1,
-	mp_SMI = 2,
-	mp_ExtINT = 3
-};
-
-enum ioapic_irq_destination_types {
-	dest_Fixed = 0,
-	dest_LowestPrio = 1,
-	dest_ExtINT = 7
-};
-
 /*
  * The structure of the IO-APIC:
  */
+
 struct IO_APIC_reg_00 {
	__u32	__reserved_2	: 24,
		ID		:  4,
@@ -69,6 +44,17 @@
		__reserved_1	:  4;
 } __attribute__ ((packed));
 
+/*
+ * # of IRQ routing registers
+ */
+int nr_ioapic_registers = 0;
+
+enum ioapic_irq_destination_types {
+	dest_Fixed = 0,
+	dest_LowestPrio = 1,
+	dest_ExtINT = 7
+};
+
 struct IO_APIC_route_entry {
	__u32	vector		:  8,
		delivery_mode	:  3,	/* 000: FIXED
@@ -97,13 +83,17 @@
 
 } __attribute__ ((packed));
 
-#define UNEXPECTED_IO_APIC()						\
-	{								\
-		printk(" WARNING: unexpected IO-APIC, please mail\n");	\
-		printk("          to linux-smp@vger.rutgers.edu\n");	\
-	}
+/*
+ * MP-BIOS irq configuration table structures:
+ */
+
+enum mp_irq_source_types {
+	mp_INT = 0,
+	mp_NMI = 1,
+	mp_SMI = 2,
+	mp_ExtINT = 3
+};
 
-int nr_ioapic_registers = 0;			/* # of IRQ routing registers */
 int mp_irq_entries = 0;				/* # of MP IRQ source entries */
 struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];	/* MP IRQ source entries */
@@ -116,13 +106,13 @@
  */
 static int irq_2_pin[NR_IRQS];
 
-unsigned int io_apic_read (unsigned int reg)
+static inline unsigned int io_apic_read(unsigned int reg)
 {
	*IO_APIC_BASE = reg;
	return *(IO_APIC_BASE+4);
 }
 
-void io_apic_write (unsigned int reg, unsigned int value)
+static inline void io_apic_write(unsigned int reg, unsigned int value)
 {
	*IO_APIC_BASE = reg;
	*(IO_APIC_BASE+4) = value;
@@ -141,57 +131,57 @@
  * We disable IO-APIC IRQs by setting their 'destination CPU mask' to
  * zero. Trick, trick.
  */
-void disable_IO_APIC_irq(unsigned int irq)
+static inline void disable_IO_APIC_irq(unsigned int irq)
 {
	int pin = irq_2_pin[irq];
	struct IO_APIC_route_entry entry;
 
	if (pin != -1) {
-		*(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
+		*(((int *)&entry) + 1) = io_apic_read(0x11 + pin * 2);
		entry.dest.logical.logical_dest = 0x0;
-		io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+		io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
		io_apic_sync();
	}
 }
 
-void enable_IO_APIC_irq(unsigned int irq)
+static inline void enable_IO_APIC_irq(unsigned int irq)
 {
	int pin = irq_2_pin[irq];
	struct IO_APIC_route_entry entry;
 
	if (pin != -1) {
-		*(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
+		*(((int *)&entry) + 1) = io_apic_read(0x11 + pin * 2);
		entry.dest.logical.logical_dest = 0xff;
-		io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+		io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
	}
 }
 
-void mask_IO_APIC_irq(unsigned int irq)
+static inline void mask_IO_APIC_irq(unsigned int irq)
 {
	int pin = irq_2_pin[irq];
	struct IO_APIC_route_entry entry;
 
	if (pin != -1) {
-		*(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
+		*(((int *)&entry) + 0) = io_apic_read(0x10 + pin * 2);
		entry.mask = 1;
-		io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
+		io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
		io_apic_sync();
	}
 }
 
-void unmask_IO_APIC_irq(unsigned int irq)
+static inline void unmask_IO_APIC_irq(unsigned int irq)
 {
	int pin = irq_2_pin[irq];
	struct IO_APIC_route_entry entry;
 
	if (pin != -1) {
-		*(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
+		*(((int *)&entry) + 0) = io_apic_read(0x10 + pin * 2);
		entry.mask = 0;
-		io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
+		io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
	}
 }
 
-void clear_IO_APIC_pin (unsigned int pin)
+static void __init clear_IO_APIC_pin(unsigned int pin)
 {
	struct IO_APIC_route_entry entry;
 
@@ -200,8 +190,8 @@
	 */
	memset(&entry, 0, sizeof(entry));
	entry.mask = 1;
-	io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
-	io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+	io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
+	io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
 }
 
 
@@ -214,30 +204,30 @@
 int pirq_entries [MAX_PIRQS];
 int pirqs_enabled;
 
-__initfunc(void ioapic_pirq_setup(char *str, int *ints))
+void __init ioapic_pirq_setup(char *str, int *ints)
 {
	int i, max;
 
-	for (i=0; i<MAX_PIRQS; i++)
+	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;
 
	if (ints[0]) {
		pirqs_enabled = 1;
		printk("PIRQ redirection, working around broken MP-BIOS.\n");
		max = MAX_PIRQS;
		if (ints[0] < max)
			max = ints[0];
 
-		for (i=0; i<max; i++) {
+		for (i = 0; i < max; i++) {
			printk("... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
			/*
			 * PIRQs are mapped upside down, usually.
			 */
-			pirq_entries[MAX_PIRQS-i-1]=ints[i+1];
+			pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
		}
	}
 }
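
All of the mask/unmask helpers above share one read-modify-write shape: fetch half of the 64-bit redirection entry through the indirect register window, flip a field, write it back. Below is a minimal userspace model of that shape. It assumes the conventional IO-APIC layout the code above implies (pin N's low dword at register 0x10 + 2*N, mask at bit 16 of the low dword); regs[], io_apic_read() and io_apic_write() are fakes standing in for the real memory-mapped window, and the kernel pokes the fields through struct IO_APIC_route_entry bitfields instead of bitmasks.

    #include <stdio.h>
    #include <stdint.h>

    /* Fake 32-bit register file in place of the IO-APIC index/data window. */
    static uint32_t regs[64];

    static uint32_t io_apic_read(unsigned int reg)	{ return regs[reg]; }
    static void io_apic_write(unsigned int reg, uint32_t v)	{ regs[reg] = v; }

    #define ROUTE_ENTRY_LOW(pin)	(0x10 + 2 * (pin))	/* low dword of pin's entry */
    #define ROUTE_MASKED		(1u << 16)		/* assumed mask bit position */

    /* Same read-modify-write shape as mask_IO_APIC_irq() above. */
    static void mask_pin(unsigned int pin)
    {
    	uint32_t lo = io_apic_read(ROUTE_ENTRY_LOW(pin));
    	lo |= ROUTE_MASKED;
    	io_apic_write(ROUTE_ENTRY_LOW(pin), lo);
    }

    static void unmask_pin(unsigned int pin)
    {
    	uint32_t lo = io_apic_read(ROUTE_ENTRY_LOW(pin));
    	lo &= ~ROUTE_MASKED;
    	io_apic_write(ROUTE_ENTRY_LOW(pin), lo);
    }

    int main(void)
    {
    	mask_pin(2);
    	printf("entry after mask:   %#010x\n", io_apic_read(ROUTE_ENTRY_LOW(2)));
    	unmask_pin(2);
    	printf("entry after unmask: %#010x\n", io_apic_read(ROUTE_ENTRY_LOW(2)));
    	return 0;
    }
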
@@ -245,11 +235,11 @@
 /*
  * Find the IRQ entry number of a certain pin.
  */
-__initfunc(static int find_irq_entry(int pin, int type))
+static int __init find_irq_entry(int pin, int type)
 {
	int i;
 
-	for (i=0; i<mp_irq_entries; i++)
+	for (i = 0; i < mp_irq_entries; i++)
		if ((mp_irqs[i].mpc_irqtype == type) &&
		    (mp_irqs[i].mpc_dstirq == pin))
			return i;
 
	return -1;
 }
 
-	if ((pin>=16) && (pin<=23)) {
+	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				printk("disabling PIRQ%d\n", pin-16);
@@ -510,14 +499,14 @@
	return irq;
 }
 
-int IO_APIC_irq_trigger (int irq)
+static inline int IO_APIC_irq_trigger(int irq)
 {
	int idx, pin;
 
-	for (pin=0; pin<nr_ioapic_registers; pin++) {
+	for (pin = 0; pin < nr_ioapic_registers; pin++) {
@@ -541,14 +530,14 @@
	return current_vector;
 }
 
-__initfunc(void setup_IO_APIC_irqs (void))
+void __init setup_IO_APIC_irqs(void)
 {
	struct IO_APIC_route_entry entry;
-	int pin, idx, bus, irq, first_notcon=1;
+	int pin, idx, bus, irq, first_notcon = 1;
 
	printk("init IO_APIC IRQs\n");
 
-	for (pin=0; pin<nr_ioapic_registers; pin++) {
+	for (pin = 0; pin < nr_ioapic_registers; pin++) {
		printk("%d->%d ", i, irq_2_pin[i]);
	printk("\n");
 
@@ -729,15 +724,15 @@
		return;
	}
 
-__initfunc(static void init_sym_mode (void))
+static void __init init_sym_mode(void)
 {
	int i, pin;
 
-	for (i=0; i<NR_IRQS; i++)
+	for (i = 0; i < NR_IRQS; i++)
		irq_2_pin[i] = -1;
-	for ( ; table->oem_id; table++)
+	for ( ; table->oem_id ; table++)
		if ((!strcmp(table->oem_id,ioapic_OEM_ID)) &&
		    (!strcmp(table->product_id,ioapic_Product_ID)))
			return 1;
	return 0;
 }
 
-__initfunc(static int ioapic_whitelisted (void))
+static int __init ioapic_whitelisted(void)
 {
 /*
  * Right now, whitelist everything to see whether the new parsing
@@ -818,12 +813,12 @@
 #endif
 }
 
-__initfunc(static int ioapic_blacklisted (void))
+static int __init ioapic_blacklisted(void)
 {
	return in_ioapic_list(ioapic_blacklist);
 }
 
-__initfunc(static void setup_ioapic_id (void))
+static void __init setup_ioapic_id(void)
 {
	struct IO_APIC_reg_00 reg_00;
 
@@ -857,11 +852,11 @@
	panic("could not set ID");
 }
 
-__initfunc(static void construct_default_ISA_mptable (void))
+static void __init construct_default_ISA_mptable(void)
 {
-	int i, pos=0;
+	int i, pos = 0;
 
-	for (i=0; i<16; i++) {
+	for (i = 0; i < 16; i++) {
		if (!IO_APIC_IRQ(i))
			continue;
 
@@ -903,14 +898,11 @@
  *	- if this function detects that timer IRQs are defunct, then we fall
  *	  back to ISA timer IRQs
  */
-__initfunc(static int timer_irq_works (void))
+static int __init timer_irq_works(void)
 {
-	unsigned int t1=jiffies;
-	unsigned long flags;
+	unsigned int t1 = jiffies;
 
-	save_flags(flags);
	sti();
-
	udelay(10*10000);
 
	if (jiffies-t1>1)
@@ -919,8 +911,6 @@
	return 0;
 }
 
-#ifdef __SMP__
-
 /*
  * In the SMP+IOAPIC case it might happen that there are an unspecified
  * number of pending IRQ events unhandled. These cases are very rare,
@@ -928,7 +918,7 @@
  * better to do it this way as thus we do not have to be aware of
  * 'pending' interrupts in the IRQ path, except at this point.
  */
-static inline void self_IPI (unsigned int irq)
+static inline void self_IPI(unsigned int irq)
 {
	irq_desc_t *desc = irq_desc + irq;
 
@@ -1023,8 +1013,8 @@
	irq_exit(cpu, irq);
 }
 
-static void do_level_ioapic_IRQ (unsigned int irq, int cpu,
-				 struct pt_regs * regs)
+static void do_level_ioapic_IRQ(unsigned int irq, int cpu,
+				struct pt_regs * regs)
 {
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
 
@@ -1095,7 +1085,7 @@
	disable_level_ioapic_irq
 };
 
-void init_IO_APIC_traps(void)
+static inline void init_IO_APIC_traps(void)
 {
	int i;
	/*
@@ -1124,7 +1114,6 @@
		}
	}
 }
-#endif
 
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
  */
-__initfunc(static void check_timer (void))
+static inline void check_timer(void)
 {
	int pin1, pin2;
 
-	pin1 = find_timer_pin (mp_INT);
-	pin2 = find_timer_pin (mp_ExtINT);
+	pin1 = find_timer_pin(mp_INT);
+	pin2 = find_timer_pin(mp_ExtINT);
 
-	if (!timer_irq_works ()) {
+	if (!timer_irq_works()) {
 
		if (pin1 != -1)
			printk("..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
		printk("...trying to set up timer as ExtINT... ");
 
		if (pin2 != -1) {
			printk(".. (found pin %d) ...", pin2);
-			setup_ExtINT_pin (pin2);
+			setup_ExtINT_pin(pin2);
			make_8259A_irq(0);
		}
 
-		if (!timer_irq_works ()) {
+		if (!timer_irq_works()) {
			printk(" failed.\n");
			printk("...trying to set up timer as BP IRQ...");
			/*
			 * Just in case ...
			 */
			if (pin1 != -1)
-				clear_IO_APIC_pin (pin1);
+				clear_IO_APIC_pin(pin1);
			if (pin2 != -1)
-				clear_IO_APIC_pin (pin2);
+				clear_IO_APIC_pin(pin2);
 
			make_8259A_irq(0);
 
-			if (!timer_irq_works ()) {
+			if (!timer_irq_works()) {
				printk(" failed.\n");
				panic("IO-APIC + timer doesn't work!");
			}
@@ -1172,7 +1161,7 @@
	}
 }
 
-__initfunc(void setup_IO_APIC (void))
+void __init setup_IO_APIC(void)
 {
	init_sym_mode();
 
@@ -1216,7 +1205,7 @@
	 * Set up the IO-APIC IRQ routing table by parsing the MP-BIOS
	 * mptable:
	 */
-	setup_IO_APIC_irqs ();
+	setup_IO_APIC_irqs();
	init_IRQ_SMP();
	check_timer();
diff -u --recursive --new-file v2.1.113/linux/arch/i386/kernel/irq.h linux/arch/i386/kernel/irq.h
--- v2.1.113/linux/arch/i386/kernel/irq.h	Fri Jul 31 17:14:26 1998
+++ linux/arch/i386/kernel/irq.h	Mon Aug  3 11:06:09 1998
@@ -56,17 +56,11 @@
 
 void mask_irq(unsigned int irq);
 void unmask_irq(unsigned int irq);
-void enable_IO_APIC_irq (unsigned int irq);
-void disable_IO_APIC_irq (unsigned int irq);
-void unmask_IO_APIC_irq (unsigned int irq);
-void mask_IO_APIC_irq (unsigned int irq);
 void set_8259A_irq_mask (unsigned int irq);
 int i8259A_irq_pending (unsigned int irq);
 void ack_APIC_irq (void);
 void setup_IO_APIC (void);
-void init_IO_APIC_traps(void);
 int IO_APIC_get_PCI_irq_vector (int bus, int slot, int fn);
-int IO_APIC_irq_trigger (int irq);
 void make_8259A_irq (unsigned int irq);
 void send_IPI (int dest, int vector);
 void init_pic_mode (void);
diff -u --recursive --new-file v2.1.113/linux/arch/i386/kernel/process.c linux/arch/i386/kernel/process.c
--- v2.1.113/linux/arch/i386/kernel/process.c	Sun Jul 26 11:57:15 1998
+++ linux/arch/i386/kernel/process.c	Mon Aug  3 12:40:22 1998
@@ -441,13 +441,26 @@
  *
  * This extra buffer essentially acts to make for less
  * "jitter" in the allocations..
+ *
+ * On SMP we don't do this right now because:
+ *  - we aren't holding any locks when called, and we might
+ *    as well just depend on the generic memory management
+ *    to do proper locking for us instead of complicating it
+ *    here.
+ *  - if you use SMP you have a beefy enough machine that
+ *    this shouldn't matter..
  */
+#ifndef __SMP__
 #define EXTRA_TASK_STRUCT	16
 static struct task_struct * task_struct_stack[EXTRA_TASK_STRUCT];
 static int task_struct_stack_ptr = -1;
+#endif
 
 struct task_struct * alloc_task_struct(void)
 {
+#ifndef EXTRA_TASK_STRUCT
+	return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
+#else
	int index;
	struct task_struct *ret;
 
@@ -464,16 +477,19 @@
		}
	}
	return ret;
+#endif
 }
 
 void free_task_struct(struct task_struct *p)
 {
+#ifdef EXTRA_TASK_STRUCT
	int index = task_struct_stack_ptr+1;
 
	if (index < EXTRA_TASK_STRUCT) {
		task_struct_stack[index] = p;
		task_struct_stack_ptr = index;
	} else
+#endif
		free_pages((unsigned long) p, 1);
 }
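
The EXTRA_TASK_STRUCT change is easier to see outside the kernel: allocation pops from a tiny LIFO of recently freed objects and only falls back to the real allocator when the stack is empty. The sketch below is a userspace model of the same idea using malloc()/free() in place of __get_free_pages(); the names are invented for the example. Note that it is deliberately unlocked, which is exactly why the patch compiles the cache out on SMP.

    #include <stdlib.h>
    #include <stdio.h>

    /* Toy stand-in for a task_struct (two pages in the real kernel). */
    struct task { char payload[64]; };

    /* Small stack of recently freed objects, same shape as the patch. */
    #define EXTRA_TASK_STRUCT 16
    static struct task *task_stack[EXTRA_TASK_STRUCT];
    static int task_stack_ptr = -1;

    static struct task *alloc_task(void)
    {
    	if (task_stack_ptr >= 0)
    		return task_stack[task_stack_ptr--];	/* reuse a cached object */
    	return malloc(sizeof(struct task));		/* slow path */
    }

    static void free_task(struct task *p)
    {
    	int index = task_stack_ptr + 1;

    	if (index < EXTRA_TASK_STRUCT) {
    		task_stack[index] = p;			/* keep it for the next fork */
    		task_stack_ptr = index;
    	} else
    		free(p);
    }

    int main(void)
    {
    	struct task *a = alloc_task();
    	free_task(a);
    	struct task *b = alloc_task();	/* comes straight off the cache */
    	printf("reused: %s\n", a == b ? "yes" : "no");
    	free_task(b);
    	return 0;
    }
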
diff -u --recursive --new-file v2.1.113/linux/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
--- v2.1.113/linux/arch/i386/kernel/traps.c	Mon Aug  3 12:45:43 1998
+++ linux/arch/i386/kernel/traps.c	Mon Aug  3 11:27:32 1998
@@ -108,7 +108,7 @@
	unsigned long *stack, addr, module_start, module_end;
	extern char _stext, _etext;
 
-	esp = (unsigned long) &regs->esp;
+	esp = (unsigned long) (1+regs);
	ss = __KERNEL_DS;
	if (regs->xcs & 3) {
		in_kernel = 0;
@@ -169,8 +169,8 @@
		printk("\nCode: ");
		for(i=0;i<20;i++)
			printk("%02x ", ((unsigned char *)regs->eip)[i]);
-		printk("\n");
	}
+	printk("\n");
 }
 
 spinlock_t die_lock;
diff -u --recursive --new-file v2.1.113/linux/arch/i386/mm/fault.c linux/arch/i386/mm/fault.c
--- v2.1.113/linux/arch/i386/mm/fault.c	Mon Aug  3 12:45:43 1998
+++ linux/arch/i386/mm/fault.c	Sat Aug  1 10:26:31 1998
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
diff -u --recursive --new-file v2.1.113/linux/arch/m68k/amiga/amiga_ksyms.c linux/arch/m68k/amiga/amiga_ksyms.c
--- v2.1.113/linux/arch/m68k/amiga/amiga_ksyms.c	Mon Aug  3 12:45:44 1998
+++ linux/arch/m68k/amiga/amiga_ksyms.c	Sat Aug  1 11:33:50 1998
@@ -1,3 +1,4 @@
+#include
 #include
 #include
 #include
diff -u --recursive --new-file v2.1.113/linux/arch/m68k/amiga/pcmcia.c linux/arch/m68k/amiga/pcmcia.c
--- v2.1.113/linux/arch/m68k/amiga/pcmcia.c	Mon Aug  3 12:45:44 1998
+++ linux/arch/m68k/amiga/pcmcia.c	Sat Aug  1 11:33:50 1998
@@ -12,7 +12,6 @@
 ** Created: 12/10/97 by Alain Malek
 */
 
-#include
 #include
 #include
 #include
diff -u --recursive --new-file v2.1.113/linux/drivers/block/nbd.c linux/drivers/block/nbd.c
--- v2.1.113/linux/drivers/block/nbd.c	Thu May 14 19:47:39 1998
+++ linux/drivers/block/nbd.c	Sat Aug  1 19:05:05 1998
@@ -110,8 +110,8 @@
 
		if (result <= 0) {
 #ifdef PARANOIA
-			printk(KERN_ERR "NBD: %s - sock=%d at buf=%d, size=%d returned %d.\n",
-				send ? "send" : "receive", (int) sock, (int) buf, size, result);
+			printk(KERN_ERR "NBD: %s - sock=%ld at buf=%ld, size=%d returned %d.\n",
+				send ? "send" : "receive", (long) sock, (long) buf, size, result);
 #endif
			break;
		}
@@ -371,8 +371,8 @@
		return 0;
 #ifdef PARANOIA
	case NBD_PRINT_DEBUG:
-		printk(KERN_INFO "NBD device %d: head = %x, tail = %x. Global: in %d, out %d\n",
-			dev, (int) lo->head, (int) lo->tail, requests_in, requests_out);
+		printk(KERN_INFO "NBD device %d: head = %lx, tail = %lx. Global: in %d, out %d\n",
+			dev, (long) lo->head, (long) lo->tail, requests_in, requests_out);
		return 0;
 #endif
	}
diff -u --recursive --new-file v2.1.113/linux/drivers/net/apne.c linux/drivers/net/apne.c
--- v2.1.113/linux/drivers/net/apne.c	Mon Aug  3 12:45:45 1998
+++ linux/drivers/net/apne.c	Sat Aug  1 11:33:50 1998
@@ -29,7 +29,6 @@
 
 #include
 
-#include
 #include
 #include
 #include
diff -u --recursive --new-file v2.1.113/linux/drivers/scsi/bvme6000.c linux/drivers/scsi/bvme6000.c
--- v2.1.113/linux/drivers/scsi/bvme6000.c	Mon Aug  3 12:45:46 1998
+++ linux/drivers/scsi/bvme6000.c	Sat Aug  1 11:33:50 1998
@@ -8,7 +8,6 @@
 #include
 #include
 #include
-#include
 
 #include
 #include
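
The nbd.c hunks above fix printk format mismatches: a pointer cast to (int) truncates on 64-bit machines, so the casts become (long), which is pointer-sized on the Linux ABIs of the day. A tiny standalone demonstration of the difference; modern code would usually just print with %p, as shown.

    #include <stdio.h>

    int main(void)
    {
    	int x = 42;
    	int *p = &x;

    	/* (int) would truncate the upper 32 bits on a 64-bit target;
    	 * unsigned long matches the pointer width on Linux. */
    	printf("as cast-to-long: %#lx\n", (unsigned long) p);
    	printf("as %%p:           %p\n", (void *) p);
    	return 0;
    }
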
diff -u --recursive --new-file v2.1.113/linux/fs/lockd/svc.c linux/fs/lockd/svc.c
--- v2.1.113/linux/fs/lockd/svc.c	Fri Dec 19 10:54:10 1997
+++ linux/fs/lockd/svc.c	Sat Aug  1 17:59:46 1998
@@ -186,7 +186,8 @@
		nlm_shutdown_hosts();
		nlmsvc_pid = 0;
	} else
-		printk("lockd: new process, skipping host shutdown\n");
+		printk(KERN_DEBUG
+			"lockd: new process, skipping host shutdown\n");
	wake_up(&lockd_exit);
 
	/* Exit the RPC thread */
@@ -205,6 +206,7 @@
 int
 lockd_up(void)
 {
+	static int		warned = 0;
	struct svc_serv *	serv;
	int			error = 0;
 
@@ -225,27 +227,32 @@
	 * we should be the first user ...
	 */
	if (nlmsvc_users > 1)
-		printk("lockd_up: no pid, %d users??\n", nlmsvc_users);
+		printk(KERN_WARNING
+			"lockd_up: no pid, %d users??\n", nlmsvc_users);
 
	error = -ENOMEM;
	serv = svc_create(&nlmsvc_program, 0, NLMSVC_XDRSIZE);
	if (!serv) {
-		printk("lockd_up: create service failed\n");
+		printk(KERN_WARNING "lockd_up: create service failed\n");
		goto out;
	}
 
	if ((error = svc_makesock(serv, IPPROTO_UDP, 0)) < 0 ||
	    (error = svc_makesock(serv, IPPROTO_TCP, 0)) < 0) {
-		printk("lockd_up: makesock failed, error=%d\n", error);
+		if (warned++ == 0)
+			printk(KERN_WARNING
+				"lockd_up: makesock failed, error=%d\n", error);
		goto destroy_and_out;
-	} 
+	}
+	warned = 0;
 
	/*
	 * Create the kernel thread and wait for it to start.
	 */
	error = svc_create_thread(lockd, serv);
	if (error) {
-		printk("lockd_up: create thread failed, error=%d\n", error);
+		printk(KERN_WARNING
+			"lockd_up: create thread failed, error=%d\n", error);
		goto destroy_and_out;
	}
	sleep_on(&lockd_start);
@@ -267,17 +274,21 @@
 void
 lockd_down(void)
 {
+	static int warned = 0;
+
	down(&nlmsvc_sema);
	if (nlmsvc_users) {
		if (--nlmsvc_users)
			goto out;
	} else
-		printk("lockd_down: no users! pid=%d\n", nlmsvc_pid);
+		printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
 
	if (!nlmsvc_pid) {
-		printk("lockd_down: nothing to do!\n");
+		if (warned++ == 0)
+			printk(KERN_WARNING "lockd_down: no lockd running.\n");
		goto out;
	}
+	warned = 0;
 
	kill_proc(nlmsvc_pid, SIGKILL, 1);
 
	/*
@@ -289,7 +300,8 @@
	interruptible_sleep_on(&lockd_exit);
	current->timeout = 0;
	if (nlmsvc_pid) {
-		printk("lockd_down: lockd failed to exit, clearing pid\n");
+		printk(KERN_WARNING
+			"lockd_down: lockd failed to exit, clearing pid\n");
		nlmsvc_pid = 0;
	}
	spin_lock_irq(&current->sigmask_lock);
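
The lockd changes introduce a small rate-limit idiom worth calling out: a static counter logs a repeating failure only once, and is reset to zero on success so the *next* outage gets reported again. Here is a self-contained sketch of the idiom; try_operation() is invented for illustration.

    #include <stdio.h>

    /* Warn-once-until-success, same shape as the lockd hunks above. */
    static int try_operation(int should_fail)
    {
    	static int warned = 0;

    	if (should_fail) {
    		if (warned++ == 0)
    			fprintf(stderr, "operation failed (suppressing repeats)\n");
    		return -1;
    	}
    	warned = 0;	/* success: re-arm the warning */
    	return 0;
    }

    int main(void)
    {
    	try_operation(1);	/* logs */
    	try_operation(1);	/* silent */
    	try_operation(0);	/* success resets the counter */
    	try_operation(1);	/* logs again */
    	return 0;
    }
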
diff -u --recursive --new-file v2.1.113/linux/include/asm-alpha/smplock.h linux/include/asm-alpha/smplock.h
--- v2.1.113/linux/include/asm-alpha/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-alpha/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
--- v2.1.113/linux/include/asm-arm/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-arm/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-generic/smplock.h linux/include/asm-generic/smplock.h
--- v2.1.113/linux/include/asm-generic/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-generic/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
--- v2.1.113/linux/include/asm-i386/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-i386/smplock.h	Mon Aug  3 13:06:52 1998
@@ -0,0 +1,59 @@
+/*
+ * <asm/smplock.h>
+ *
+ * i386 SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	__asm__ __volatile__(
+		"incl %1\n\t"
+		"jne 9f"
+		spin_lock_string
+		"\n9:"
+		:"=m" (__dummy_lock(&kernel_flag)),
+		 "=m" (current->lock_depth));
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	__asm__ __volatile__(
+		"decl %1\n\t"
+		"jns 9f\n"
+		spin_unlock_string
+		"\n9:"
+		:"=m" (__dummy_lock(&kernel_flag)),
+		 "=m" (current->lock_depth));
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
--- v2.1.113/linux/include/asm-i386/spinlock.h	Sun Jun  7 11:16:36 1998
+++ linux/include/asm-i386/spinlock.h	Mon Aug  3 12:34:37 1998
@@ -128,8 +128,7 @@
 typedef struct { unsigned long a[100]; } __dummy_lock_t;
 #define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
 
-#define spin_lock(lock) \
-__asm__ __volatile__( \
+#define spin_lock_string \
	"\n1:\t" \
	"lock ; btsl $0,%0\n\t" \
	"jc 2f\n" \
@@ -138,12 +137,19 @@
	"testb $1,%0\n\t" \
	"jne 2b\n\t" \
	"jmp 1b\n" \
-	".previous" \
+	".previous"
+
+#define spin_unlock_string \
+	"lock ; btrl $0,%0"
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+	spin_lock_string \
	:"=m" (__dummy_lock(lock)))
 
 #define spin_unlock(lock) \
 __asm__ __volatile__( \
-	"lock ; btrl $0,%0" \
+	spin_unlock_string \
	:"=m" (__dummy_lock(lock)))
 
 #define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
diff -u --recursive --new-file v2.1.113/linux/include/asm-m68k/smplock.h linux/include/asm-m68k/smplock.h
--- v2.1.113/linux/include/asm-m68k/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-m68k/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h
--- v2.1.113/linux/include/asm-mips/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-mips/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h
--- v2.1.113/linux/include/asm-ppc/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-ppc/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-sparc/smplock.h linux/include/asm-sparc/smplock.h
--- v2.1.113/linux/include/asm-sparc/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-sparc/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/asm-sparc64/smplock.h linux/include/asm-sparc64/smplock.h
--- v2.1.113/linux/include/asm-sparc64/smplock.h	Wed Dec 31 16:00:00 1969
+++ linux/include/asm-sparc64/smplock.h	Mon Aug  3 12:28:20 1998
@@ -0,0 +1,49 @@
+/*
+ * <asm/smplock.h>
+ *
+ * Default SMP lock implementation
+ */
+#include
+#include
+
+extern spinlock_t kernel_flag;
+
+/*
+ * Release global kernel lock and global interrupt lock
+ */
+#define release_kernel_lock(task, cpu) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_unlock(&kernel_flag); \
+	release_irqlock(cpu); \
+	__sti(); \
+} while (0)
+
+/*
+ * Re-acquire the kernel lock
+ */
+#define reacquire_kernel_lock(task) \
+do { \
+	if (task->lock_depth >= 0) \
+		spin_lock(&kernel_flag); \
+} while (0)
+
+
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously,
+ * so we only need to worry about other
+ * CPU's.
+ */
+extern __inline__ void lock_kernel(void)
+{
+	if (!++current->lock_depth)
+		spin_lock(&kernel_flag);
+}
+
+extern __inline__ void unlock_kernel(void)
+{
+	if (--current->lock_depth < 0)
+		spin_unlock(&kernel_flag);
+}
diff -u --recursive --new-file v2.1.113/linux/include/linux/sched.h linux/include/linux/sched.h
--- v2.1.113/linux/include/linux/sched.h	Mon Aug  3 12:45:47 1998
+++ linux/include/linux/sched.h	Mon Aug  3 12:38:10 1998
@@ -213,9 +213,16 @@
 /* various fields */
	long counter;
	long priority;
-	struct linux_binfmt *binfmt;
+/* SMP and runqueue state */
+	int has_cpu;
+	int processor;
+	int last_processor;
+	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
	struct task_struct *next_task, *prev_task;
	struct task_struct *next_run, *prev_run;
+
+/* task state */
+	struct linux_binfmt *binfmt;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	/* ??? */
@@ -282,18 +289,12 @@
 /* memory management info */
	struct mm_struct *mm;
 /* signal handlers */
+	spinlock_t sigmask_lock;	/* Protects signal and blocked */
	struct signal_struct *sig;
	sigset_t signal, blocked;
	struct signal_queue *sigqueue, **sigqueue_tail;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
-/* SMP state */
-	int has_cpu;
-	int processor;
-	int last_processor;
-	int lock_depth;	/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
-	/* Spinlocks for various pieces or per-task state. */
-	spinlock_t sigmask_lock;	/* Protects signal and blocked */
 };
 
 /*
@@ -338,8 +339,9 @@
 #define INIT_TASK \
 /* state etc */	{ 0,0,0,KERNEL_DS,&default_exec_domain,0, \
 /* counter */	DEF_PRIORITY,DEF_PRIORITY, \
-/* binfmt */	NULL, \
+/* SMP */	0,0,0,-1, \
 /* schedlink */	&init_task,&init_task, &init_task, &init_task, \
+/* binfmt */	NULL, \
 /* ec,brk... */	0,0,0,0,0,0, \
 /* pid etc.. */	0,0,0,0,0, \
 /* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
@@ -365,10 +367,7 @@
 /* fs */	&init_fs, \
 /* files */	&init_files, \
 /* mm */	&init_mm, \
-/* signals */	&init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, \
-		0, 0, \
-/* SMP */	0,0,0,0, \
-/* locks */	INIT_LOCKS \
+/* signals */	INIT_LOCKS, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
 }
 
 union task_union {
diff -u --recursive --new-file v2.1.113/linux/include/linux/smp_lock.h linux/include/linux/smp_lock.h
--- v2.1.113/linux/include/linux/smp_lock.h	Mon Aug  3 12:45:47 1998
+++ linux/include/linux/smp_lock.h	Mon Aug  3 12:38:10 1998
@@ -10,60 +10,7 @@
 
 #else
 
-#include
-#include
-
-extern spinlock_t kernel_flag;
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
-	if (task->lock_depth) \
-		spin_unlock(&kernel_flag); \
-	release_irqlock(cpu); \
-	__sti(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-	if (task->lock_depth) \
-		spin_lock(&kernel_flag); \
-} while (0)
-
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-	struct task_struct *tsk = current;
-	int lock_depth;
-
-	lock_depth = tsk->lock_depth;
-	tsk->lock_depth = lock_depth+1;
-	if (!lock_depth)
-		spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-	struct task_struct *tsk = current;
-	int lock_depth;
-
-	lock_depth = tsk->lock_depth-1;
-	tsk->lock_depth = lock_depth;
-	if (!lock_depth)
-		spin_unlock(&kernel_flag);
-}
+#include <asm/smplock.h>
 
 #endif /* __SMP__ */
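
The new smplock.h scheme hinges on lock_depth starting at -1: the -1 -> 0 transition takes the spinlock, nested calls only bump the counter, and the scheduler can test lock_depth >= 0 to drop and retake the lock around a context switch. Below is a userspace model of the same recursion counting, using a pthread mutex in place of kernel_flag and a thread-local counter in place of current->lock_depth (assumptions: GCC's __thread extension; no real scheduler involved).

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
    static __thread int lock_depth = -1;	/* -1 = not holding the lock */

    static void lock_kernel(void)
    {
    	if (!++lock_depth)		/* -1 -> 0: first acquisition */
    		pthread_mutex_lock(&kernel_flag);
    }

    static void unlock_kernel(void)
    {
    	if (--lock_depth < 0)		/* 0 -> -1: last release */
    		pthread_mutex_unlock(&kernel_flag);
    }

    int main(void)
    {
    	lock_kernel();
    	lock_kernel();			/* nested: no deadlock, just depth++ */
    	printf("depth while nested: %d\n", lock_depth);
    	unlock_kernel();
    	unlock_kernel();
    	printf("depth after release: %d\n", lock_depth);
    	return 0;
    }

The -1 base (rather than the old 0 base) is what lets fork.c initialize p->lock_depth = -1 and lets release_kernel_lock()/reacquire_kernel_lock() distinguish "holds the lock, possibly nested" from "never took it" with a single comparison.
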
diff -u --recursive --new-file v2.1.113/linux/kernel/exit.c linux/kernel/exit.c
--- v2.1.113/linux/kernel/exit.c	Tue Jul 28 14:21:10 1998
+++ linux/kernel/exit.c	Mon Aug  3 12:18:17 1998
@@ -36,21 +36,31 @@
 {
	if (p != current) {
 #ifdef __SMP__
-		/* FIXME! Cheesy, but kills the window... -DaveM */
-		do {
-			barrier();
-		} while (p->has_cpu);
-		spin_unlock_wait(&scheduler_lock);
+		/*
+		 * Wait to make sure the process isn't active on any
+		 * other CPU
+		 */
+		for (;;) {
+			int has_cpu;
+			spin_lock(&scheduler_lock);
+			has_cpu = p->has_cpu;
+			spin_unlock(&scheduler_lock);
+			if (!has_cpu)
+				break;
+			do {
+				barrier();
+			} while (p->has_cpu);
+		}
 #endif
		charge_uid(p, -1);
		nr_tasks--;
		add_free_taskslot(p->tarray_ptr);
-		{
-			write_lock_irq(&tasklist_lock);
-			unhash_pid(p);
-			REMOVE_LINKS(p);
-			write_unlock_irq(&tasklist_lock);
-		}
+
+		write_lock_irq(&tasklist_lock);
+		unhash_pid(p);
+		REMOVE_LINKS(p);
+		write_unlock_irq(&tasklist_lock);
+
		release_thread(p);
		current->cmin_flt += p->min_flt + p->cmin_flt;
		current->cmaj_flt += p->maj_flt + p->cmaj_flt;
@@ -340,35 +350,39 @@
 
 NORET_TYPE void do_exit(long code)
 {
+	struct task_struct *tsk = current;
+
	if (in_interrupt())
		printk("Aiee, killing interrupt handler\n");
-	if (current == task[0])
+	if (!tsk->pid)
		panic("Attempted to kill the idle task!");
+	tsk->flags |= PF_EXITING;
+	del_timer(&tsk->real_timer);
+
+	lock_kernel();
 fake_volatile:
-	current->flags |= PF_EXITING;
 #ifdef CONFIG_BSD_PROCESS_ACCT
	acct_process(code);
 #endif
-	del_timer(&current->real_timer);
	sem_exit();
-	__exit_mm(current);
+	__exit_mm(tsk);
 #if CONFIG_AP1000
-	exit_msc(current);
+	exit_msc(tsk);
 #endif
-	__exit_files(current);
-	__exit_fs(current);
-	__exit_sighand(current);
+	__exit_files(tsk);
+	__exit_fs(tsk);
+	__exit_sighand(tsk);
	exit_thread();
-	current->state = TASK_ZOMBIE;
-	current->exit_code = code;
+	tsk->state = TASK_ZOMBIE;
+	tsk->exit_code = code;
	exit_notify();
 #ifdef DEBUG_PROC_TREE
	audit_ptree();
 #endif
-	if (current->exec_domain && current->exec_domain->module)
-		__MOD_DEC_USE_COUNT(current->exec_domain->module);
-	if (current->binfmt && current->binfmt->module)
-		__MOD_DEC_USE_COUNT(current->binfmt->module);
+	if (tsk->exec_domain && tsk->exec_domain->module)
+		__MOD_DEC_USE_COUNT(tsk->exec_domain->module);
+	if (tsk->binfmt && tsk->binfmt->module)
+		__MOD_DEC_USE_COUNT(tsk->binfmt->module);
	schedule();
 /*
  * In order to get rid of the "volatile function does return" message
@@ -388,9 +402,7 @@
 
 asmlinkage int sys_exit(int error_code)
 {
-	lock_kernel();
	do_exit((error_code&0xff)<<8);
-	unlock_kernel();
 }
 
 asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
diff -u --recursive --new-file v2.1.113/linux/kernel/fork.c linux/kernel/fork.c
--- v2.1.113/linux/kernel/fork.c	Thu Jul 16 18:09:30 1998
+++ linux/kernel/fork.c	Mon Aug  3 12:23:12 1998
@@ -476,7 +476,7 @@
 int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
 {
	int nr;
-	int error = -ENOMEM;
+	int retval = -ENOMEM;
	struct task_struct *p;
 
	down(&current->mm->mmap_sem);
@@ -485,7 +485,7 @@
	if (!p)
		goto bad_fork;
 
-	error = -EAGAIN;
+	retval = -EAGAIN;
	nr = find_empty_process();
	if (nr < 0)
		goto bad_fork_free;
@@ -504,8 +504,16 @@
 
	copy_flags(clone_flags, p);
	p->pid = get_pid(clone_flags);
 
-	p->next_run = NULL;
-	p->prev_run = NULL;
+	/*
+	 * This is a "shadow run" state. The process
+	 * is marked runnable, but isn't actually on
+	 * any run queue yet.. (that happens at the
+	 * very end).
+	 */
+	p->state = TASK_RUNNING;
+	p->next_run = p;
+	p->prev_run = p;
+
	p->p_pptr = p->p_opptr = current;
	p->p_cptr = NULL;
	init_waitqueue(&p->wait_chldexit);
@@ -535,12 +543,13 @@
		spin_lock_init(&p->sigmask_lock);
	}
 #endif
-	p->lock_depth = 0;
+	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;
	p->tarray_ptr = &task[nr];
	*p->tarray_ptr = p;
 
	{
+		/* This makes it visible to the rest of the system */
		unsigned long flags;
		write_lock_irqsave(&tasklist_lock, flags);
		SET_LINKS(p);
@@ -550,7 +559,7 @@
 
	nr_tasks++;
 
-	error = -ENOMEM;
+	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
@@ -560,8 +569,8 @@
		goto bad_fork_cleanup_fs;
	if (copy_mm(nr, clone_flags, p))
		goto bad_fork_cleanup_sighand;
-	error = copy_thread(nr, clone_flags, usp, p, regs);
-	if (error)
+	retval = copy_thread(nr, clone_flags, usp, p, regs);
+	if (retval)
		goto bad_fork_cleanup_sighand;
	p->semundo = NULL;
 
@@ -579,18 +588,18 @@
	current->counter >>= 1;
	p->counter = current->counter;
 
-	if(p->pid) {
-		wake_up_process(p);		/* do this last, just in case */
-	} else {
-		p->state = TASK_RUNNING;
-		p->next_run = p->prev_run = p;
+	/* Ok, add it to the run-queues, let it rip! */
+	retval = p->pid;
+	if (retval) {
+		p->next_run = NULL;
+		p->prev_run = NULL;
+		wake_up_process(p);		/* do this last */
	}
	++total_forks;
-	error = p->pid;
 bad_fork:
	up(&current->mm->mmap_sem);
	unlock_kernel();
-	return error;
+	return retval;
 
 bad_fork_cleanup_sighand:
	exit_sighand(p);
diff -u --recursive --new-file v2.1.113/linux/kernel/sched.c linux/kernel/sched.c
--- v2.1.113/linux/kernel/sched.c	Tue Jul 28 14:21:10 1998
+++ linux/kernel/sched.c	Mon Aug  3 11:27:32 1998
@@ -146,14 +146,21 @@
	current->need_resched = 1;
 }
 
-
+/*
+ * Careful!
+ *
+ * This has to add the process to the _beginning_ of the
+ * run-queue, not the end. See the comment about "This is
+ * subtle" in the scheduler proper..
+ */
 static inline void add_to_runqueue(struct task_struct * p)
 {
-	nr_running++;
-	reschedule_idle(p);
-	(p->prev_run = init_task.prev_run)->next_run = p;
-	p->next_run = &init_task;
-	init_task.prev_run = p;
+	struct task_struct *next = init_task.next_run;
+
+	p->prev_run = &init_task;
+	init_task.next_run = p;
+	p->next_run = next;
+	next->prev_run = p;
 }
 
 static inline void del_from_runqueue(struct task_struct * p)
@@ -229,8 +236,11 @@
 
	spin_lock_irqsave(&runqueue_lock, flags);
	p->state = TASK_RUNNING;
-	if (!p->next_run)
+	if (!p->next_run) {
		add_to_runqueue(p);
+		reschedule_idle(p);
+		nr_running++;
+	}
	spin_unlock_irqrestore(&runqueue_lock, flags);
 }
 
@@ -420,6 +430,9 @@
	ret = detach_timer(timer);
	timer->next = timer->prev = 0;
	spin_unlock_irqrestore(&timerlist_lock, flags);
+
+	/* Make sure the timer isn't running in parallel.. */
+	synchronize_bh();
	return ret;
 }
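
add_to_runqueue() now links the task in right after init_task, at the head of the circular run-queue, rather than at the tail. A minimal standalone model of that list operation follows; struct task and its fields are reduced stand-ins for the kernel's task_struct, and the traversal direction mirrors how the scheduler scans from the front.

    #include <stdio.h>

    /* Circular doubly-linked run-queue with init_task as the anchor. */
    struct task {
    	const char *name;
    	struct task *next_run, *prev_run;
    };

    static struct task init_task = { "init_task", &init_task, &init_task };

    /* Same four-pointer splice as the patched add_to_runqueue(). */
    static void add_to_runqueue(struct task *p)
    {
    	struct task *next = init_task.next_run;

    	p->prev_run = &init_task;
    	init_task.next_run = p;
    	p->next_run = next;
    	next->prev_run = p;
    }

    int main(void)
    {
    	struct task a = { "a" }, b = { "b" };

    	add_to_runqueue(&a);
    	add_to_runqueue(&b);	/* b now sits in front of a */

    	for (struct task *t = init_task.next_run; t != &init_task; t = t->next_run)
    		printf("%s\n", t->name);
    	return 0;
    }
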
@@ -1351,8 +1364,8 @@
	/*
	 * We play safe to avoid deadlocks.
	 */
-	spin_lock_irq(&scheduler_lock);
-	spin_lock(&runqueue_lock);
+	spin_lock(&scheduler_lock);
+	spin_lock_irq(&runqueue_lock);
	read_lock(&tasklist_lock);
 
	p = find_process_by_pid(pid);
@@ -1398,8 +1411,8 @@
 
 out_unlock:
	read_unlock(&tasklist_lock);
-	spin_unlock(&runqueue_lock);
-	spin_unlock_irq(&scheduler_lock);
+	spin_unlock_irq(&runqueue_lock);
+	spin_unlock(&scheduler_lock);
 
 out_nounlock:
	return retval;
 
@@ -1590,13 +1603,13 @@
	else
		printk(" %016lx ", thread_saved_pc(&p->tss));
 #endif
-#if 0
-	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
-		if (((unsigned long *)p->kernel_stack_page)[free])
-			break;
+	{
+		unsigned long * n = (unsigned long *) (p+1);
+		while (!*n)
+			n++;
+		free = (unsigned long) n - (unsigned long)(p+1);
	}
-#endif
-	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
+	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
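
The new show_task() code estimates stack headroom by scanning upward from just above the task struct for the first word that was ever written: the kernel stack grows down from the top of the same allocation and starts zero-filled, so the length of the untouched zero run at the bottom is the remaining slack. A bounded userspace sketch of the same scan; the kernel version omits the explicit bound because it relies on nonzero data further up.

    #include <stdio.h>
    #include <string.h>

    /* Count the bytes at the bottom of a downward-growing stack that
     * were never written (still zero), mirroring the show_task() scan. */
    static unsigned long stack_unused(unsigned long *bottom, unsigned long words)
    {
    	unsigned long *n = bottom;

    	while (n < bottom + words && !*n)
    		n++;
    	return (unsigned long) n - (unsigned long) bottom;
    }

    int main(void)
    {
    	unsigned long stack[256];

    	memset(stack, 0, sizeof(stack));
    	stack[200] = 0xdeadbeef;	/* pretend the stack grew down to here */

    	printf("%lu bytes untouched\n", stack_unused(stack, 256));
    	return 0;
    }
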