diff -u --recursive --new-file v2.3.32/linux/Makefile linux/Makefile --- v2.3.32/linux/Makefile Tue Dec 14 01:27:23 1999 +++ linux/Makefile Tue Dec 14 01:27:05 1999 @@ -1,6 +1,6 @@ VERSION = 2 PATCHLEVEL = 3 -SUBLEVEL = 32 +SUBLEVEL = 33 EXTRAVERSION = ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/) diff -u --recursive --new-file v2.3.32/linux/arch/alpha/Makefile linux/arch/alpha/Makefile --- v2.3.32/linux/arch/alpha/Makefile Tue Dec 7 09:32:39 1999 +++ linux/arch/alpha/Makefile Tue Dec 14 01:26:55 1999 @@ -29,23 +29,32 @@ # the host compiler might have on by default. Given that EV4 and EV5 # have the same instruction set, prefer EV5 because an EV5 schedule is # more likely to keep an EV4 processor busy than vice-versa. + mcpu_done := ifeq ($(CONFIG_ALPHA_GENERIC),y) CFLAGS := $(CFLAGS) -mcpu=ev5 + mcpu_done := y endif - ifeq ($(CONFIG_ALPHA_EV4),y) - CFLAGS := $(CFLAGS) -mcpu=ev4 - endif - ifeq ($(CONFIG_ALPHA_PYXIS),y) + ifeq ($(mcpu_done)$(CONFIG_ALPHA_PYXIS),y) CFLAGS := $(CFLAGS) -mcpu=ev56 + mcpu_done := y endif - ifeq ($(CONFIG_ALPHA_POLARIS),y) + ifeq ($(mcpu_done)$(CONFIG_ALPHA_POLARIS),y) ifeq ($(have_mcpu_pca56),y) CFLAGS := $(CFLAGS) -mcpu=pca56 else CFLAGS := $(CFLAGS) -mcpu=ev56 endif + mcpu_done := y + endif + ifeq ($(mcpu_done)$(CONFIG_ALPHA_NAUTILUS)$(have_mcpu_ev67),yy) + CFLAGS := $(CFLAGS) -mcpu=ev67 + mcpu_done := y + endif + ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV4),y) + CFLAGS := $(CFLAGS) -mcpu=ev4 + mcpu_done := y endif - ifeq ($(CONFIG_ALPHA_EV6),y) + ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV6),y) ifeq ($(have_mcpu_ev6),y) CFLAGS := $(CFLAGS) -mcpu=ev6 else @@ -55,6 +64,7 @@ CFLAGS := $(CFLAGS) -mcpu=ev56 endif endif + mcpu_done := y endif endif diff -u --recursive --new-file v2.3.32/linux/arch/alpha/kernel/alpha_ksyms.c linux/arch/alpha/kernel/alpha_ksyms.c --- v2.3.32/linux/arch/alpha/kernel/alpha_ksyms.c Tue Dec 7 09:32:39 1999 +++ linux/arch/alpha/kernel/alpha_ksyms.c Tue Dec 14 08:51:10 1999 @@ -36,7 +36,6 @@ extern struct hwrpb_struct *hwrpb; extern void dump_thread(struct pt_regs *, struct user *); extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); -extern void ___delay(void); /* these are C runtime functions with special calling conventions: */ extern void __divl (void); @@ -150,11 +149,6 @@ EXPORT_SYMBOL_NOVERS(__down_failed); EXPORT_SYMBOL_NOVERS(__down_failed_interruptible); EXPORT_SYMBOL_NOVERS(__up_wakeup); - -/* - * This is called specially from __delay. - */ -EXPORT_SYMBOL_NOVERS(___delay); /* * SMP-specific symbols. 
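(Aside on the EXPORT_SYMBOL_NOVERS(___delay) removal just above: it pairs with the new inline delay loop added later in this patch in include/asm-alpha/delay.h, which is why the out-of-line helper and its export can go. The sketch below simply restates that inline definition in isolation; it is illustrative only and not an additional change to apply. The explicit .align is presumably there so the loop's alignment, and hence its per-iteration speed on newer cores such as EV6, stays consistent across inlined copies.)

	/* Same shape as the inline __delay added in the asm-alpha/delay.h
	   hunk further down in this patch; shown here only for reference. */
	static inline void __delay(unsigned long loops)
	{
		__asm__ __volatile__(
			".align 4\n"
			"1:	subq %0,1,%0\n"
			"	bge %0,1b\n"
			"	nop"
			: "=r" (loops) : "0" (loops));
	}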
diff -u --recursive --new-file v2.3.32/linux/arch/alpha/kernel/core_irongate.c linux/arch/alpha/kernel/core_irongate.c --- v2.3.32/linux/arch/alpha/kernel/core_irongate.c Tue Dec 7 09:32:39 1999 +++ linux/arch/alpha/kernel/core_irongate.c Tue Dec 14 01:26:55 1999 @@ -338,6 +338,7 @@ { struct pci_controler *hose; + IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; irongate_pci_clr_err(); irongate_register_dump(__FUNCTION__); diff -u --recursive --new-file v2.3.32/linux/arch/alpha/kernel/core_mcpcia.c linux/arch/alpha/kernel/core_mcpcia.c --- v2.3.32/linux/arch/alpha/kernel/core_mcpcia.c Tue Dec 7 09:32:39 1999 +++ linux/arch/alpha/kernel/core_mcpcia.c Tue Dec 14 01:26:55 1999 @@ -306,7 +306,7 @@ mb(); draina(); wrmces(7); - mcheck_expected(cpu) = 1; + mcheck_expected(cpu) = 2; /* indicates probing */ mcheck_taken(cpu) = 0; mcheck_extra(cpu) = mid; mb(); @@ -415,7 +415,7 @@ #if 0 tmp = *(vuip)MCPCIA_INT_CTL(mid); - printk("mcpcia_init_arch: INT_CTL was 0x%x\n", tmp); + printk("mcpcia_startup_hose: INT_CTL was 0x%x\n", tmp); *(vuip)MCPCIA_INT_CTL(mid) = 1U; mb(); tmp = *(vuip)MCPCIA_INT_CTL(mid); @@ -548,30 +548,37 @@ struct el_common *mchk_header; struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; unsigned int cpu = smp_processor_id(); + int expected; mchk_header = (struct el_common *)la_ptr; mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; + expected = mcheck_expected(cpu); mb(); mb(); /* magic */ draina(); - if (mcheck_expected(cpu)) { - mcpcia_pci_clr_err(mcheck_extra(cpu)); - } else { + + switch (expected) { + case 0: /* FIXME: how do we figure out which hose the error was on? */ struct pci_controler *hose; for (hose = hose_head; hose; hose = hose->next) mcpcia_pci_clr_err(hose2mid(hose->index)); + break; + case 1: + mcpcia_pci_clr_err(mcheck_extra(cpu)); + break; + default: + /* Otherwise, we're being called from mcpcia_probe_hose + and there's no hose clear an error from. */ + break; } + wrmces(0x7); mb(); - if (mcheck_expected(cpu)) { - process_mcheck_info(vector, la_ptr, regs, "MCPCIA", 1); - } else { - process_mcheck_info(vector, la_ptr, regs, "MCPCIA", 0); - if (vector != 0x620 && vector != 0x630) - mcpcia_print_uncorrectable(mchk_logout); - } + process_mcheck_info(vector, la_ptr, regs, "MCPCIA", expected != 0); + if (!expected && vector != 0x620 && vector != 0x630) + mcpcia_print_uncorrectable(mchk_logout); } diff -u --recursive --new-file v2.3.32/linux/arch/alpha/kernel/head.S linux/arch/alpha/kernel/head.S --- v2.3.32/linux/arch/alpha/kernel/head.S Tue Aug 31 17:29:12 1999 +++ linux/arch/alpha/kernel/head.S Tue Dec 14 08:51:10 1999 @@ -95,21 +95,3 @@ .prologue 0 call_pal PAL_halt .end halt - - # - # Having the delay loop out of line guarantees that we wont - # run into weird alignment conditions (on new processors) - # that vary the speed of the loop. 
- # - .align 5 - .globl ___delay - .ent ___delay -___delay: - .set noat - .frame $30,0,$28,0 - .prologue 0 -1: subq $0,1,$0 - bge $0,1b - ret $31,($28),0 - .set at - .end ___delay diff -u --recursive --new-file v2.3.32/linux/arch/alpha/kernel/pci.c linux/arch/alpha/kernel/pci.c --- v2.3.32/linux/arch/alpha/kernel/pci.c Tue Dec 7 09:32:39 1999 +++ linux/arch/alpha/kernel/pci.c Tue Dec 14 01:26:55 1999 @@ -58,6 +58,17 @@ } static void __init +quirk_ali_ide_ports(struct pci_dev *dev) +{ + if (dev->resource[0].end == 0xffff) + dev->resource[0].end = dev->resource[0].start + 7; + if (dev->resource[2].end == 0xffff) + dev->resource[2].end = dev->resource[2].start + 7; + if (dev->resource[3].end == 0xffff) + dev->resource[3].end = dev->resource[3].start + 7; +} + +static void __init quirk_vga_enable_rom(struct pci_dev *dev) { /* If it's a VGA, enable its BIOS ROM at C0000. @@ -82,6 +93,8 @@ quirk_eisa_bridge }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge }, + { PCI_FIXUP_HEADER, PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229, + quirk_ali_ide_ports }, { PCI_FIXUP_FINAL, PCI_ANY_ID, PCI_ANY_ID, quirk_vga_enable_rom }, { 0 } }; @@ -131,13 +144,7 @@ /* Align to multiple of size of minimum base. */ alignto = MAX(0x1000, size); start = ALIGN(start, alignto); - if (size > 7 * 16*MB) { - printk(KERN_WARNING "PCI: dev %s " - "requests %ld bytes of contiguous " - "address space---don't use sparse " - "memory accesses on this device!\n", - dev->name, size); - } else { + if (size <= 7 * 16*MB) { if (((start / (16*MB)) & 0x7) == 0) { start &= ~(128*MB - 1); start += 16*MB; diff -u --recursive --new-file v2.3.32/linux/arch/alpha/kernel/sys_nautilus.c linux/arch/alpha/kernel/sys_nautilus.c --- v2.3.32/linux/arch/alpha/kernel/sys_nautilus.c Tue Dec 7 09:32:40 1999 +++ linux/arch/alpha/kernel/sys_nautilus.c Tue Dec 14 08:51:10 1999 @@ -88,13 +88,28 @@ void nautilus_kill_arch(int mode) { - u8 tmp; + switch (mode) { + case LINUX_REBOOT_CMD_RESTART: + { + u8 t8; + pcibios_read_config_byte(0, 0x38, 0x43, &t8); + pcibios_write_config_byte(0, 0x38, 0x43, t8 | 0x80); + outb(1, 0x92); + outb(0, 0x92); + /* NOTREACHED */ + } + break; - if (mode == LINUX_REBOOT_CMD_RESTART) { - pcibios_read_config_byte(0, 0x38, 0x43, &tmp); - pcibios_write_config_byte(0, 0x38, 0x43, tmp | 0x80); - outb(1, 0x92); - outb(0, 0x92); + case LINUX_REBOOT_CMD_POWER_OFF: + { + u32 pmuport; + pcibios_read_config_dword(0, 0x88, 0x10, &pmuport); + pmuport &= 0xfffe; + outl(0xffff, pmuport); /* clear pending events */ + outw(0x2000, pmuport+4); /* power off */ + /* NOTREACHED */ + } + break; } } @@ -435,8 +450,8 @@ Add to that the two levels of severity - correctable or not. */ if (vector == SCB_Q_SYSMCHK - && ((IRONGATE0->dramms & 0x3FF) == 0x300)) { - unsigned long nmi_ctl, temp; + && ((IRONGATE0->dramms & 0x300) == 0x300)) { + unsigned long nmi_ctl; /* Clear ALI NMI */ nmi_ctl = inb(0x61); @@ -445,15 +460,15 @@ nmi_ctl &= ~0x0c; outb(nmi_ctl, 0x61); - temp = IRONGATE0->stat_cmd; - IRONGATE0->stat_cmd = temp; /* write again clears error bits */ + /* Write again clears error bits. */ + IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100; mb(); - temp = IRONGATE0->stat_cmd; /* re-read to force write */ + IRONGATE0->stat_cmd; - temp = IRONGATE0->dramms; - IRONGATE0->dramms = temp; /* write again clears error bits */ + /* Write again clears error bits. 
*/ + IRONGATE0->dramms = IRONGATE0->dramms; mb(); - temp = IRONGATE0->dramms; /* re-read to force write */ + IRONGATE0->dramms; draina(); wrmces(0x7); diff -u --recursive --new-file v2.3.32/linux/arch/alpha/mm/init.c linux/arch/alpha/mm/init.c --- v2.3.32/linux/arch/alpha/mm/init.c Tue Dec 7 09:32:40 1999 +++ linux/arch/alpha/mm/init.c Tue Dec 14 08:52:17 1999 @@ -308,6 +308,7 @@ { max_mapnr = num_physpages = max_low_pfn; totalram_pages += free_all_bootmem(); + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); printk_memory_info(); } diff -u --recursive --new-file v2.3.32/linux/drivers/block/Config.in linux/drivers/block/Config.in --- v2.3.32/linux/drivers/block/Config.in Tue Nov 23 22:42:20 1999 +++ linux/drivers/block/Config.in Tue Dec 14 01:26:55 1999 @@ -57,9 +57,7 @@ bool ' AEC6210 Tuning support (EXPERIMENTAL)' CONFIG_BLK_DEV_AEC6210_TUNING fi if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then - if [ "$CONFIG_X86" = "y" ]; then - bool ' ALI M15x3 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_ALI15X3 - fi + bool ' ALI M15x3 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_ALI15X3 bool ' CMD646 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_CMD646 bool ' CY82C693 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_CY82C693 fi diff -u --recursive --new-file v2.3.32/linux/drivers/char/agp/agp.h linux/drivers/char/agp/agp.h --- v2.3.32/linux/drivers/char/agp/agp.h Tue Dec 14 01:27:23 1999 +++ linux/drivers/char/agp/agp.h Tue Dec 14 08:52:17 1999 @@ -134,13 +134,13 @@ #endif }; -#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val) -#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val) -#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val) +#define OUTREG32(mmap, addr, val) __raw_writel((val), (mmap)+(addr)) +#define OUTREG16(mmap, addr, val) __raw_writew((val), (mmap)+(addr)) +#define OUTREG8 (mmap, addr, val) __raw_writeb((val), (mmap)+(addr)) -#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr)) -#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr)) -#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr)) +#define INREG32(mmap, addr) __raw_readl((mmap)+(addr)) +#define INREG16(mmap, addr) __raw_readw((mmap)+(addr)) +#define INREG8 (mmap, addr) __raw_readb((mmap)+(addr)) #define CACHE_FLUSH agp_bridge.cache_flush #define A_SIZE_8(x) ((aper_size_info_8 *) x) diff -u --recursive --new-file v2.3.32/linux/drivers/char/agp/agpgart_be.c linux/drivers/char/agp/agpgart_be.c --- v2.3.32/linux/drivers/char/agp/agpgart_be.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/char/agp/agpgart_be.c Tue Dec 14 08:52:17 1999 @@ -62,6 +62,26 @@ static struct agp_bridge_data agp_bridge; static int agp_try_unsupported __initdata = 0; + + +static inline void flush_cache(void) +{ +#if defined(__i386__) + asm volatile ("wbinvd":::"memory"); +#elif defined(__alpha__) + /* ??? I wonder if we'll really need to flush caches, or if the + core logic can manage to keep the system coherent. The ARM + speaks only of using `cflush' to get things in memory in + preparation for power failure. + + If we do need to call `cflush', we'll need a target page, + as we can only flush one page at a time. */ + mb(); +#else +#error "Please define flush_cache." 
+#endif +} + #ifdef __SMP__ static atomic_t cpus_waiting; @@ -87,12 +107,6 @@ #define global_cache_flush flush_cache #endif /* __SMP__ */ -static void flush_cache(void) -{ - asm volatile ("wbinvd":::"memory"); -} - - int agp_backend_acquire(void) { atomic_inc(&agp_bridge.agp_in_use); @@ -1356,6 +1370,7 @@ static int amd_irongate_configure(void) { aper_size_info_32 *current_size; + unsigned long addr; u32 temp; u16 enable_reg; @@ -1389,8 +1404,16 @@ /* Get the address for the gart region */ pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp); - temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); - agp_bridge.gart_bus_addr = temp; + addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); +#ifdef __alpha__ + /* ??? Presumably what is wanted is the bus address as seen + from the CPU side, since it appears that this value is + exported to userland via an ioctl. The terminology below + is confused, mixing `physical address' with `bus address', + as x86 folk are wont to do. */ + addr = virt_to_phys(ioremap(addr, 0)); +#endif + agp_bridge.gart_bus_addr = addr; return 0; } @@ -1894,13 +1917,10 @@ static int agp_find_max(void) { - int memory; - float t; - int index; - int result; + long memory, t, index, result; - memory = virt_to_phys(high_memory) / 0x100000; - index = 0; + memory = virt_to_phys(high_memory) >> 20; + index = 1; while ((memory > maxes_table[index].mem) && (index < 8)) { @@ -1914,8 +1934,8 @@ (t * (maxes_table[index].agp - maxes_table[index - 1].agp)); printk(KERN_INFO "agpgart: Maximum main memory to use " - "for agp memory: %dM\n", result); - result = (result * 0x100000) / 4096; + "for agp memory: %ldM\n", result); + result = result << (20 - PAGE_SHIFT); return result; } diff -u --recursive --new-file v2.3.32/linux/drivers/char/pc_keyb.c linux/drivers/char/pc_keyb.c --- v2.3.32/linux/drivers/char/pc_keyb.c Fri Oct 15 15:25:13 1999 +++ linux/drivers/char/pc_keyb.c Tue Dec 14 11:00:45 1999 @@ -412,6 +412,15 @@ #endif } +static inline void handle_keyboard_event(unsigned char scancode) +{ +#ifdef CONFIG_VT + if (do_acknowledge(scancode)) + handle_scancode(scancode, !(scancode & 0x80)); +#endif + mark_bh(KEYBOARD_BH); +} + /* * This reads the keyboard status port, and does the * appropriate action. 
@@ -428,20 +437,18 @@ unsigned char scancode; scancode = kbd_read_input(); - if (status & KBD_STAT_MOUSE_OBF) { - handle_mouse_event(scancode); - } else { -#ifdef CONFIG_VT - if (do_acknowledge(scancode)) - handle_scancode(scancode, !(scancode & 0x80)); -#endif - mark_bh(KEYBOARD_BH); + + /* Ignore error bytes */ + if (!(status & (KBD_STAT_GTO | KBD_STAT_PERR))) { + if (status & KBD_STAT_MOUSE_OBF) + handle_mouse_event(scancode); + else + handle_keyboard_event(scancode); } status = kbd_read_status(); - if(!work--) - { + if (!--work) { printk(KERN_ERR "pc_keyb: controller jammed (0x%02X).\n", status); break; diff -u --recursive --new-file v2.3.32/linux/drivers/pci/setup.c linux/drivers/pci/setup.c --- v2.3.32/linux/drivers/pci/setup.c Tue Dec 7 09:32:44 1999 +++ linux/drivers/pci/setup.c Tue Dec 14 08:51:16 1999 @@ -34,10 +34,15 @@ int err; err = -EINVAL; - if (root != NULL) + if (root != NULL) { err = request_resource(root, res); - if (err) { - printk(KERN_ERR "PCI: Address space collision on region %d " + if (err) { + printk(KERN_ERR "PCI: Address space collision on " + "region %d of device %s [%lx:%lx]\n", + resource, dev->name, res->start, res->end); + } + } else { + printk(KERN_ERR "PCI: No parent found for region %d " "of device %s\n", resource, dev->name); } @@ -72,14 +77,14 @@ continue; /* Determine the root we allocate from. */ + res->end -= res->start; + res->start = 0; root = pci_find_parent_resource(dev, res); if (root == NULL) continue; min = (res->flags & IORESOURCE_IO ? min_io : min_mem); - min += root->start; - size = res->end - res->start + 1; - + size = res->end + 1; DBGC((" for root[%lx:%lx] min[%lx] size[%lx]\n", root->start, root->end, min, size)); diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/aha1542.c linux/drivers/scsi/aha1542.c --- v2.3.32/linux/drivers/scsi/aha1542.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/aha1542.c Tue Dec 14 08:49:00 1999 @@ -46,11 +46,33 @@ #define SCSI_PA(address) virt_to_bus(address) -#define BAD_DMA(msg, address, length) \ - { \ - printk(KERN_CRIT "%s address %p length %d\n", msg, address, length); \ - panic("Buffer at physical address > 16Mb used for aha1542"); \ - } +static void BAD_DMA(void * address, unsigned int length) +{ + printk(KERN_CRIT "buf vaddress %p paddress 0x%lx length %d\n", + address, + SCSI_PA(address), + length); + panic("Buffer at physical address > 16Mb used for aha1542"); +} + +static void BAD_SG_DMA(Scsi_Cmnd * SCpnt, + struct scatterlist * sgpnt, + int nseg, + int badseg) +{ + printk(KERN_CRIT "sgpnt[%d:%d] addr %p/0x%lx alt %p/0x%lx length %d\n", + badseg, nseg, + sgpnt[badseg].address, + SCSI_PA(sgpnt[badseg].address), + sgpnt[badseg].alt_address, + sgpnt[badseg].alt_address ? SCSI_PA(sgpnt[badseg].alt_address) : 0, + sgpnt[badseg].length); + + /* + * Not safe to continue. 
+ */ + panic("Buffer at physical address > 16Mb used for aha1542"); +} #include @@ -655,7 +677,7 @@ }; any2scsi(cptr[i].dataptr, SCSI_PA(sgpnt[i].address)); if(SCSI_PA(sgpnt[i].address+sgpnt[i].length-1) > ISA_DMA_THRESHOLD) - BAD_DMA("sgpnt", sgpnt[i].address, sgpnt[i].length); + BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i); any2scsi(cptr[i].datalen, sgpnt[i].length); }; any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain)); @@ -670,7 +692,7 @@ SCpnt->host_scribble = NULL; any2scsi(ccb[mbo].datalen, bufflen); if(buff && SCSI_PA(buff+bufflen-1) > ISA_DMA_THRESHOLD) - BAD_DMA("buff", buff, bufflen); + BAD_DMA(buff, bufflen); any2scsi(ccb[mbo].dataptr, SCSI_PA(buff)); }; ccb[mbo].idlun = (target&7)<<5 | direction | (lun & 7); /*SCSI Target Id*/ diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/qlogicisp.c linux/drivers/scsi/qlogicisp.c --- v2.3.32/linux/drivers/scsi/qlogicisp.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/scsi/qlogicisp.c Tue Dec 14 08:51:10 1999 @@ -1250,6 +1250,12 @@ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 16); pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); #endif +#ifdef __alpha__ + /* Force ALPHA to use bus I/O and not bus MEM. + This is to avoid having to use HAE_MEM registers, + which is broken on some platforms and with SMP. */ + command &= ~PCI_COMMAND_MEMORY; +#endif if ((command & PCI_COMMAND_MEMORY) && ((mem_flags & 1) == 0)) { diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi.c linux/drivers/scsi/scsi.c --- v2.3.32/linux/drivers/scsi/scsi.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/scsi.c Tue Dec 14 08:49:00 1999 @@ -1139,12 +1139,18 @@ * to complete */ atomic_inc(&SCpnt->host->host_active); + SCpnt->buffer = NULL; + SCpnt->bufflen = 0; + SCpnt->request_buffer = NULL; + SCpnt->request_bufflen = 0; + SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ SCpnt->old_use_sg = 0; SCpnt->transfersize = 0; /* No default transfer size */ SCpnt->cmd_len = 0; SCpnt->underflow = 0; /* Do not flag underflow conditions */ + SCpnt->resid = 0; SCpnt->state = SCSI_STATE_INITIALIZING; SCpnt->owner = SCSI_OWNER_HIGHLEVEL; @@ -1344,7 +1350,7 @@ * need be held upon entry. The old queueing code the lock was * assumed to be held upon entry. * - * Returns: Pointer to command descriptor. + * Returns: Nothing. * * Notes: Prior to the new queue code, this function was not SMP-safe. * Also, this function is now only used for queueing requests @@ -1482,6 +1488,7 @@ * etc, etc. */ if (!tstatus) { + SCpnt->done_late = 1; return; } /* Set the serial numbers back to zero */ diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi.h linux/drivers/scsi/scsi.h --- v2.3.32/linux/drivers/scsi/scsi.h Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/scsi.h Tue Dec 14 11:32:00 1999 @@ -398,6 +398,11 @@ unsigned int *secs); /* + * Prototypes for functions in scsi_merge.c + */ +extern void recount_segments(Scsi_Cmnd * SCpnt); + +/* * Prototypes for functions in scsi_lib.c */ extern void initialize_merge_fn(Scsi_Device * SDpnt); @@ -422,8 +427,6 @@ void (*done) (struct scsi_cmnd *), int timeout, int retries); -extern void scsi_request_fn(request_queue_t * q); - extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int); extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *); @@ -627,6 +630,14 @@ unsigned flags; /* + * Used to indicate that a command which has timed out also + * completed normally. 
Typically the completion function will + * do nothing but set this flag in this instance because the + * timeout handler is already running. + */ + unsigned done_late:1; + + /* * These two flags are used to track commands that are in the * mid-level queue. The idea is that a command can be there for * one of two reasons - either the host is busy or the device is @@ -635,11 +646,6 @@ */ unsigned host_wait:1; unsigned device_wait:1; - - /* These variables are for the cdrom only. Once we have variable size - * buffers in the buffer cache, they will go away. */ - int this_count; - /* End of special cdrom variables */ /* Low-level done function - can be used by low-level driver to point * to completion function. Not used by mid/upper level code. */ diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi_debug.c linux/drivers/scsi/scsi_debug.c --- v2.3.32/linux/drivers/scsi/scsi_debug.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/scsi_debug.c Tue Dec 14 13:24:55 1999 @@ -208,6 +208,7 @@ sgcount = 0; sgpnt = NULL; +#ifdef CONFIG_SMP /* * The io_request_lock *must* be held at this point. */ @@ -215,6 +216,7 @@ { printk("Warning - io_request_lock is not held in queuecommand\n"); } +#endif /* * If we are being notified of the mid-level reposessing a command due to timeout, diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi_error.c linux/drivers/scsi/scsi_error.c --- v2.3.32/linux/drivers/scsi/scsi_error.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/scsi_error.c Tue Dec 14 08:49:00 1999 @@ -117,6 +117,8 @@ SCset->eh_timeout.expires = jiffies + timeout; SCset->eh_timeout.function = (void (*)(unsigned long)) complete; + SCset->done_late = 0; + SCSI_LOG_ERROR_RECOVERY(5, printk("Adding timer for command %p at %d (%p)\n", SCset, timeout, complete)); add_timer(&SCset->eh_timeout); @@ -159,11 +161,14 @@ * * Returns: Nothing. * - * Notes: + * Notes: We do not need to lock this. There is the potential for + * a race only in that the normal completion handling might + * run, but if the normal completion function determines + * that the timer has already fired, then it mustn't do + * anything. */ -static void do_scsi_times_out(Scsi_Cmnd * SCpnt) +void scsi_times_out(Scsi_Cmnd * SCpnt) { - /* * Notify the low-level code that this operation failed and we are * reposessing the command. @@ -219,20 +224,15 @@ * If the host is having troubles, then look to see if this was the last * command that might have failed. If so, wake up the error handler. */ + if( SCpnt->host->eh_wait == NULL ) { + panic("Error handler thread not present at %p %p %s %d", + SCpnt, SCpnt->host, __FILE__, __LINE__); + } if (SCpnt->host->host_busy == SCpnt->host->host_failed) { up(SCpnt->host->eh_wait); } } -void scsi_times_out(Scsi_Cmnd * SCpnt) -{ - unsigned long flags; - - spin_lock_irqsave(&io_request_lock, flags); - do_scsi_times_out(SCpnt); - spin_unlock_irqrestore(&io_request_lock, flags); -} - /* * Function scsi_block_when_processing_errors * @@ -277,8 +277,6 @@ unsigned long flags; int rtn = FAILED; - spin_lock_irqsave(&io_request_lock, flags); - SCpnt->eh_state = SCSI_STATE_TIMEOUT; SCpnt->owner = SCSI_OWNER_LOWLEVEL; @@ -286,8 +284,10 @@ * As far as the low level driver is concerned, this command is still * active, so we must give the low level driver a chance to abort it. 
(DB) */ + spin_lock_irqsave(&io_request_lock, flags); if (SCpnt->host->hostt->eh_abort_handler) rtn = SCpnt->host->hostt->eh_abort_handler(SCpnt); + spin_unlock_irqrestore(&io_request_lock, flags); SCpnt->request.rq_status = RQ_SCSI_DONE; SCpnt->owner = SCSI_OWNER_ERROR_HANDLER; @@ -298,7 +298,6 @@ up(SCpnt->host->eh_action); else printk("Missing scsi error handler thread\n"); - spin_unlock_irqrestore(&io_request_lock, flags); } @@ -319,6 +318,20 @@ STATIC void scsi_eh_done(Scsi_Cmnd * SCpnt) { + int rtn; + + /* + * If the timeout handler is already running, then just set the + * flag which says we finished late, and return. We have no + * way of stopping the timeout handler from running, so we must + * always defer to it. + */ + rtn = del_timer(&SCpnt->eh_timeout); + if (!rtn) { + SCpnt->done_late = 1; + return; + } + SCpnt->request.rq_status = RQ_SCSI_DONE; SCpnt->owner = SCSI_OWNER_ERROR_HANDLER; @@ -418,7 +431,7 @@ {REQUEST_SENSE, 0, 0, 0, 255, 0}; unsigned char scsi_result0[256], *scsi_result = NULL; - ASSERT_LOCK(&io_request_lock, 1); + ASSERT_LOCK(&io_request_lock, 0); memcpy((void *) SCpnt->cmnd, (void *) generic_sense, sizeof(generic_sense)); @@ -426,7 +439,7 @@ SCpnt->cmnd[1] = SCpnt->lun << 5; scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma) - ? &scsi_result0[0] : scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA); + ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA); if (scsi_result == NULL) { printk("cannot allocate scsi_result in scsi_request_sense.\n"); @@ -455,7 +468,7 @@ sizeof(SCpnt->sense_buffer)); if (scsi_result != &scsi_result0[0] && scsi_result != NULL) - scsi_init_free(scsi_result, 512); + kfree(scsi_result); /* * When we eventually call scsi_finish, we really wish to complete @@ -492,7 +505,7 @@ SCpnt->cmnd[1] = SCpnt->lun << 5; scsi_result = (!SCpnt->host->hostt->unchecked_isa_dma) - ? &scsi_result0[0] : scsi_init_malloc(512, GFP_ATOMIC | GFP_DMA); + ? &scsi_result0[0] : kmalloc(512, GFP_ATOMIC | GFP_DMA); if (scsi_result == NULL) { printk("cannot allocate scsi_result in scsi_test_unit_ready.\n"); @@ -520,7 +533,7 @@ sizeof(SCpnt->sense_buffer)); if (scsi_result != &scsi_result0[0] && scsi_result != NULL) - scsi_init_free(scsi_result, 512); + kfree(scsi_result); /* * When we eventually call scsi_finish, we really wish to complete @@ -552,7 +565,6 @@ } } - void scsi_sleep(int timeout) { DECLARE_MUTEX_LOCKED(sem); @@ -582,9 +594,10 @@ */ STATIC void scsi_send_eh_cmnd(Scsi_Cmnd * SCpnt, int timeout) { + unsigned long flags; struct Scsi_Host *host; - ASSERT_LOCK(&io_request_lock, 1); + ASSERT_LOCK(&io_request_lock, 0); host = SCpnt->host; @@ -608,15 +621,14 @@ SCpnt->host->eh_action = &sem; SCpnt->request.rq_status = RQ_SCSI_BUSY; + spin_lock_irqsave(&io_request_lock, flags); host->hostt->queuecommand(SCpnt, scsi_eh_done); - spin_unlock_irq(&io_request_lock); + spin_unlock_irqrestore(&io_request_lock, flags); + down(&sem); - spin_lock_irq(&io_request_lock); SCpnt->host->eh_action = NULL; - del_timer(&SCpnt->eh_timeout); - /* * See if timeout. If so, tell the host to forget about it. * In other words, we don't want a callback any more. @@ -634,7 +646,10 @@ * protection here, since we would end up waiting in the actual low * level driver, we don't know how to wake it up. 
*/ + spin_lock_irqsave(&io_request_lock, flags); temp = host->hostt->command(SCpnt); + spin_unlock_irqrestore(&io_request_lock, flags); + SCpnt->result = temp; if (scsi_eh_completed_normally(SCpnt)) { SCpnt->eh_state = SUCCESS; @@ -734,6 +749,9 @@ */ STATIC int scsi_try_to_abort_command(Scsi_Cmnd * SCpnt, int timeout) { + int rtn; + unsigned long flags; + SCpnt->eh_state = FAILED; /* Until we come up with something better */ if (SCpnt->host->hostt->eh_abort_handler == NULL) { @@ -748,7 +766,10 @@ SCpnt->owner = SCSI_OWNER_LOWLEVEL; - return SCpnt->host->hostt->eh_abort_handler(SCpnt); + spin_lock_irqsave(&io_request_lock, flags); + rtn = SCpnt->host->hostt->eh_abort_handler(SCpnt); + spin_unlock_irqrestore(&io_request_lock, flags); + return rtn; } /* @@ -767,6 +788,7 @@ */ STATIC int scsi_try_bus_device_reset(Scsi_Cmnd * SCpnt, int timeout) { + unsigned long flags; int rtn; SCpnt->eh_state = FAILED; /* Until we come up with something better */ @@ -776,7 +798,9 @@ } SCpnt->owner = SCSI_OWNER_LOWLEVEL; + spin_lock_irqsave(&io_request_lock, flags); rtn = SCpnt->host->hostt->eh_device_reset_handler(SCpnt); + spin_unlock_irqrestore(&io_request_lock, flags); if (rtn == SUCCESS) SCpnt->eh_state = SUCCESS; @@ -796,6 +820,7 @@ */ STATIC int scsi_try_bus_reset(Scsi_Cmnd * SCpnt) { + unsigned long flags; int rtn; SCpnt->eh_state = FAILED; /* Until we come up with something better */ @@ -805,7 +830,10 @@ if (SCpnt->host->hostt->eh_bus_reset_handler == NULL) { return FAILED; } + + spin_lock_irqsave(&io_request_lock, flags); rtn = SCpnt->host->hostt->eh_bus_reset_handler(SCpnt); + spin_unlock_irqrestore(&io_request_lock, flags); if (rtn == SUCCESS) SCpnt->eh_state = SUCCESS; @@ -814,9 +842,7 @@ * If we had a successful bus reset, mark the command blocks to expect * a condition code of unit attention. */ - spin_unlock_irq(&io_request_lock); scsi_sleep(BUS_RESET_SETTLE_TIME); - spin_lock_irq(&io_request_lock); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { @@ -841,6 +867,7 @@ */ STATIC int scsi_try_host_reset(Scsi_Cmnd * SCpnt) { + unsigned long flags; int rtn; SCpnt->eh_state = FAILED; /* Until we come up with something better */ @@ -850,7 +877,9 @@ if (SCpnt->host->hostt->eh_host_reset_handler == NULL) { return FAILED; } + spin_lock_irqsave(&io_request_lock, flags); rtn = SCpnt->host->hostt->eh_host_reset_handler(SCpnt); + spin_unlock_irqrestore(&io_request_lock, flags); if (rtn == SUCCESS) SCpnt->eh_state = SUCCESS; @@ -859,9 +888,7 @@ * If we had a successful host reset, mark the command blocks to expect * a condition code of unit attention. */ - spin_unlock_irq(&io_request_lock); scsi_sleep(HOST_RESET_SETTLE_TIME); - spin_lock_irq(&io_request_lock); if (SCpnt->eh_state == SUCCESS) { Scsi_Device *SDloop; for (SDloop = SCpnt->host->host_queue; SDloop; SDloop = SDloop->next) { @@ -1258,7 +1285,7 @@ Scsi_Cmnd *SCdone; int timed_out; - ASSERT_LOCK(&io_request_lock, 1); + ASSERT_LOCK(&io_request_lock, 0); SCdone = NULL; @@ -1539,14 +1566,7 @@ * is the case, we are worrying about nothing here. */ - /* - * Due to the spinlock, we will never get out of this - * loop without a proper wait (DB) - */ - spin_unlock_irq(&io_request_lock); scsi_sleep(1 * HZ); - spin_lock_irq(&io_request_lock); - goto next_device; } } @@ -1638,9 +1658,7 @@ * Due to the spinlock, we will never get out of this * loop without a proper wait. 
(DB) */ - spin_unlock_irq(&io_request_lock); scsi_sleep(1 * HZ); - spin_lock_irq(&io_request_lock); goto next_device2; } @@ -1784,7 +1802,6 @@ struct Scsi_Host *host = (struct Scsi_Host *) data; int rtn; DECLARE_MUTEX_LOCKED(sem); - unsigned long flags; siginitsetinv(¤t->blocked, SHUTDOWN_SIGS); @@ -1828,7 +1845,6 @@ SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler waking up\n")); - spin_lock_irqsave(&io_request_lock, flags); host->eh_active = 1; /* @@ -1843,9 +1859,6 @@ } host->eh_active = 0; - - /* The spinlock is really needed up to this point. (DB) */ - spin_unlock_irqrestore(&io_request_lock, flags); /* * Note - if the above fails completely, the action is to take diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c --- v2.3.32/linux/drivers/scsi/scsi_lib.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/scsi_lib.c Tue Dec 14 08:49:00 1999 @@ -109,6 +109,7 @@ for (req = q->current_request; req; req = req->next) { if (req->next == NULL) { req->next = &SCpnt->request; + break; } } } @@ -383,6 +384,16 @@ scsi_free(SCpnt->buffer, SCpnt->bufflen); } } + + /* + * Zero these out. They now point to freed memory, and it is + * dangerous to hang onto the pointers. + */ + SCpnt->buffer = NULL; + SCpnt->bufflen = 0; + SCpnt->request_buffer = NULL; + SCpnt->request_bufflen = 0; + /* * Next deal with any sectors which we were able to correctly * handle. @@ -630,9 +641,14 @@ /* * Find the actual device driver associated with this command. * The SPECIAL requests are things like character device or - * ioctls, which did not originate from ll_rw_blk. + * ioctls, which did not originate from ll_rw_blk. Note that + * the special field is also used to indicate the SCpnt for + * the remainder of a partially fulfilled request that can + * come up when there is a medium error. We have to treat + * these two cases differently. We differentiate by looking + * at request.cmd, as this tells us the real story. */ - if (req->special != NULL) { + if (req->cmd == SPECIAL) { STpnt = NULL; SCpnt = (Scsi_Cmnd *) req->special; } else { @@ -643,7 +659,20 @@ /* * Now try and find a command block that we can use. */ - SCpnt = scsi_allocate_device(SDpnt, FALSE); + if( req->special != NULL ) { + SCpnt = (Scsi_Cmnd *) req->special; + /* + * We need to recount the number of + * scatter-gather segments here - the + * normal case code assumes this to be + * correct, as it would be a performance + * lose to always recount. Handling + * errors is always unusual, of course. + */ + recount_segments(SCpnt); + } else { + SCpnt = scsi_allocate_device(SDpnt, FALSE); + } /* * If so, we are ready to do something. Bump the count * while the queue is locked and then break out of the loop. @@ -689,8 +718,9 @@ * in this queue are for the same device. */ q->current_request = req->next; + SCpnt->request.next = NULL; - if (req->special == NULL) { + if (req != &SCpnt->request) { memcpy(&SCpnt->request, req, sizeof(struct request)); /* @@ -702,13 +732,15 @@ wake_up(&wait_for_request); } /* - * Now it is finally safe to release the lock. We are not going - * to noodle the request list until this request has been queued - * and we loop back to queue another. + * Now it is finally safe to release the lock. We are + * not going to noodle the request list until this + * request has been queued and we loop back to queue + * another. 
*/ + req = NULL; spin_unlock_irq(&io_request_lock); - if (req->special == NULL) { + if (SCpnt->request.cmd != SPECIAL) { /* * This will do a couple of things: * 1) Fill in the actual SCSI command. diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/scsi_merge.c linux/drivers/scsi/scsi_merge.c --- v2.3.32/linux/drivers/scsi/scsi_merge.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/scsi_merge.c Tue Dec 14 08:49:00 1999 @@ -156,7 +156,7 @@ * the DMA threshold boundary. */ if (dma_host && - virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) { + virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) { ret++; } else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) { /* @@ -173,6 +173,43 @@ } /* + * Function: recount_segments() + * + * Purpose: Recount the number of scatter-gather segments for this request. + * + * Arguments: req - request that needs recounting. + * + * Returns: Count of the number of SG segments for the request. + * + * Lock status: Irrelevant. + * + * Notes: This is only used when we have partially completed requests + * and the bit that is leftover is of an indeterminate size. + * This can come up if you get a MEDIUM_ERROR, for example, + * as we will have "completed" all of the sectors up to and + * including the bad sector, and the leftover bit is what + * we have to do now. This tends to be a rare occurence, so + * we aren't busting our butts to instantiate separate versions + * of this function for the 4 different flag values. We + * probably should, however. + */ +void +recount_segments(Scsi_Cmnd * SCpnt) +{ + struct request *req; + struct Scsi_Host *SHpnt; + Scsi_Device * SDpnt; + + req = &SCpnt->request; + SHpnt = SCpnt->host; + SDpnt = SCpnt->device; + + req->nr_segments = __count_segments(req, + CLUSTERABLE_DEVICE(SHpnt, SDpnt), + SHpnt->unchecked_isa_dma); +} + +/* * Function: __scsi_merge_fn() * * Purpose: Prototype for queue merge function. @@ -236,7 +273,7 @@ * the DMA threshold boundary. */ if (dma_host && - virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) { + virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) { goto new_segment; } if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) { @@ -256,7 +293,7 @@ * the DMA threshold boundary. */ if (dma_host && - virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) { + virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) { goto new_segment; } if (CONTIGUOUS_BUFFERS(bh, req->bh)) { @@ -380,7 +417,7 @@ * the DMA threshold boundary. */ if (dma_host && - virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) { + virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) { goto dont_combine; } if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) { @@ -573,7 +610,7 @@ bh; bh = bh->b_reqnext) { if (use_clustering && bhprev != NULL) { if (dma_host && - virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) { + virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) { /* Nothing - fall through */ } else if (CONTIGUOUS_BUFFERS(bhprev, bh)) { /* @@ -612,7 +649,23 @@ for (i = 0; i < count; i++) { SCpnt->request_bufflen += sgpnt[i].length; if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 > - ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) { + ISA_DMA_THRESHOLD) { + if( scsi_dma_free_sectors <= 10 ) { + /* + * If the DMA pool is nearly empty, then + * let's stop here. Don't make this request + * any larger. This is kind of a safety valve + * that we use - we could get screwed later on + * if we run out completely. 
+ */ + SCpnt->request_bufflen -= sgpnt[i].length; + SCpnt->use_sg = i; + if (i == 0) { + panic("DMA pool exhausted"); + } + break; + } + sgpnt[i].alt_address = sgpnt[i].address; sgpnt[i].address = (char *) scsi_malloc(sgpnt[i].length); diff -u --recursive --new-file v2.3.32/linux/drivers/scsi/sg.c linux/drivers/scsi/sg.c --- v2.3.32/linux/drivers/scsi/sg.c Tue Dec 14 01:27:24 1999 +++ linux/drivers/scsi/sg.c Tue Dec 14 08:49:00 1999 @@ -981,8 +981,10 @@ SCSI_LOG_TIMEOUT(3, printk("sg_init\n")); sg_dev_arr = (Sg_device *) - scsi_init_malloc((sg_template.dev_noticed + SG_EXTRA_DEVS) - * sizeof(Sg_device), GFP_ATOMIC); + kmalloc((sg_template.dev_noticed + SG_EXTRA_DEVS) + * sizeof(Sg_device), GFP_ATOMIC); + memset(sg_dev_arr, 0, (sg_template.dev_noticed + SG_EXTRA_DEVS) + * sizeof(Sg_device)); if (NULL == sg_dev_arr) { printk("sg_init: no space for sg_dev_arr\n"); return 1; @@ -1085,9 +1087,7 @@ if(sg_dev_arr != NULL) { /* Really worrying situation of writes still pending and get here */ /* Strategy: shorten timeout on release + wait on detach ... */ - scsi_init_free((char *) sg_dev_arr, - (sg_template.dev_noticed + SG_EXTRA_DEVS) - * sizeof(Sg_device)); + kfree((char *) sg_dev_arr); sg_dev_arr = NULL; } sg_template.dev_max = 0; diff -u --recursive --new-file v2.3.32/linux/drivers/usb/usb-serial.c linux/drivers/usb/usb-serial.c --- v2.3.32/linux/drivers/usb/usb-serial.c Thu Nov 11 20:11:49 1999 +++ linux/drivers/usb/usb-serial.c Tue Dec 14 11:16:22 1999 @@ -285,7 +285,7 @@ unsigned char* data = buffer; int i; - debug_info("USB: serial_read_irq\n"); + debug_info("USB serial: serial_read_irq\n"); #ifdef SERIAL_DEBUG if (count) { @@ -356,10 +356,22 @@ { struct usb_serial_state *serial; - debug_info("USB: serial_open\n"); + debug_info("USB Serial: serial_open\n"); /* assign a serial object to the tty pointer */ serial = &serial_state_table [MINOR(tty->device)-tty->driver.minor_start]; + + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return (-ENODEV); + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return (-ENODEV); + } + + /* make the tty driver remember our serial object, and us it */ tty->driver_data = serial; serial->tty = tty; @@ -375,13 +387,21 @@ static void serial_close(struct tty_struct *tty, struct file * filp) { struct usb_serial_state *serial = (struct usb_serial_state *) tty->driver_data; - debug_info("USB: serial_close\n"); + debug_info("USB Serial: serial_close\n"); + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return; + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return; + } if (!serial->present) { debug_info("USB Serial: no device registered\n"); return; } - if (!serial->active) { debug_info ("USB Serial: device already open\n"); return; @@ -400,11 +420,19 @@ debug_info("USB Serial: serial_write\n"); + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return (-ENODEV); + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return (-ENODEV); + } if (!serial->present) { debug_info("USB Serial: device not registered\n"); return (-EINVAL); } - if (!serial->active) { debug_info ("USB Serial: device not opened\n"); return (-EINVAL); @@ -426,11 +454,19 @@ debug_info("USB Serial: serial_put_char\n"); + /* do some sanity checking that we 
really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return; + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return; + } if (!serial->present) { debug_info("USB Serial: no device registered\n"); return; } - if (!serial->active) { debug_info ("USB Serial: device not open\n"); return; @@ -451,11 +487,19 @@ debug_info("USB Serial: serial_write_room\n"); + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return (-ENODEV); + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return (-ENODEV); + } if (!serial->present) { debug_info("USB Serial: no device registered\n"); return (-EINVAL); } - if (!serial->active) { debug_info ("USB Serial: device not open\n"); return (-EINVAL); @@ -476,11 +520,19 @@ debug_info("USB Serial: serial_chars_in_buffer\n"); + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return (-ENODEV); + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return (-ENODEV); + } if (!serial->present) { debug_info("USB Serial: no device registered\n"); return (-EINVAL); } - if (!serial->active) { debug_info ("USB Serial: device not open\n"); return (-EINVAL); @@ -501,11 +553,19 @@ debug_info("USB Serial: serial_throttle\n"); + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return; + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return; + } if (!serial->present) { debug_info("USB Serial: no device registered\n"); return; } - if (!serial->active) { debug_info ("USB Serial: device not open\n"); return; @@ -526,11 +586,19 @@ debug_info("USB Serial: serial_unthrottle\n"); + /* do some sanity checking that we really have a device present */ + if (!serial) { + debug_info("USB Serial: serial == NULL!\n"); + return; + } + if (!serial->type) { + debug_info("USB Serial: serial->type == NULL!\n"); + return; + } if (!serial->present) { debug_info("USB Serial: no device registered\n"); return; } - if (!serial->active) { debug_info ("USB Serial: device not open\n"); return; @@ -553,7 +621,7 @@ { struct usb_serial_state *serial = (struct usb_serial_state *) tty->driver_data; - debug_info("USB: etek_serial_open\n"); + debug_info("USB Serial: etek_serial_open\n"); if (!serial->present) { debug_info("USB Serial: no device registered\n"); @@ -580,7 +648,7 @@ static void etek_serial_close(struct tty_struct *tty, struct file * filp) { struct usb_serial_state *serial = (struct usb_serial_state *) tty->driver_data; - debug_info("USB: etek_serial_close\n"); + debug_info("USB Serial: etek_serial_close\n"); /* Need to change the control lines here */ /* FIXME */ @@ -711,7 +779,7 @@ { struct usb_serial_state *serial = (struct usb_serial_state *) tty->driver_data; - debug_info("USB: generic_serial_open\n"); + debug_info("USB Serial: generic_serial_open\n"); if (!serial->present) { debug_info("USB Serial: no device registered\n"); @@ -738,7 +806,7 @@ static void generic_serial_close(struct tty_struct *tty, struct file * filp) { struct usb_serial_state *serial = (struct usb_serial_state *) tty->driver_data; - debug_info("USB: generic_serial_close\n"); + debug_info("USB Serial: generic_serial_close\n"); /* shutdown any bulk reads that might be going on */ if (serial->bulk_out_inuse){ @@ -865,11 
+933,9 @@ struct usb_endpoint_descriptor *interrupt_in_endpoint = NULL; struct usb_endpoint_descriptor *bulk_in_endpoint = NULL; struct usb_endpoint_descriptor *bulk_out_endpoint = NULL; -// SERIAL_TYPE type; struct usb_serial_device_type *type; int device_num; int serial_num; -// int ret; int i; char interrupt_pipe; char bulk_in_pipe; @@ -880,14 +946,14 @@ while (usb_serial_devices[device_num] != NULL) { type = usb_serial_devices[device_num]; #ifdef SERIAL_DEBUG - printk ("Looking at %s\nVendor id=%.4x\nProduct id=%.4x", type->name, *(type->idVendor), *(type->idProduct)); + printk ("USB Serial: Looking at %s\nVendor id=%.4x\nProduct id=%.4x\n", type->name, *(type->idVendor), *(type->idProduct)); #endif /* look at the device descriptor */ if ((dev->descriptor.idVendor == *(type->idVendor)) && (dev->descriptor.idProduct == *(type->idProduct))) { - debug_info("descriptor matches...looking at the endpoints\n") + debug_info("USB Serial: descriptor matches...looking at the endpoints\n") /* descriptor matches, let's try to find the endpoints needed */ interrupt_pipe = bulk_in_pipe = bulk_out_pipe = HAS_NOT; @@ -900,7 +966,7 @@ if ((endpoint->bEndpointAddress & 0x80) && ((endpoint->bmAttributes & 3) == 0x02)) { /* we found a bulk in endpoint */ - debug_info("found bulk in\n"); + debug_info("USB Serial: found bulk in\n"); if (bulk_in_pipe == HAS) { printk("USB Serial: can't have more than one bulk in endpoint\n"); goto probe_error; @@ -912,7 +978,7 @@ if (((endpoint->bEndpointAddress & 0x80) == 0x00) && ((endpoint->bmAttributes & 3) == 0x02)) { /* we found a bulk out endpoint */ - debug_info("found bulk out\n"); + debug_info("USB Serial: found bulk out\n"); if (bulk_out_pipe == HAS) { printk("USB Serial: can't have more than one bulk out endpoint\n"); goto probe_error; @@ -924,7 +990,7 @@ if ((endpoint->bEndpointAddress & 0x80) && ((endpoint->bmAttributes & 3) == 0x03)) { /* we found a interrupt in endpoint */ - debug_info("found interrupt in\n"); + debug_info("USB Serial: found interrupt in\n"); if (interrupt_pipe == HAS) { printk("USB Serial: can't have more than one interrupt in endpoint\n"); goto probe_error; @@ -1005,7 +1071,7 @@ /* set up our interrupt to be the time for the bulk in read */ ret = usb_request_irq (dev, serial->bulk_in_pipe, usb_serial_irq, serial->bulk_in_interval, serial, &serial->irq_handle); if (ret) { - printk(KERN_INFO "USB Serial failed usb_request_irq (0x%x)\n", ret); + printk(KERN_INFO "USB Serial: failed usb_request_irq (0x%x)\n", ret); goto probe_error; } #endif @@ -1015,7 +1081,7 @@ return serial; } else { - printk(KERN_INFO "USB Serial, descriptors matched, but endpoints did not\n"); + printk(KERN_INFO "USB Serial: descriptors matched, but endpoints did not\n"); } } @@ -1023,9 +1089,6 @@ ++device_num; } - - - probe_error: if (serial) { if (serial->bulk_in_buffer) @@ -1073,7 +1136,7 @@ MOD_DEC_USE_COUNT; - printk (KERN_INFO "USB Serial device disconnected.\n"); + printk (KERN_INFO "USB Serial: device disconnected.\n"); } @@ -1138,7 +1201,7 @@ return -1; } - printk(KERN_INFO "USB Serial support registered.\n"); + printk(KERN_INFO "USB Serial: support registered.\n"); return 0; } diff -u --recursive --new-file v2.3.32/linux/drivers/video/dummycon.c linux/drivers/video/dummycon.c --- v2.3.32/linux/drivers/video/dummycon.c Mon Aug 9 10:25:01 1999 +++ linux/drivers/video/dummycon.c Tue Dec 14 08:51:10 1999 @@ -25,7 +25,7 @@ #define DUMMY_ROWS 25 #endif -static const char __init *dummycon_startup(void) +static const char *__init dummycon_startup(void) { return "dummy 
device"; } diff -u --recursive --new-file v2.3.32/linux/drivers/video/fbcon.c linux/drivers/video/fbcon.c --- v2.3.32/linux/drivers/video/fbcon.c Tue Dec 7 09:32:46 1999 +++ linux/drivers/video/fbcon.c Tue Dec 14 08:51:10 1999 @@ -683,7 +683,7 @@ } if (save) { q = (unsigned short *)(conp->vc_origin + conp->vc_size_row * old_rows); - memcpy(q, save, logo_lines * nr_cols * 2); + scr_memcpyw(q, save, logo_lines * nr_cols * 2); conp->vc_y += logo_lines; conp->vc_pos += logo_lines * conp->vc_size_row; kfree(save); diff -u --recursive --new-file v2.3.32/linux/drivers/video/tgafb.c linux/drivers/video/tgafb.c --- v2.3.32/linux/drivers/video/tgafb.c Tue Nov 23 22:42:21 1999 +++ linux/drivers/video/tgafb.c Tue Dec 14 08:51:16 1999 @@ -937,7 +937,7 @@ static void tgafb_set_disp(const void *fb_par, struct display *disp, struct fb_info_gen *info) { - disp->screen_base = ioremap(fb_info.tga_fb_base, 0); + disp->screen_base = fb_info.tga_fb_base; switch (fb_info.tga_type) { #ifdef FBCON_HAS_CFB8 case 0: /* 8-plane */ @@ -1034,14 +1034,16 @@ pdev = pci_find_device(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TGA, NULL); if (!pdev) return -ENXIO; - fb_info.tga_mem_base = pdev->resource[0].start; + fb_info.tga_mem_base = ioremap(pdev->resource[0].start, 0); + #ifdef DEBUG printk(KERN_DEBUG "tgafb_init: mem_base 0x%x\n", fb_info.tga_mem_base); #endif /* DEBUG */ - fb_info.tga_type = (readl((unsigned long)fb_info.tga_mem_base) >> 12) & 0x0f; - fb_info.tga_regs_base = ((unsigned long)fb_info.tga_mem_base + TGA_REGS_OFFSET); - fb_info.tga_fb_base = ((unsigned long)fb_info.tga_mem_base + fb_offset_presets[fb_info.tga_type]); + fb_info.tga_type = (readl(fb_info.tga_mem_base) >> 12) & 0x0f; + fb_info.tga_regs_base = fb_info.tga_mem_base + TGA_REGS_OFFSET; + fb_info.tga_fb_base = (fb_info.tga_mem_base + + fb_offset_presets[fb_info.tga_type]); /* XXX Why the fuck is it called modename if it identifies the board? */ strcpy (fb_info.gen.info.modename,"DEC 21030 TGA "); diff -u --recursive --new-file v2.3.32/linux/drivers/video/tgafb.h linux/drivers/video/tgafb.h --- v2.3.32/linux/drivers/video/tgafb.h Thu Jul 1 10:57:36 1999 +++ linux/drivers/video/tgafb.h Tue Dec 14 08:51:16 1999 @@ -168,7 +168,7 @@ /* Device dependent information */ int tga_type; /* TGA type: {8plane, 24plane, 24plusZ} */ - unsigned int tga_mem_base; + unsigned long tga_mem_base; unsigned long tga_fb_base; unsigned long tga_regs_base; struct fb_var_screeninfo default_var; /* default video mode */ diff -u --recursive --new-file v2.3.32/linux/fs/ext2/acl.c linux/fs/ext2/acl.c --- v2.3.32/linux/fs/ext2/acl.c Tue Dec 7 09:32:47 1999 +++ linux/fs/ext2/acl.c Tue Dec 14 11:16:22 1999 @@ -15,46 +15,3 @@ * This file will contain the Access Control Lists management for the * second extended file system. */ - - -/* - * ext2_permission () - * - * Check for access rights - */ -int ext2_permission (struct inode * inode, int mask) -{ - unsigned short mode = inode->i_mode; - - /* - * Nobody gets write access to a file on a readonly-fs - */ - if ((mask & S_IWOTH) && - (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) && - IS_RDONLY(inode)) - return -EROFS; - /* - * Nobody gets write access to an immutable file - */ - if ((mask & S_IWOTH) && IS_IMMUTABLE(inode)) - return -EACCES; - - /* - * If no ACL, checks using the file mode - */ - else if (current->fsuid == inode->i_uid) - mode >>= 6; - else if (in_group_p (inode->i_gid)) - mode >>= 3; - /* - * Access is always granted for root. 
We now check last, - * though, for BSD process accounting correctness - */ - if (((mode & mask & S_IRWXO) == mask) || capable(CAP_DAC_OVERRIDE)) - return 0; - if ((mask == S_IROTH) || - (S_ISDIR(mode) && !(mask & ~(S_IROTH | S_IXOTH)))) - if (capable(CAP_DAC_READ_SEARCH)) - return 0; - return -EACCES; -} diff -u --recursive --new-file v2.3.32/linux/fs/ext2/dir.c linux/fs/ext2/dir.c --- v2.3.32/linux/fs/ext2/dir.c Tue Dec 7 09:32:47 1999 +++ linux/fs/ext2/dir.c Tue Dec 14 11:16:22 1999 @@ -67,7 +67,7 @@ NULL, /* readpage */ NULL, /* writepage */ NULL, /* truncate */ - ext2_permission, /* permission */ + NULL, /* permission */ NULL /* revalidate */ }; diff -u --recursive --new-file v2.3.32/linux/fs/ext2/file.c linux/fs/ext2/file.c --- v2.3.32/linux/fs/ext2/file.c Tue Dec 7 09:32:47 1999 +++ linux/fs/ext2/file.c Tue Dec 14 11:16:22 1999 @@ -177,6 +177,6 @@ block_read_full_page, /* readpage */ block_write_full_page, /* writepage */ ext2_truncate, /* truncate */ - ext2_permission, /* permission */ + NULL, /* permission */ NULL, /* revalidate */ }; diff -u --recursive --new-file v2.3.32/linux/include/asm-alpha/core_irongate.h linux/include/asm-alpha/core_irongate.h --- v2.3.32/linux/include/asm-alpha/core_irongate.h Tue Dec 7 09:32:49 1999 +++ linux/include/asm-alpha/core_irongate.h Tue Dec 14 01:26:55 1999 @@ -12,7 +12,8 @@ * This file is based on: * * IronGate management library, (c) 1999 Alpha Processor, Inc. - * Begun 19 January 1999 by Stig Telfer, Alpha Processor, Inc. + * Copyright (C) 1999 Alpha Processor, Inc., + * (David Daniel, Stig Telfer, Soohoon Lee) */ /* @@ -24,9 +25,9 @@ * */ -/* Eh? Not offset from memory? */ -#define IRONGATE_DMA_WIN_BASE (0U) -#define IRONGATE_DMA_WIN_SIZE (0U) +#define IRONGATE_DMA_WIN_BASE (0UL) +#define IRONGATE_DMA_WIN_SIZE (0UL) + /* * Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access @@ -334,9 +335,7 @@ * Memory spaces: */ -/* ??? the following probably needs fixing */ /* Irongate is consistent with a subset of the Tsunami memory map */ -/* XXX: Do we need to conditionalize on this? */ #ifdef USE_48_BIT_KSEG #define IRONGATE_BIAS 0x80000000000UL #else @@ -348,7 +347,6 @@ #define IRONGATE_IACK_SC (IDENT_ADDR | IRONGATE_BIAS | 0x1F8000000UL) #define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL) #define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL) - #define IRONGATE0 ((Irongate0 *) IRONGATE_CONF) diff -u --recursive --new-file v2.3.32/linux/include/asm-alpha/delay.h linux/include/asm-alpha/delay.h --- v2.3.32/linux/include/asm-alpha/delay.h Tue Aug 31 17:29:14 1999 +++ linux/include/asm-alpha/delay.h Tue Dec 14 08:51:10 1999 @@ -9,16 +9,19 @@ * Delay routines, using a pre-computed "loops_per_second" value. */ +/* We can make the delay loop inline, but we have to be very careful wrt + scheduling for ev6 machines, so that we keep a consistent number of + iterations for all invocations. 
*/ + extern __inline__ void __delay(unsigned long loops) { - register unsigned long r0 __asm__("$0") = loops; -#ifdef MODULE - __asm__ __volatile__("lda $28,___delay; jsr $28,($28),0" - : "=r"(r0) : "r"(r0) : "$28"); -#else - __asm__ __volatile__("bsr $28,___delay" : "=r"(r0) : "r"(r0) : "$28"); -#endif + __asm__ __volatile__( + ".align 4\n" + "1: subq %0,1,%0\n" + " bge %0,1b\n" + " nop" + : "=r" (loops) : "0"(loops)); } /* diff -u --recursive --new-file v2.3.32/linux/include/asm-alpha/io.h linux/include/asm-alpha/io.h --- v2.3.32/linux/include/asm-alpha/io.h Tue Dec 7 09:32:49 1999 +++ linux/include/asm-alpha/io.h Tue Dec 14 08:52:17 1999 @@ -53,10 +53,7 @@ */ static inline unsigned long virt_to_phys(volatile void * address) { - /* Conditionalize this on the CPU? This here is 40 bits, - whereas EV4 only supports 34. But KSEG is farther out - so it shouldn't _really_ matter. */ - return 0xffffffffffUL & (unsigned long) address; + return (unsigned long)address - IDENT_ADDR; } static inline void * phys_to_virt(unsigned long address) @@ -265,6 +262,11 @@ static inline void iounmap(void *addr) { } + +static inline void * ioremap_nocache(unsigned long offset, unsigned long size) +{ + return ioremap(offset, size); +} /* Indirect back to the macros provided. */ diff -u --recursive --new-file v2.3.32/linux/include/asm-alpha/pgtable.h linux/include/asm-alpha/pgtable.h --- v2.3.32/linux/include/asm-alpha/pgtable.h Tue Dec 7 09:32:49 1999 +++ linux/include/asm-alpha/pgtable.h Tue Dec 14 08:52:17 1999 @@ -296,7 +296,7 @@ #define kern_addr_valid(addr) (1) #define io_remap_page_range(start, busaddr, size, prot) \ - remap_page_range(start, virt_to_phys(__ioremap(busaddr)), size, prot) + remap_page_range(start, virt_to_phys(__ioremap(busaddr, 0)), size, prot) #define pte_ERROR(e) \ printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) diff -u --recursive --new-file v2.3.32/linux/include/asm-alpha/system.h linux/include/asm-alpha/system.h --- v2.3.32/linux/include/asm-alpha/system.h Wed Dec 8 14:11:28 1999 +++ linux/include/asm-alpha/system.h Tue Dec 14 08:52:17 1999 @@ -245,6 +245,7 @@ return __r0; \ } +__CALL_PAL_W1(cflush, unsigned long); __CALL_PAL_R0(rdmces, unsigned long); __CALL_PAL_R0(rdps, unsigned long); __CALL_PAL_R0(rdusp, unsigned long); diff -u --recursive --new-file v2.3.32/linux/include/linux/pci_ids.h linux/include/linux/pci_ids.h --- v2.3.32/linux/include/linux/pci_ids.h Wed Dec 8 14:11:28 1999 +++ linux/include/linux/pci_ids.h Tue Dec 14 11:16:22 1999 @@ -146,6 +146,7 @@ #define PCI_DEVICE_ID_NCR_53C875 0x000f #define PCI_DEVICE_ID_NCR_53C1510 0x0010 #define PCI_DEVICE_ID_NCR_53C875J 0x008f +#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701 #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_DEVICE_ID_ATI_68800 0x4158 @@ -627,6 +628,7 @@ #define PCI_DEVICE_ID_INTERG_1682 0x1682 #define PCI_DEVICE_ID_INTERG_2000 0x2000 #define PCI_DEVICE_ID_INTERG_2010 0x2010 +#define PCI_DEVICE_ID_INTERG_5000 0x5000 #define PCI_VENDOR_ID_REALTEK 0x10ec #define PCI_DEVICE_ID_REALTEK_8029 0x8029 diff -u --recursive --new-file v2.3.32/linux/ipc/util.c linux/ipc/util.c --- v2.3.32/linux/ipc/util.c Tue Dec 7 09:32:51 1999 +++ linux/ipc/util.c Tue Dec 14 10:01:21 1999 @@ -130,7 +130,7 @@ if(ids->seq > ids->seq_max) ids->seq = 0; - ipc_lock(ids,id); + spin_lock(&ids->ary); ids->entries[id].p = new; return id; } diff -u --recursive --new-file v2.3.32/linux/kernel/panic.c linux/kernel/panic.c --- v2.3.32/linux/kernel/panic.c Thu Aug 26 13:05:42 1999 +++ linux/kernel/panic.c Tue Dec 14 08:51:10 1999 @@ -16,10 
+16,6 @@ #include #include -#ifdef __alpha__ -#include -#endif - asmlinkage void sys_sync(void); /* it's really int */ extern void unblank_console(void); extern int C_A_D; @@ -77,10 +73,6 @@ } #ifdef __sparc__ printk("Press L1-A to return to the boot prom\n"); -#endif -#ifdef __alpha__ - if (alpha_using_srm) - halt(); #endif sti(); for(;;) { diff -u --recursive --new-file v2.3.32/linux/kernel/resource.c linux/kernel/resource.c --- v2.3.32/linux/kernel/resource.c Tue Dec 7 09:32:52 1999 +++ linux/kernel/resource.c Tue Dec 14 11:16:22 1999 @@ -55,7 +55,7 @@ int retval; fmt = " %08lx-%08lx : %s\n"; - if (root == &ioport_resource) + if (root->end < 0x10000) fmt = " %04lx-%04lx : %s\n"; read_lock(&resource_lock); retval = do_resource_list(root->child, fmt, 8, buf, buf + size) - buf; diff -u --recursive --new-file v2.3.32/linux/mm/mremap.c linux/mm/mremap.c --- v2.3.32/linux/mm/mremap.c Wed Dec 8 14:11:28 1999 +++ linux/mm/mremap.c Tue Dec 14 11:26:54 1999 @@ -165,11 +165,15 @@ * * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise * This option implies MREMAP_MAYMOVE. + * + * "__new_addr" toying in order to not change the saved stack layout + * for old x86 binaries that don't want %edi to change.. */ asmlinkage unsigned long sys_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr) + unsigned long flags, unsigned long __new_addr) { + unsigned long new_addr = __new_addr; struct vm_area_struct *vma; unsigned long ret = -EINVAL;
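(Summary sketch of the timeout-vs-completion convention adopted by the drivers/scsi changes earlier in this patch: if del_timer() reports that the error-handling timer has already fired, the completion path only records done_late and returns, leaving the running timeout handler in charge. This restates the scsi_eh_done change in scsi_error.c under the assumption of the same Scsi_Cmnd fields shown there — eh_timeout, done_late — and the function name used here is illustrative, not part of the patch.)

	static void scsi_eh_done_sketch(Scsi_Cmnd *SCpnt)
	{
		/* Timeout handler already running: defer to it. */
		if (!del_timer(&SCpnt->eh_timeout)) {
			SCpnt->done_late = 1;
			return;
		}
		SCpnt->request.rq_status = RQ_SCSI_DONE;
		SCpnt->owner = SCSI_OWNER_ERROR_HANDLER;
		/* ... normal completion continues as in the scsi_error.c hunk ... */
	}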