diff -u --recursive --new-file v2.4.0/linux/Documentation/Changes linux/Documentation/Changes --- v2.4.0/linux/Documentation/Changes Mon Jan 1 10:00:04 2001 +++ linux/Documentation/Changes Mon Jan 8 15:18:32 2001 @@ -56,7 +56,7 @@ o e2fsprogs 1.19 # tune2fs --version o pcmcia-cs 3.1.21 # cardmgr -V o PPP 2.4.0 # pppd --version -o isdn4k-utils 3.1beta7 # isdnctrl 2>&1|grep version +o isdn4k-utils 3.1pre1 # isdnctrl 2>&1|grep version Kernel compilation ================== diff -u --recursive --new-file v2.4.0/linux/Documentation/Configure.help linux/Documentation/Configure.help --- v2.4.0/linux/Documentation/Configure.help Thu Jan 4 13:00:55 2001 +++ linux/Documentation/Configure.help Mon Jan 8 15:06:01 2001 @@ -14619,6 +14619,14 @@ This enables Van Jacobson header compression for synchronous PPP. Say Y if the other end of the connection supports it. +CONFIG_ISDN_PPP_BSDCOMP + Support for the BSD-Compress compression method for PPP, which uses + the LZW compression method to compress each PPP packet before it is + sent over the wire. The machine at the other end of the PPP link + (usually your ISP) has to support the BSD-Compress compression + method as well for this to be useful. Even if they don't support it, + it is safe to say Y here. + Support audio via ISDN CONFIG_ISDN_AUDIO If you say Y here, the modem-emulator will support a subset of the diff -u --recursive --new-file v2.4.0/linux/Makefile linux/Makefile --- v2.4.0/linux/Makefile Thu Jan 4 13:48:13 2001 +++ linux/Makefile Thu Jan 11 00:23:44 2001 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 -SUBLEVEL = 0 -EXTRAVERSION = +SUBLEVEL = 1 +EXTRAVERSION =-pre2 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) diff -u --recursive --new-file v2.4.0/linux/arch/i386/Makefile linux/arch/i386/Makefile --- v2.4.0/linux/arch/i386/Makefile Fri Dec 29 14:07:19 2000 +++ linux/arch/i386/Makefile Wed Jan 10 15:06:14 2001 @@ -50,7 +50,7 @@ CFLAGS += -march=i686 endif -ifdef CONFIG_M686FXSR +ifdef CONFIG_MPENTIUMIII CFLAGS += -march=i686 endif diff -u --recursive --new-file v2.4.0/linux/arch/i386/config.in linux/arch/i386/config.in --- v2.4.0/linux/arch/i386/config.in Fri Dec 29 14:35:47 2000 +++ linux/arch/i386/config.in Mon Jan 8 13:27:56 2001 @@ -33,7 +33,7 @@ Pentium-Classic CONFIG_M586TSC \ Pentium-MMX CONFIG_M586MMX \ Pentium-Pro/Celeron/Pentium-II CONFIG_M686 \ - Pentium-III CONFIG_M686FXSR \ + Pentium-III CONFIG_MPENTIUMIII \ Pentium-4 CONFIG_MPENTIUM4 \ K6/K6-II/K6-III CONFIG_MK6 \ Athlon/K7 CONFIG_MK7 \ @@ -45,8 +45,6 @@ # Define implied options from the CPU selection here # -unset CONFIG_X86_FXSR - if [ "$CONFIG_M386" = "y" ]; then define_bool CONFIG_X86_CMPXCHG n define_int CONFIG_X86_L1_CACHE_SHIFT 4 @@ -87,14 +85,12 @@ define_bool CONFIG_X86_PGE y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y fi -if [ "$CONFIG_M686FXSR" = "y" ]; then +if [ "$CONFIG_MPENTIUMIII" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_PGE y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y - define_bool CONFIG_X86_FXSR y - define_bool CONFIG_X86_XMM y fi if [ "$CONFIG_MPENTIUM4" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 7 @@ -102,8 +98,6 @@ define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_PGE y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y - define_bool CONFIG_X86_FXSR y - define_bool CONFIG_X86_XMM y fi if [ "$CONFIG_MK6" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 5 @@ -158,9 +152,7 @@ define_bool CONFIG_X86_PAE y fi -if [ "$CONFIG_X86_FXSR" != "y" 
]; then - bool 'Math emulation' CONFIG_MATH_EMULATION -fi +bool 'Math emulation' CONFIG_MATH_EMULATION bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR bool 'Symmetric multi-processing support' CONFIG_SMP if [ "$CONFIG_SMP" != "y" ]; then diff -u --recursive --new-file v2.4.0/linux/arch/i386/defconfig linux/arch/i386/defconfig --- v2.4.0/linux/arch/i386/defconfig Sun Dec 31 09:17:18 2000 +++ linux/arch/i386/defconfig Mon Jan 8 15:05:57 2001 @@ -27,7 +27,7 @@ # CONFIG_M586TSC is not set # CONFIG_M586MMX is not set # CONFIG_M686 is not set -CONFIG_M686FXSR=y +CONFIG_MPENTIUMIII=y # CONFIG_MPENTIUM4 is not set # CONFIG_MK6 is not set # CONFIG_MK7 is not set @@ -45,8 +45,6 @@ CONFIG_X86_GOOD_APIC=y CONFIG_X86_PGE=y CONFIG_X86_USE_PPRO_CHECKSUM=y -CONFIG_X86_FXSR=y -CONFIG_X86_XMM=y # CONFIG_TOSHIBA is not set # CONFIG_MICROCODE is not set # CONFIG_X86_MSR is not set @@ -54,6 +52,7 @@ CONFIG_NOHIGHMEM=y # CONFIG_HIGHMEM4G is not set # CONFIG_HIGHMEM64G is not set +# CONFIG_MATH_EMULATION is not set # CONFIG_MTRR is not set CONFIG_SMP=y CONFIG_HAVE_DEC_LOCK=y diff -u --recursive --new-file v2.4.0/linux/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c --- v2.4.0/linux/arch/i386/kernel/i387.c Fri Nov 3 09:47:48 2000 +++ linux/arch/i386/kernel/i387.c Thu Jan 11 17:12:18 2001 @@ -18,14 +18,6 @@ #include #include -#if defined(CONFIG_X86_FXSR) -#define HAVE_FXSR 1 -#elif defined(CONFIG_X86_RUNTIME_FXSR) -#define HAVE_FXSR (cpu_has_fxsr) -#else -#define HAVE_FXSR 0 -#endif - #ifdef CONFIG_MATH_EMULATION #define HAVE_HWFP (boot_cpu_data.hard_math) #else @@ -35,13 +27,13 @@ /* * The _current_ task is using the FPU for the first time * so initialize it and set the mxcsr to its default - * value at reset if we support FXSR and then + * value at reset if we support XMM instructions and then * remember the current task has used the FPU. */ void init_fpu(void) { __asm__("fninit"); - if ( HAVE_FXSR ) + if ( cpu_has_xmm ) load_mxcsr(0x1f80); current->used_math = 1; @@ -51,9 +43,9 @@ * FPU lazy state save handling.
*/ -void save_init_fpu( struct task_struct *tsk ) +static inline void __save_init_fpu( struct task_struct *tsk ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { asm volatile( "fxsave %0 ; fnclex" : "=m" (tsk->thread.i387.fxsave) ); } else { @@ -61,12 +53,28 @@ : "=m" (tsk->thread.i387.fsave) ); } tsk->flags &= ~PF_USEDFPU; +} + +void save_init_fpu( struct task_struct *tsk ) +{ + __save_init_fpu(tsk); stts(); } +void kernel_fpu_begin(void) +{ + struct task_struct *tsk = current; + + if (tsk->flags & PF_USEDFPU) { + __save_init_fpu(tsk); + return; + } + clts(); +} + void restore_fpu( struct task_struct *tsk ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { asm volatile( "fxrstor %0" : : "m" (tsk->thread.i387.fxsave) ); } else { @@ -144,7 +152,7 @@ unsigned short get_fpu_cwd( struct task_struct *tsk ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return tsk->thread.i387.fxsave.cwd; } else { return (unsigned short)tsk->thread.i387.fsave.cwd; @@ -153,7 +161,7 @@ unsigned short get_fpu_swd( struct task_struct *tsk ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return tsk->thread.i387.fxsave.swd; } else { return (unsigned short)tsk->thread.i387.fsave.swd; @@ -162,7 +170,7 @@ unsigned short get_fpu_twd( struct task_struct *tsk ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return tsk->thread.i387.fxsave.twd; } else { return (unsigned short)tsk->thread.i387.fsave.twd; @@ -171,7 +179,7 @@ unsigned short get_fpu_mxcsr( struct task_struct *tsk ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return tsk->thread.i387.fxsave.mxcsr; } else { return 0x1f80; @@ -180,7 +188,7 @@ void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { tsk->thread.i387.fxsave.cwd = cwd; } else { tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000); @@ -189,7 +197,7 @@ void set_fpu_swd( struct task_struct *tsk, unsigned short swd ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { tsk->thread.i387.fxsave.swd = swd; } else { tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000); @@ -198,7 +206,7 @@ void set_fpu_twd( struct task_struct *tsk, unsigned short twd ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd); } else { tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000); @@ -207,7 +215,7 @@ void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr ) { - if ( HAVE_FXSR ) { + if ( cpu_has_xmm ) { tsk->thread.i387.fxsave.mxcsr = mxcsr; } } @@ -321,7 +329,7 @@ current->used_math = 0; if ( HAVE_HWFP ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return save_i387_fxsave( buf ); } else { return save_i387_fsave( buf ); @@ -354,7 +362,7 @@ int err; if ( HAVE_HWFP ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { err = restore_i387_fxsave( buf ); } else { err = restore_i387_fsave( buf ); @@ -387,7 +395,7 @@ int get_fpregs( struct user_i387_struct *buf, struct task_struct *tsk ) { if ( HAVE_HWFP ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return get_fpregs_fxsave( buf, tsk ); } else { return get_fpregs_fsave( buf, tsk ); @@ -415,7 +423,7 @@ int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf ) { if ( HAVE_HWFP ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { return set_fpregs_fxsave( tsk, buf ); } else { return set_fpregs_fsave( tsk, buf ); @@ -428,9 +436,10 @@ int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk ) { - if ( HAVE_FXSR ) { - __copy_to_user( (void *)buf, &tsk->thread.i387.fxsave, - sizeof(struct user_fxsr_struct) ); + if ( cpu_has_fxsr ) { + if (__copy_to_user( (void 
*)buf, &tsk->thread.i387.fxsave, + sizeof(struct user_fxsr_struct) )) + return -EFAULT; return 0; } else { return -EIO; @@ -439,7 +448,7 @@ int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct *buf ) { - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { __copy_from_user( &tsk->thread.i387.fxsave, (void *)buf, sizeof(struct user_fxsr_struct) ); /* mxcsr bit 6 and 31-16 must be zero for security reasons */ @@ -485,7 +494,7 @@ fpvalid = tsk->used_math; if ( fpvalid ) { unlazy_fpu( tsk ); - if ( HAVE_FXSR ) { + if ( cpu_has_fxsr ) { copy_fpu_fxsave( tsk, fpu ); } else { copy_fpu_fsave( tsk, fpu ); @@ -500,7 +509,7 @@ int fpvalid; struct task_struct *tsk = current; - fpvalid = tsk->used_math && HAVE_FXSR; + fpvalid = tsk->used_math && cpu_has_fxsr; if ( fpvalid ) { unlazy_fpu( tsk ); memcpy( fpu, &tsk->thread.i387.fxsave, diff -u --recursive --new-file v2.4.0/linux/arch/i386/kernel/setup.c linux/arch/i386/kernel/setup.c --- v2.4.0/linux/arch/i386/kernel/setup.c Sun Dec 31 10:26:18 2000 +++ linux/arch/i386/kernel/setup.c Thu Jan 11 17:35:25 2001 @@ -147,6 +147,7 @@ extern unsigned long cpu_khz; static int disable_x86_serial_nr __initdata = 1; +static int disable_x86_fxsr __initdata = 0; /* * This is set up by the setup-routine at boot-time @@ -518,7 +519,7 @@ e820.nr_map = 0; add_memory_region(0, LOWMEMSIZE(), E820_RAM); - add_memory_region(HIGH_MEMORY, (mem_size << 10) - HIGH_MEMORY, E820_RAM); + add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM); } printk("BIOS-provided physical RAM map:\n"); print_memory_map(who); @@ -1796,6 +1797,13 @@ } __setup("serialnumber", x86_serial_nr_setup); +int __init x86_fxsr_setup(char * s) +{ + disable_x86_fxsr = 1; + return 1; +} +__setup("nofxsr", x86_fxsr_setup); + /* Standard macro to see if a specific flag is changeable */ static inline int flag_is_changeable_p(u32 flag) @@ -2004,6 +2012,12 @@ if ( tsc_disable ) clear_bit(X86_FEATURE_TSC, &c->x86_capability); #endif + + /* FXSR disabled? */ + if (disable_x86_fxsr) { + clear_bit(X86_FEATURE_FXSR, &c->x86_capability); + clear_bit(X86_FEATURE_XMM, &c->x86_capability); + } /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); diff -u --recursive --new-file v2.4.0/linux/arch/i386/lib/mmx.c linux/arch/i386/lib/mmx.c --- v2.4.0/linux/arch/i386/lib/mmx.c Wed Nov 8 17:09:49 2000 +++ linux/arch/i386/lib/mmx.c Thu Jan 11 17:42:24 2001 @@ -2,6 +2,8 @@ #include #include +#include + /* * MMX 3DNow! 
library helper functions * @@ -26,13 +28,7 @@ void *p=to; int i= len >> 6; /* len/64 */ - if (!(current->flags & PF_USEDFPU)) - clts(); - else - { - __asm__ __volatile__ ( " fnsave %0; fwait\n"::"m"(current->thread.i387)); - current->flags &= ~PF_USEDFPU; - } + kernel_fpu_begin(); __asm__ __volatile__ ( "1: prefetch (%0)\n" /* This set is 28 bytes */ @@ -88,20 +84,15 @@ * Now do the tail of the block */ __memcpy(to, from, len&63); - stts(); + kernel_fpu_end(); return p; } static void fast_clear_page(void *page) { int i; - if (!(current->flags & PF_USEDFPU)) - clts(); - else - { - __asm__ __volatile__ ( " fnsave %0; fwait\n"::"m"(current->thread.i387)); - current->flags &= ~PF_USEDFPU; - } + + kernel_fpu_begin(); __asm__ __volatile__ ( " pxor %%mm0, %%mm0\n" : : @@ -127,19 +118,14 @@ __asm__ __volatile__ ( " sfence \n" : : ); - stts(); + kernel_fpu_end(); } static void fast_copy_page(void *to, void *from) { int i; - if (!(current->flags & PF_USEDFPU)) - clts(); - else - { - __asm__ __volatile__ ( " fnsave %0; fwait\n"::"m"(current->thread.i387)); - current->flags &= ~PF_USEDFPU; - } + + kernel_fpu_begin(); /* maybe the prefetch stuff can go before the expensive fnsave... * but that is for later. -AV @@ -199,7 +185,7 @@ __asm__ __volatile__ ( " sfence \n" : : ); - stts(); + kernel_fpu_end(); } /* diff -u --recursive --new-file v2.4.0/linux/drivers/isdn/hisax/Makefile linux/drivers/isdn/hisax/Makefile --- v2.4.0/linux/drivers/isdn/hisax/Makefile Fri Dec 29 14:40:54 2000 +++ linux/drivers/isdn/hisax/Makefile Mon Jan 8 15:06:01 2001 @@ -34,8 +34,8 @@ hisax-objs-$(CONFIG_HISAX_ASUSCOM) += asuscom.o isac.o arcofi.o hscx.o hisax-objs-$(CONFIG_HISAX_TELEINT) += teleint.o isac.o arcofi.o hfc_2bs0.o hisax-objs-$(CONFIG_HISAX_SEDLBAUER) += sedlbauer.o isac.o arcofi.o hscx.o isar.o -hisax-objs-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hfc_2bs0.o -hisax-objs-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hfc_2bs0.o +hisax-objs-$(CONFIG_HISAX_SPORTSTER) += sportster.o isac.o arcofi.o hscx.o +hisax-objs-$(CONFIG_HISAX_MIC) += mic.o isac.o arcofi.o hscx.o hisax-objs-$(CONFIG_HISAX_NETJET) += nj_s.o netjet.o isac.o arcofi.o hisax-objs-$(CONFIG_HISAX_NETJET_U) += nj_u.o netjet.o icc.o hisax-objs-$(CONFIG_HISAX_HFCS) += hfcscard.o hfc_2bds0.o diff -u --recursive --new-file v2.4.0/linux/drivers/isdn/hisax/isdnl3.c linux/drivers/isdn/hisax/isdnl3.c --- v2.4.0/linux/drivers/isdn/hisax/isdnl3.c Tue Nov 28 21:43:13 2000 +++ linux/drivers/isdn/hisax/isdnl3.c Mon Jan 8 15:19:34 2001 @@ -566,7 +566,7 @@ } else { struct sk_buff *skb = arg; - skb_queue_head(&st->l3.squeue, skb); + skb_queue_tail(&st->l3.squeue, skb); FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL); } break; diff -u --recursive --new-file v2.4.0/linux/drivers/isdn/hisax/md5sums.asc linux/drivers/isdn/hisax/md5sums.asc --- v2.4.0/linux/drivers/isdn/hisax/md5sums.asc Thu Jan 4 13:20:17 2001 +++ linux/drivers/isdn/hisax/md5sums.asc Wed Jan 10 14:12:53 2001 @@ -10,7 +10,7 @@ ca7bd9bac39203f3074f3f093948cc3c isac.c a2ad619fd404b3149099a2984de9d23c isdnl1.c d2a78e407f3d94876deac160c6f9aae6 isdnl2.c -a109841c2e75b11fc8ef2c8718e24c3e isdnl3.c +e7932ca7ae39c497c17f13a2e1434fcd isdnl3.c afb5f2f4ac296d6de45c856993b161e1 tei.c 00023e2a482cb86a26ea870577ade5d6 callc.c a1834e9b2ec068440cff2e899eff4710 cert.c @@ -25,9 +25,9 @@ Version: 2.6.3i Charset: noconv -iQCVAwUBOlMTgDpxHvX/mS9tAQFSbgP/W9y6tnnWHTRLGqyr3EY1OHZiQXERkAAu -hp+Y8PIoX1GgAh4yZ7xhYwUsk6y0z5USdGuhC9ZHh+oZd57lPsJMnhkEZR5BVsYT -r7jHwelP527+QCLkVUCHIVIWUW0ANzeZBhDV2vefkFb+gWLiZsBhaHssbcKGsMNG 
-Ak4xS1ByqsM= -=lsIJ +iQCVAwUBOlxeLTpxHvX/mS9tAQH6RwP8DhyvqAnXFV6WIGi16iQ3vKikkPoqnDQs +GEn5uCW0dPYKlwthD2Grj/JbMYZhOmCFuDxF7ufJnjTSDe/D8XNe2wngxzAiwcIe +WjCrT8X95cuP3HZHscbFTEinVV0GAnoI0ZEgs5eBDhVHDqILLYMaTFBQaRH3jgXc +i5VH88jPfUM= +=qc+J -----END PGP SIGNATURE----- diff -u --recursive --new-file v2.4.0/linux/drivers/isdn/isdn_common.c linux/drivers/isdn/isdn_common.c --- v2.4.0/linux/drivers/isdn/isdn_common.c Tue Jan 2 16:45:38 2001 +++ linux/drivers/isdn/isdn_common.c Mon Jan 8 15:06:01 2001 @@ -1512,7 +1512,7 @@ int i; if ((ret = verify_area(VERIFY_READ, (void *) arg, - (ISDN_MODEM_NUMREG + ISDN_MSNLEN) + (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS))) return ret; @@ -1521,6 +1521,9 @@ ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; + if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) + return -EFAULT; + p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; diff -u --recursive --new-file v2.4.0/linux/drivers/isdn/isdn_net.c linux/drivers/isdn/isdn_net.c --- v2.4.0/linux/drivers/isdn/isdn_net.c Fri Dec 29 14:07:22 2000 +++ linux/drivers/isdn/isdn_net.c Mon Jan 8 15:06:01 2001 @@ -2325,6 +2325,7 @@ memset(netdev, 0, sizeof(isdn_net_dev)); if (!(netdev->local = (isdn_net_local *) kmalloc(sizeof(isdn_net_local), GFP_KERNEL))) { printk(KERN_WARNING "isdn_net: Could not allocate device locals\n"); + kfree(netdev); return NULL; } memset(netdev->local, 0, sizeof(isdn_net_local)); diff -u --recursive --new-file v2.4.0/linux/drivers/isdn/isdn_ppp.c linux/drivers/isdn/isdn_ppp.c --- v2.4.0/linux/drivers/isdn/isdn_ppp.c Tue Nov 28 21:43:13 2000 +++ linux/drivers/isdn/isdn_ppp.c Mon Jan 8 15:20:19 2001 @@ -1131,9 +1131,9 @@ proto = PPP_IPX; /* untested */ break; default: - dev_kfree_skb(skb); printk(KERN_ERR "isdn_ppp: skipped unsupported protocol: %#x.\n", skb->protocol); + dev_kfree_skb(skb); return 0; } diff -u --recursive --new-file v2.4.0/linux/drivers/net/3c59x.c linux/drivers/net/3c59x.c --- v2.4.0/linux/drivers/net/3c59x.c Tue Nov 14 11:34:25 2000 +++ linux/drivers/net/3c59x.c Sat Jan 6 09:27:42 2001 @@ -118,6 +118,14 @@ LK1.1.11 13 Nov 2000 andrewm - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER + LK1.1.12 1 Jan 2001 andrewm - Call pci_enable_device before we request our IRQ (Tobias Ringstrom) - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra) - Added extended wait_for_completion for the 3c905CX. - Look for an MII on PHY index 24 first (3c905CX oddity). - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger) - Don't free skbs we don't own on oom path in vortex_open(). - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details. - Also see Documentation/networking/vortex.txt */ @@ -203,7 +211,7 @@ #include static char version[] __devinitdata = -"3c59x.c:LK1.1.11 13 Nov 2000 Donald Becker and others. http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n"; +"3c59x.c:LK1.1.12 06 Jan 2001 Donald Becker and others.
http://www.scyld.com/network/vortex.html " "$Revision: 1.102.2.46 $\n"; MODULE_AUTHOR("Donald Becker "); MODULE_DESCRIPTION("3Com 3c59x/3c90x/3c575 series Vortex/Boomerang/Cyclone driver"); @@ -424,7 +432,7 @@ PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, }, {"3cSOHO100-TX Hurricane", - PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, }, + PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, }, {"3c555 Laptop Hurricane", PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT, 128, }, {"3c556 Laptop Tornado", @@ -843,10 +851,15 @@ { int rc; - rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq, - ent->driver_data, vortex_cards_found); - if (rc == 0) - vortex_cards_found++; + /* wake up and enable device */ + if (pci_enable_device (pdev)) { + rc = -EIO; + } else { + rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq, + ent->driver_data, vortex_cards_found); + if (rc == 0) + vortex_cards_found++; + } return rc; } @@ -863,7 +876,7 @@ struct vortex_private *vp; int option; unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ - int i; + int i, step; struct net_device *dev; static int printed_version; int retval; @@ -889,7 +902,6 @@ vci->name, ioaddr); - /* private struct aligned and zeroed by init_etherdev */ vp = dev->priv; dev->base_addr = ioaddr; dev->irq = irq; @@ -908,19 +920,29 @@ if (pdev) { /* EISA resources already marked, so only PCI needs to do this here */ /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(ioaddr, vci->io_size, dev->name) != NULL) { + if (request_region(ioaddr, vci->io_size, dev->name) != NULL) vp->must_free_region = 1; - } - - /* wake up and enable device */ - if (pci_enable_device (pdev)) { - retval = -EIO; - goto free_region; - } /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master (pdev); + + if (vci->drv_flags & IS_VORTEX) { + u8 pci_latency; + u8 new_latency = 248; + + /* Check the PCI latency value. On the 3c590 series the latency timer + must be set to the maximum value to avoid data corruption that occurs + when the timer expires during a transfer. This bug exists in the Vortex + chip only.
*/ + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency); + if (pci_latency < new_latency) { + printk(KERN_INFO "%s: Overriding PCI latency" + " timer (CFLT) setting of %d, new value is %d.\n", + dev->name, pci_latency, new_latency); + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency); + } + } } spin_lock_init(&vp->lock); @@ -1025,6 +1047,13 @@ dev->irq); #endif + EL3WINDOW(4); + step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1; + printk(KERN_INFO " product code '%c%c' rev %02x.%d date %02d-" + "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14], + step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9); + + if (pdev && vci->drv_flags & HAS_CB_FNS) { unsigned long fn_st_addr; /* Cardbus function status space */ unsigned short n; @@ -1089,8 +1118,19 @@ mii_preamble_required++; mii_preamble_required++; mdio_read(dev, 24, 1); - for (phy = 1; phy <= 32 && phy_idx < sizeof(vp->phys); phy++) { - int mii_status, phyx = phy & 0x1f; + for (phy = 0; phy < 32 && phy_idx < 1; phy++) { + int mii_status, phyx; + + /* + * For the 3c905CX we look at index 24 first, because it bogusly + * reports an external PHY at all indices + */ + if (phy == 0) + phyx = 24; + else if (phy <= 24) + phyx = phy - 1; + else + phyx = phy; mii_status = mdio_read(dev, phyx, 1); if (mii_status && mii_status != 0xffff) { vp->phys[phy_idx++] = phyx; @@ -1135,12 +1175,13 @@ dev->set_multicast_list = set_rx_mode; dev->tx_timeout = vortex_tx_timeout; dev->watchdog_timeo = (watchdog * HZ) / 1000; - +// publish_netdev(dev); return 0; free_region: if (vp->must_free_region) release_region(ioaddr, vci->io_size); +// withdraw_netdev(dev); unregister_netdev(dev); kfree (dev); printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval); @@ -1150,13 +1191,23 @@ static void wait_for_completion(struct net_device *dev, int cmd) { - int i = 4000; + int i; outw(cmd, dev->base_addr + EL3_CMD); - while (--i > 0) { + for (i = 0; i < 2000; i++) { if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) return; } + + /* OK, that didn't work. Do it the slow way. One second */ + for (i = 0; i < 100000; i++) { + if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) { + printk(KERN_INFO "%s: command 0x%04x took %d usecs! Please tell andrewm@uow.edu.au\n", + dev->name, cmd, i * 10); + return; + } + udelay(10); + } printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n", dev->name, cmd, inw(dev->base_addr + EL3_STATUS)); } @@ -1331,6 +1382,7 @@ set_rx_mode(dev); outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ +// wait_for_completion(dev, SetTxStart|0x07ff); outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ /* Allow status bits to be seen. */ @@ -1384,7 +1436,8 @@ } if (i != RX_RING_SIZE) { int j; - for (j = 0; j < RX_RING_SIZE; j++) { + printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name); + for (j = 0; j < i; j++) { if (vp->rx_skbuff[j]) { dev_kfree_skb(vp->rx_skbuff[j]); vp->rx_skbuff[j] = 0; @@ -1532,7 +1585,10 @@ printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n", dev->name, inb(ioaddr + TxStatus), inw(ioaddr + EL3_STATUS)); - + EL3WINDOW(4); + printk(KERN_ERR " diagnostics: net %04x media %04x dma %8.8x.\n", + inw(ioaddr + Wn4_NetDiag), inw(ioaddr + Wn4_Media), + inl(ioaddr + PktStatus)); /* Slight code bloat to be user friendly. 
*/ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) printk(KERN_ERR "%s: Transmitter encountered 16 collisions --" @@ -1663,6 +1719,12 @@ dev->name, fifo_diag); /* Adapter failure requires Tx/Rx reset and reinit. */ if (vp->full_bus_master_tx) { + int bus_status = inl(ioaddr + PktStatus); + /* 0x80000000 PCI master abort. */ + /* 0x40000000 PCI target abort. */ + if (vortex_debug) + printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status); + /* In this case, blow the card away */ vortex_down(dev); wait_for_completion(dev, TotalReset | 0xff); diff -u --recursive --new-file v2.4.0/linux/drivers/net/Makefile linux/drivers/net/Makefile --- v2.4.0/linux/drivers/net/Makefile Thu Jan 4 13:00:55 2001 +++ linux/drivers/net/Makefile Sat Jan 6 19:45:14 2001 @@ -26,7 +26,7 @@ obj-$(CONFIG_ISDN) += slhc.o endif -subdir-$(CONFIG_PCMCIA) += pcmcia +subdir-$(CONFIG_NET_PCMCIA) += pcmcia subdir-$(CONFIG_TULIP) += tulip subdir-$(CONFIG_IRDA) += irda subdir-$(CONFIG_TR) += tokenring diff -u --recursive --new-file v2.4.0/linux/drivers/net/depca.c linux/drivers/net/depca.c --- v2.4.0/linux/drivers/net/depca.c Mon Oct 23 15:51:36 2000 +++ linux/drivers/net/depca.c Mon Jan 8 09:09:36 2001 @@ -1817,7 +1817,9 @@ ManCode[5]='\0'; for (i=0;(*signatures[i] != '\0') && (*name == '\0');i++) { - if (strstr(ManCode, signatures[i]) != NULL) { + const char * volatile lhs = ManCode; + const char * volatile rhs = signatures[i]; /* egcs-1.1.2 bug */ + if (strstr(lhs, rhs) != NULL) { strcpy(name,ManCode); status = 1; } diff -u --recursive --new-file v2.4.0/linux/drivers/net/dmfe.c linux/drivers/net/dmfe.c --- v2.4.0/linux/drivers/net/dmfe.c Tue Dec 5 12:29:38 2000 +++ linux/drivers/net/dmfe.c Mon Jan 8 09:09:36 2001 @@ -1596,10 +1596,10 @@ break; } - rc = pci_register_driver(&dmfe_driver); + rc = pci_module_init(&dmfe_driver); if (rc < 0) return rc; - if (rc > 0) { + if (rc >= 0) { printk (KERN_INFO "Davicom DM91xx net driver loaded, version " DMFE_VERSION "\n"); return 0; diff -u --recursive --new-file v2.4.0/linux/drivers/scsi/megaraid.c linux/drivers/scsi/megaraid.c --- v2.4.0/linux/drivers/scsi/megaraid.c Wed Dec 6 12:06:18 2000 +++ linux/drivers/scsi/megaraid.c Tue Jan 9 10:40:43 2001 @@ -149,7 +149,6 @@ #include #ifdef MODULE -#include #include char kernel_version[] = UTS_RELEASE; diff -u --recursive --new-file v2.4.0/linux/drivers/scsi/ppa.c linux/drivers/scsi/ppa.c --- v2.4.0/linux/drivers/scsi/ppa.c Thu Jan 4 13:00:55 2001 +++ linux/drivers/scsi/ppa.c Tue Jan 9 10:40:03 2001 @@ -222,8 +222,8 @@ printk(" supported by the imm (ZIP Plus) driver. If the\n"); printk(" cable is marked with \"AutoDetect\", this is what has\n"); printk(" happened.\n"); - return 0; spin_lock_irq(&io_request_lock); + return 0; } try_again = 1; goto retry_entry; diff -u --recursive --new-file v2.4.0/linux/fs/buffer.c linux/fs/buffer.c --- v2.4.0/linux/fs/buffer.c Wed Jan 3 20:45:26 2001 +++ linux/fs/buffer.c Sun Jan 7 19:47:32 2001 @@ -1151,7 +1151,7 @@ /* grab the lru lock here to block bdflush. 
*/ spin_lock(&lru_list_lock); write_lock(&hash_table_lock); - if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf)) + if (!atomic_dec_and_test(&buf->b_count) || buffer_locked(buf) || buffer_protected(buf)) goto in_use; __hash_unlink(buf); remove_inode_queue(buf); diff -u --recursive --new-file v2.4.0/linux/fs/exec.c linux/fs/exec.c --- v2.4.0/linux/fs/exec.c Wed Jan 3 20:45:26 2001 +++ linux/fs/exec.c Mon Jan 8 13:31:56 2001 @@ -407,6 +407,7 @@ /* Add it to the list of mm's */ spin_lock(&mmlist_lock); list_add(&mm->mmlist, &init_mm.mmlist); + mmlist_nr++; spin_unlock(&mmlist_lock); task_lock(current); diff -u --recursive --new-file v2.4.0/linux/fs/nfs/flushd.c linux/fs/nfs/flushd.c --- v2.4.0/linux/fs/nfs/flushd.c Wed Jun 21 07:25:17 2000 +++ linux/fs/nfs/flushd.c Wed Jan 10 14:18:29 2001 @@ -71,18 +71,17 @@ int status = 0; dprintk("NFS: writecache_init\n"); + + /* Create the RPC task */ + if (!(task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC))) + return -ENOMEM; + spin_lock(&nfs_flushd_lock); cache = server->rw_requests; if (cache->task) goto out_unlock; - /* Create the RPC task */ - status = -ENOMEM; - task = rpc_new_task(server->client, NULL, RPC_TASK_ASYNC); - if (!task) - goto out_unlock; - task->tk_calldata = server; cache->task = task; @@ -99,6 +98,7 @@ return 0; out_unlock: spin_unlock(&nfs_flushd_lock); + rpc_release_task(task); return status; } @@ -195,7 +195,9 @@ if (*q) { *q = inode->u.nfs_i.hash_next; NFS_FLAGS(inode) &= ~NFS_INO_FLUSH; + spin_unlock(&nfs_flushd_lock); iput(inode); + return; } out: spin_unlock(&nfs_flushd_lock); diff -u --recursive --new-file v2.4.0/linux/fs/ramfs/inode.c linux/fs/ramfs/inode.c --- v2.4.0/linux/fs/ramfs/inode.c Fri Dec 29 19:26:31 2000 +++ linux/fs/ramfs/inode.c Fri Jan 5 23:06:19 2001 @@ -81,6 +81,7 @@ static int ramfs_writepage(struct page *page) { SetPageDirty(page); + UnlockPage(page); return 0; } diff -u --recursive --new-file v2.4.0/linux/include/asm-i386/bugs.h linux/include/asm-i386/bugs.h --- v2.4.0/linux/include/asm-i386/bugs.h Thu Jan 4 14:50:45 2001 +++ linux/include/asm-i386/bugs.h Thu Jan 11 17:41:26 2001 @@ -76,26 +76,23 @@ } /* Enable FXSR and company _before_ testing for FP problems. */ -#if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR) /* * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned. */ - if (offsetof(struct task_struct, thread.i387.fxsave) & 15) - panic("Kernel compiled for PII/PIII+ with FXSR, data not 16-byte aligned!"); - + if (offsetof(struct task_struct, thread.i387.fxsave) & 15) { + extern void __buggy_fxsr_alignment(void); + __buggy_fxsr_alignment(); + } if (cpu_has_fxsr) { printk(KERN_INFO "Enabling fast FPU save and restore... "); set_in_cr4(X86_CR4_OSFXSR); printk("done.\n"); } -#endif -#ifdef CONFIG_X86_XMM if (cpu_has_xmm) { printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... "); set_in_cr4(X86_CR4_OSXMMEXCPT); printk("done.\n"); } -#endif /* Test for the divl bug.. */ __asm__("fninit\n\t" @@ -202,14 +199,6 @@ && boot_cpu_data.x86_model == 2 && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11)) panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!"); -#endif - -/* - * If we configured ourselves for FXSR, we'd better have it. 
- */ -#ifdef CONFIG_X86_FXSR - if (!cpu_has_fxsr) - panic("Kernel compiled for PII/PIII+, requires FXSR feature!"); #endif } diff -u --recursive --new-file v2.4.0/linux/include/asm-i386/i387.h linux/include/asm-i386/i387.h --- v2.4.0/linux/include/asm-i386/i387.h Thu Jan 4 14:52:01 2001 +++ linux/include/asm-i386/i387.h Thu Jan 11 17:46:51 2001 @@ -23,6 +23,10 @@ extern void save_init_fpu( struct task_struct *tsk ); extern void restore_fpu( struct task_struct *tsk ); +extern void kernel_fpu_begin(void); +#define kernel_fpu_end() stts() + + #define unlazy_fpu( tsk ) do { \ if ( tsk->flags & PF_USEDFPU ) \ save_init_fpu( tsk ); \ @@ -50,10 +54,8 @@ extern void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr ); #define load_mxcsr( val ) do { \ - if ( cpu_has_xmm ) { \ - unsigned long __mxcsr = ((unsigned long)(val) & 0xffff); \ - asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \ - } \ + unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \ + asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \ } while (0) /* diff -u --recursive --new-file v2.4.0/linux/include/asm-i386/pgtable.h linux/include/asm-i386/pgtable.h --- v2.4.0/linux/include/asm-i386/pgtable.h Thu Jan 4 14:50:46 2001 +++ linux/include/asm-i386/pgtable.h Thu Jan 11 17:45:39 2001 @@ -140,7 +140,11 @@ #define VMALLOC_START (((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \ ~(VMALLOC_OFFSET-1)) #define VMALLOC_VMADDR(x) ((unsigned long)(x)) -#define VMALLOC_END (FIXADDR_START) +#if CONFIG_HIGHMEM +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) +#else +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) +#endif /* * The 4MB page is guessing.. Detailed in the infamous "Chapter H" diff -u --recursive --new-file v2.4.0/linux/include/asm-i386/system.h linux/include/asm-i386/system.h --- v2.4.0/linux/include/asm-i386/system.h Thu Jan 4 14:50:46 2001 +++ linux/include/asm-i386/system.h Thu Jan 11 17:45:39 2001 @@ -267,15 +267,8 @@ * I expect future Intel CPU's to have a weaker ordering, * but I'd also expect them to finally get their act together * and add some real memory barriers if so. - * - * The Pentium III does add a real memory barrier with the - * sfence instruction, so we use that where appropriate. */ -#ifndef CONFIG_X86_XMM #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory") -#else -#define mb() __asm__ __volatile__ ("sfence": : :"memory") -#endif #define rmb() mb() #define wmb() __asm__ __volatile__ ("": : :"memory") diff -u --recursive --new-file v2.4.0/linux/include/asm-i386/xor.h linux/include/asm-i386/xor.h --- v2.4.0/linux/include/asm-i386/xor.h Sun Nov 12 19:39:51 2000 +++ linux/include/asm-i386/xor.h Mon Jan 8 13:27:56 2001 @@ -843,7 +843,7 @@ do { \ xor_speed(&xor_block_8regs); \ xor_speed(&xor_block_32regs); \ - if (cpu_has_xmm) \ + if (HAVE_XMM) \ xor_speed(&xor_block_pIII_sse); \ if (md_cpu_has_mmx()) { \ xor_speed(&xor_block_pII_mmx); \ @@ -855,4 +855,4 @@ We may also be able to load into the L1 only depending on how the cpu deals with a load to a line that is being prefetched. */ #define XOR_SELECT_TEMPLATE(FASTEST) \ - (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST) + (HAVE_XMM ? 
&xor_block_pIII_sse : FASTEST) diff -u --recursive --new-file v2.4.0/linux/include/linux/sched.h linux/include/linux/sched.h --- v2.4.0/linux/include/linux/sched.h Thu Jan 4 14:50:47 2001 +++ linux/include/linux/sched.h Thu Jan 11 17:45:40 2001 @@ -219,12 +219,13 @@ unsigned long rss, total_vm, locked_vm; unsigned long def_flags; unsigned long cpu_vm_mask; - unsigned long swap_cnt; /* number of pages to swap on next pass */ unsigned long swap_address; /* Architecture-specific MM context */ mm_context_t context; }; + +extern int mmlist_nr; #define INIT_MM(name) \ { \ diff -u --recursive --new-file v2.4.0/linux/include/linux/swap.h linux/include/linux/swap.h --- v2.4.0/linux/include/linux/swap.h Thu Jan 4 14:50:46 2001 +++ linux/include/linux/swap.h Thu Jan 11 17:45:39 2001 @@ -107,7 +107,7 @@ extern int page_launder(int, int); extern int free_shortage(void); extern int inactive_shortage(void); -extern void wakeup_kswapd(int); +extern void wakeup_kswapd(void); extern int try_to_free_pages(unsigned int gfp_mask); /* linux/mm/page_io.c */ diff -u --recursive --new-file v2.4.0/linux/kernel/fork.c linux/kernel/fork.c --- v2.4.0/linux/kernel/fork.c Wed Jan 3 20:45:26 2001 +++ linux/kernel/fork.c Wed Jan 10 14:53:54 2001 @@ -134,7 +134,6 @@ mm->mmap_cache = NULL; mm->map_count = 0; mm->cpu_vm_mask = 0; - mm->swap_cnt = 0; mm->swap_address = 0; pprev = &mm->mmap; for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) { @@ -193,6 +192,7 @@ } spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; +int mmlist_nr; #define allocate_mm() (kmem_cache_alloc(mm_cachep, SLAB_KERNEL)) #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) @@ -246,6 +246,7 @@ { if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) { list_del(&mm->mmlist); + mmlist_nr--; spin_unlock(&mmlist_lock); exit_mmap(mm); mmdrop(mm); @@ -326,6 +327,7 @@ */ spin_lock(&mmlist_lock); list_add(&mm->mmlist, &oldmm->mmlist); + mmlist_nr++; spin_unlock(&mmlist_lock); if (retval) diff -u --recursive --new-file v2.4.0/linux/mm/filemap.c linux/mm/filemap.c --- v2.4.0/linux/mm/filemap.c Tue Jan 2 18:59:45 2001 +++ linux/mm/filemap.c Wed Jan 10 14:24:32 2001 @@ -306,7 +306,7 @@ */ age_page_up(page); if (inactive_shortage() > inactive_target / 2 && free_shortage()) - wakeup_kswapd(0); + wakeup_kswapd(); not_found: return page; } @@ -1835,7 +1835,8 @@ n->vm_end = end; setup_read_behavior(n, behavior); n->vm_raend = 0; - get_file(n->vm_file); + if (n->vm_file) + get_file(n->vm_file); if (n->vm_ops && n->vm_ops->open) n->vm_ops->open(n); lock_vma_mappings(vma); @@ -1861,7 +1862,8 @@ n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT; setup_read_behavior(n, behavior); n->vm_raend = 0; - get_file(n->vm_file); + if (n->vm_file) + get_file(n->vm_file); if (n->vm_ops && n->vm_ops->open) n->vm_ops->open(n); lock_vma_mappings(vma); @@ -1893,7 +1895,8 @@ right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT; left->vm_raend = 0; right->vm_raend = 0; - atomic_add(2, &vma->vm_file->f_count); + if (vma->vm_file) + atomic_add(2, &vma->vm_file->f_count); if (vma->vm_ops && vma->vm_ops->open) { vma->vm_ops->open(left); diff -u --recursive --new-file v2.4.0/linux/mm/memory.c linux/mm/memory.c --- v2.4.0/linux/mm/memory.c Mon Jan 1 10:37:41 2001 +++ linux/mm/memory.c Mon Jan 8 15:39:38 2001 @@ -207,7 +207,8 @@ src_pte = pte_offset(src_pmd, address); dst_pte = pte_offset(dst_pmd, address); - + + spin_lock(&src->page_table_lock); do { pte_t pte = *src_pte; struct page *ptepage; @@ -240,16 +241,21 @@ cont_copy_pte_range: 
set_pte(dst_pte, pte); cont_copy_pte_range_noset: address += PAGE_SIZE; if (address >= end) - goto out; + goto out_unlock; src_pte++; dst_pte++; } while ((unsigned long)src_pte & PTE_TABLE_MASK); + spin_unlock(&src->page_table_lock); cont_copy_pmd_range: src_pmd++; dst_pmd++; } while ((unsigned long)src_pmd & PMD_TABLE_MASK); } out: + return 0; + +out_unlock: + spin_unlock(&src->page_table_lock); return 0; nomem: diff -u --recursive --new-file v2.4.0/linux/mm/page_alloc.c linux/mm/page_alloc.c --- v2.4.0/linux/mm/page_alloc.c Wed Jan 3 09:59:06 2001 +++ linux/mm/page_alloc.c Thu Jan 11 15:52:31 2001 @@ -16,6 +16,7 @@ #include #include #include +#include int nr_swap_pages; int nr_active_pages; @@ -303,7 +304,7 @@ * an inactive page shortage, wake up kswapd. */ if (inactive_shortage() > inactive_target / 2 && free_shortage()) - wakeup_kswapd(0); + wakeup_kswapd(); /* * If we are about to get low on free pages and cleaning * the inactive_dirty pages would fix the situation, @@ -379,7 +380,7 @@ * - if we don't have __GFP_IO set, kswapd may be * able to free some memory we can't free ourselves */ - wakeup_kswapd(0); + wakeup_kswapd(); if (gfp_mask & __GFP_WAIT) { __set_current_state(TASK_RUNNING); current->policy |= SCHED_YIELD; @@ -404,7 +405,7 @@ * - we're doing a higher-order allocation * --> move pages to the free list until we succeed * - we're /really/ tight on memory - * --> wait on the kswapd waitqueue until memory is freed + * --> try to free pages ourselves with page_launder */ if (!(current->flags & PF_MEMALLOC)) { /* @@ -443,36 +444,23 @@ /* * When we arrive here, we are really tight on memory. * - * We wake up kswapd and sleep until kswapd wakes us - * up again. After that we loop back to the start. - * - * We have to do this because something else might eat - * the memory kswapd frees for us and we need to be - * reliable. Note that we don't loop back for higher - * order allocations since it is possible that kswapd - * simply cannot free a large enough contiguous area - * of memory *ever*. - */ - if ((gfp_mask & (__GFP_WAIT|__GFP_IO)) == (__GFP_WAIT|__GFP_IO)) { - wakeup_kswapd(1); - memory_pressure++; - if (!order) - goto try_again; - /* - * If __GFP_IO isn't set, we can't wait on kswapd because - * kswapd just might need some IO locks /we/ are holding ... - * - * SUBTLE: The scheduling point above makes sure that - * kswapd does get the chance to free memory we can't - * free ourselves... + * We try to free pages ourselves by: + * - shrinking the i/d caches. + * - reclaiming unused memory from the slab caches. + * - swapping/syncing pages to disk (done by page_launder) + * - moving clean pages from the inactive dirty list to + * the inactive clean list. 
(done by page_launder) */ - } else if (gfp_mask & __GFP_WAIT) { - try_to_free_pages(gfp_mask); - memory_pressure++; + if (gfp_mask & __GFP_WAIT) { + shrink_icache_memory(6, gfp_mask); + shrink_dcache_memory(6, gfp_mask); + kmem_cache_reap(gfp_mask); + + page_launder(gfp_mask, 1); + if (!order) goto try_again; } - } /* @@ -554,14 +542,8 @@ void free_pages(unsigned long addr, unsigned long order) { - struct page *fpage; - -#ifdef CONFIG_DISCONTIGMEM - if (addr == 0) return; -#endif - fpage = virt_to_page(addr); - if (VALID_PAGE(fpage)) - __free_pages(fpage, order); + if (addr != 0) + __free_pages(virt_to_page(addr), order); } /* diff -u --recursive --new-file v2.4.0/linux/mm/slab.c linux/mm/slab.c --- v2.4.0/linux/mm/slab.c Sun Oct 1 19:55:17 2000 +++ linux/mm/slab.c Wed Jan 10 14:24:32 2001 @@ -1702,7 +1702,7 @@ * kmem_cache_reap - Reclaim memory from caches. * @gfp_mask: the type of memory required. * - * Called from try_to_free_page(). + * Called from do_try_to_free_pages() and __alloc_pages() */ void kmem_cache_reap (int gfp_mask) { diff -u --recursive --new-file v2.4.0/linux/mm/vmscan.c linux/mm/vmscan.c --- v2.4.0/linux/mm/vmscan.c Wed Jan 3 20:45:26 2001 +++ linux/mm/vmscan.c Wed Jan 10 15:04:07 2001 @@ -35,45 +35,21 @@ * using a process that no longer actually exists (it might * have died while we slept). */ -static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask) +static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page) { pte_t pte; swp_entry_t entry; - struct page * page; - int onlist; - - pte = *page_table; - if (!pte_present(pte)) - goto out_failed; - page = pte_page(pte); - if ((!VALID_PAGE(page)) || PageReserved(page)) - goto out_failed; - - if (!mm->swap_cnt) - return 1; - mm->swap_cnt--; - - onlist = PageActive(page); /* Don't look at this pte if it's been accessed recently. */ if (ptep_test_and_clear_young(page_table)) { - age_page_up(page); - goto out_failed; + page->age += PAGE_AGE_ADV; + if (page->age > PAGE_AGE_MAX) + page->age = PAGE_AGE_MAX; + return; } - if (!onlist) - /* The page is still mapped, so it can't be freeable... */ - age_page_down_ageonly(page); - - /* - * If the page is in active use by us, or if the page - * is in active use by others, don't unmap it or - * (worse) start unneeded IO. - */ - if (page->age > 0) - goto out_failed; if (TryLockPage(page)) - goto out_failed; + return; /* From this point on, the odds are that we're going to * nuke this pte, so read and clear the pte. This hook @@ -87,9 +63,6 @@ * Is the page already in the swap cache? If so, then * we can just drop our reference to it without doing * any IO - it's already up-to-date on disk. - * - * Return 0, as we didn't actually free any real - * memory, and we should just continue our scan. */ if (PageSwapCache(page)) { entry.val = page->index; @@ -103,8 +76,7 @@ mm->rss--; deactivate_page(page); page_cache_release(page); -out_failed: - return 0; + return; } /* @@ -153,34 +125,20 @@ out_unlock_restore: set_pte(page_table, pte); UnlockPage(page); - return 0; + return; } -/* - * A new implementation of swap_out(). We do not swap complete processes, - * but only a small number of blocks, before we continue with the next - * process. The number of blocks actually swapped is determined on the - * number of page faults, that this process actually had in the last time, - * so we won't swap heavily used processes all the time ... 
- * - * Note: the priority argument is a hint on much CPU to waste with the - * swap block search, not a hint, of how much blocks to swap with - * each process. - * - * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de - */ - -static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask) +static int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count) { pte_t * pte; unsigned long pmd_end; if (pmd_none(*dir)) - return 0; + return count; if (pmd_bad(*dir)) { pmd_ERROR(*dir); pmd_clear(dir); - return 0; + return count; } pte = pte_offset(dir, address); @@ -190,28 +148,33 @@ end = pmd_end; do { - int result; - mm->swap_address = address + PAGE_SIZE; - result = try_to_swap_out(mm, vma, address, pte, gfp_mask); - if (result) - return result; + if (pte_present(*pte)) { + struct page *page = pte_page(*pte); + + if (VALID_PAGE(page) && !PageReserved(page)) { + try_to_swap_out(mm, vma, address, pte, page); + if (!--count) + break; + } + } address += PAGE_SIZE; pte++; } while (address && (address < end)); - return 0; + mm->swap_address = address + PAGE_SIZE; + return count; } -static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask) +static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count) { pmd_t * pmd; unsigned long pgd_end; if (pgd_none(*dir)) - return 0; + return count; if (pgd_bad(*dir)) { pgd_ERROR(*dir); pgd_clear(dir); - return 0; + return count; } pmd = pmd_offset(dir, address); @@ -221,23 +184,23 @@ end = pgd_end; do { - int result = swap_out_pmd(mm, vma, pmd, address, end, gfp_mask); - if (result) - return result; + count = swap_out_pmd(mm, vma, pmd, address, end, count); + if (!count) + break; address = (address + PMD_SIZE) & PMD_MASK; pmd++; } while (address && (address < end)); - return 0; + return count; } -static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int gfp_mask) +static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count) { pgd_t *pgdir; unsigned long end; /* Don't swap out areas which are locked down */ if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) - return 0; + return count; pgdir = pgd_offset(mm, address); @@ -245,18 +208,17 @@ if (address >= end) BUG(); do { - int result = swap_out_pgd(mm, vma, pgdir, address, end, gfp_mask); - if (result) - return result; + count = swap_out_pgd(mm, vma, pgdir, address, end, count); + if (!count) + break; address = (address + PGDIR_SIZE) & PGDIR_MASK; pgdir++; } while (address && (address < end)); - return 0; + return count; } -static int swap_out_mm(struct mm_struct * mm, int gfp_mask) +static int swap_out_mm(struct mm_struct * mm, int count) { - int result = 0; unsigned long address; struct vm_area_struct* vma; @@ -276,8 +238,8 @@ address = vma->vm_start; for (;;) { - result = swap_out_vma(mm, vma, address, gfp_mask); - if (result) + count = swap_out_vma(mm, vma, address, count); + if (!count) goto out_unlock; vma = vma->vm_next; if (!vma) @@ -287,94 +249,63 @@ } /* Reset to 0 when we reach the end of address space */ mm->swap_address = 0; - mm->swap_cnt = 0; out_unlock: spin_unlock(&mm->page_table_lock); - return result; + return !count; } /* - * Select the task with maximal swap_cnt and try to
swap out a page. * N.B. This function returns only 0 or 1. Return values != 1 from * the lower level routines result in continued processing. */ #define SWAP_SHIFT 5 #define SWAP_MIN 8 +static inline int swap_amount(struct mm_struct *mm) +{ + int nr = mm->rss >> SWAP_SHIFT; + return nr < SWAP_MIN ? SWAP_MIN : nr; +} + static int swap_out(unsigned int priority, int gfp_mask) { int counter; - int __ret = 0; + int retval = 0; + struct mm_struct *mm = current->mm; - /* - * We make one or two passes through the task list, indexed by - * assign = {0, 1}: - * Pass 1: select the swappable task with maximal RSS that has - * not yet been swapped out. - * Pass 2: re-assign rss swap_cnt values, then select as above. - * - * With this approach, there's no need to remember the last task - * swapped out. If the swap-out fails, we clear swap_cnt so the - * task won't be selected again until all others have been tried. - * - * Think of swap_cnt as a "shadow rss" - it tells us which process - * we want to page out (always try largest first). - */ - counter = (nr_threads << SWAP_SHIFT) >> priority; - if (counter < 1) - counter = 1; + /* Always start by trying to penalize the process that is allocating memory */ + if (mm) + retval = swap_out_mm(mm, swap_amount(mm)); - for (; counter >= 0; counter--) { + /* Then, look at the other mm's */ + counter = mmlist_nr >> priority; + do { struct list_head *p; - unsigned long max_cnt = 0; - struct mm_struct *best = NULL; - int assign = 0; - int found_task = 0; - select: + spin_lock(&mmlist_lock); p = init_mm.mmlist.next; - for (; p != &init_mm.mmlist; p = p->next) { - struct mm_struct *mm = list_entry(p, struct mm_struct, mmlist); - if (mm->rss <= 0) - continue; - found_task++; - /* Refresh swap_cnt? */ - if (assign == 1) { - mm->swap_cnt = (mm->rss >> SWAP_SHIFT); - if (mm->swap_cnt < SWAP_MIN) - mm->swap_cnt = SWAP_MIN; - } - if (mm->swap_cnt > max_cnt) { - max_cnt = mm->swap_cnt; - best = mm; - } - } + if (p == &init_mm.mmlist) + goto empty; + + /* Move it to the back of the queue.. */ + list_del(p); + list_add_tail(p, &init_mm.mmlist); + mm = list_entry(p, struct mm_struct, mmlist); - /* Make sure it doesn't disappear */ - if (best) - atomic_inc(&best->mm_users); + /* Make sure the mm doesn't disappear when we drop the lock.. */ + atomic_inc(&mm->mm_users); spin_unlock(&mmlist_lock); - /* - * We have dropped the tasklist_lock, but we - * know that "mm" still exists: we are running - * with the big kernel lock, and exit_mm() - * cannot race with us. - */ - if (!best) { - if (!assign && found_task > 0) { - assign = 1; - goto select; - } - break; - } else { - __ret = swap_out_mm(best, gfp_mask); - mmput(best); - break; - } - } - return __ret; + /* Walk about 6% of the address space each time */ + retval |= swap_out_mm(mm, swap_amount(mm)); + mmput(mm); + } while (--counter >= 0); + return retval; + +empty: + spin_unlock(&mmlist_lock); + return 0; } @@ -808,6 +739,9 @@ int inactive_shortage(void) { int shortage = 0; + pg_data_t *pgdat = pgdat_list; + + /* Is the inactive dirty list too small? */ shortage += freepages.high; shortage += inactive_target; @@ -818,7 +752,27 @@ if (shortage > 0) return shortage; - return 0; + /* If not, do we have enough per-zone pages on the inactive list? 
*/ + + shortage = 0; + + do { + int i; + for(i = 0; i < MAX_NR_ZONES; i++) { + int zone_shortage; + zone_t *zone = pgdat->node_zones+ i; + + zone_shortage = zone->pages_high; + zone_shortage -= zone->inactive_dirty_pages; + zone_shortage -= zone->inactive_clean_pages; + zone_shortage -= zone->free_pages; + if (zone_shortage > 0) + shortage += zone_shortage; + } + pgdat = pgdat->node_next; + } while (pgdat); + + return shortage; } /* @@ -833,72 +787,35 @@ * really care about latency. In that case we don't try * to free too many pages. */ +#define DEF_PRIORITY (6) static int refill_inactive(unsigned int gfp_mask, int user) { - int priority, count, start_count, made_progress; + int count, start_count, maxtry; count = inactive_shortage() + free_shortage(); if (user) count = (1 << page_cluster); start_count = count; - /* Always trim SLAB caches when memory gets low. */ - kmem_cache_reap(gfp_mask); - - priority = 6; + maxtry = 6; do { - made_progress = 0; - if (current->need_resched) { __set_current_state(TASK_RUNNING); schedule(); } - while (refill_inactive_scan(priority, 1)) { - made_progress = 1; + while (refill_inactive_scan(DEF_PRIORITY, 1)) { if (--count <= 0) goto done; } - /* - * don't be too light against the d/i cache since - * refill_inactive() almost never fail when there's - * really plenty of memory free. - */ - shrink_dcache_memory(priority, gfp_mask); - shrink_icache_memory(priority, gfp_mask); + /* If refill_inactive_scan failed, try to page stuff out.. */ + swap_out(DEF_PRIORITY, gfp_mask); - /* - * Then, try to page stuff out.. - */ - while (swap_out(priority, gfp_mask)) { - made_progress = 1; - if (--count <= 0) - goto done; - } - - /* - * If we either have enough free memory, or if - * page_launder() will be able to make enough - * free memory, then stop. - */ - if (!inactive_shortage() || !free_shortage()) - goto done; - - /* - * Only switch to a lower "priority" if we - * didn't make any useful progress in the - * last loop. - */ - if (!made_progress) - priority--; - } while (priority >= 0); - - /* Always end on a refill_inactive.., may sleep... */ - while (refill_inactive_scan(0, 1)) { - if (--count <= 0) - goto done; - } + if (--maxtry <= 0) + return 0; + + } while (inactive_shortage()); done: return (count < start_count); @@ -922,20 +839,20 @@ /* * If needed, we move pages from the active list - * to the inactive list. We also "eat" pages from - * the inode and dentry cache whenever we do this. + * to the inactive list. */ - if (free_shortage() || inactive_shortage()) { - shrink_dcache_memory(6, gfp_mask); - shrink_icache_memory(6, gfp_mask); + if (inactive_shortage()) ret += refill_inactive(gfp_mask, user); - } else { - /* - * Reclaim unused slab cache memory. - */ + + /* + * Delete pages from the inode and dentry caches and + * reclaim unused slab cache if memory is low. + */ + if (free_shortage()) { + shrink_dcache_memory(DEF_PRIORITY, gfp_mask); + shrink_icache_memory(DEF_PRIORITY, gfp_mask); kmem_cache_reap(gfp_mask); - ret = 1; - } + } return ret; } @@ -988,13 +905,8 @@ static int recalc = 0; /* If needed, try to free some memory. */ - if (inactive_shortage() || free_shortage()) { - int wait = 0; - /* Do we need to do some synchronous flushing? */ - if (waitqueue_active(&kswapd_done)) - wait = 1; - do_try_to_free_pages(GFP_KSWAPD, wait); - } + if (inactive_shortage() || free_shortage()) + do_try_to_free_pages(GFP_KSWAPD, 0); /* * Do some (very minimal) background scanning. This @@ -1002,7 +914,7 @@ * every minute. 
This clears old referenced bits * and moves unused pages to the inactive list. */ - refill_inactive_scan(6, 0); + refill_inactive_scan(DEF_PRIORITY, 0); /* Once a second, recalculate some VM stats. */ if (time_after(jiffies, recalc + HZ)) { @@ -1010,11 +922,6 @@ recalculate_vm_stats(); } - /* - * Wake up everybody waiting for free memory - * and unplug the disk queue. - */ - wake_up_all(&kswapd_done); run_task_queue(&tq_disk); /* @@ -1045,33 +952,10 @@ } } -void wakeup_kswapd(int block) +void wakeup_kswapd(void) { - DECLARE_WAITQUEUE(wait, current); - - if (current == kswapd_task) - return; - - if (!block) { - if (waitqueue_active(&kswapd_wait)) - wake_up(&kswapd_wait); - return; - } - - /* - * Kswapd could wake us up before we get a chance - * to sleep, so we have to be very careful here to - * prevent SMP races... - */ - __set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&kswapd_done, &wait); - - if (waitqueue_active(&kswapd_wait)) - wake_up(&kswapd_wait); - schedule(); - - remove_wait_queue(&kswapd_done, &wait); - __set_current_state(TASK_RUNNING); + if (current != kswapd_task) + wake_up_process(kswapd_task); } /* @@ -1096,7 +980,7 @@ /* * Kreclaimd will move pages from the inactive_clean list to the * free list, in order to keep atomic allocations possible under - * all circumstances. Even when kswapd is blocked on IO. + * all circumstances. */ int kreclaimd(void *unused) { diff -u --recursive --new-file v2.4.0/linux/net/ipv4/igmp.c linux/net/ipv4/igmp.c --- v2.4.0/linux/net/ipv4/igmp.c Thu Sep 7 08:32:01 2000 +++ linux/net/ipv4/igmp.c Tue Jan 9 10:54:57 2001 @@ -504,8 +504,8 @@ im->timer.function=&igmp_timer_expire; im->unsolicit_count = IGMP_Unsolicited_Report_Count; im->reporter = 0; - im->loaded = 0; #endif + im->loaded = 0; write_lock_bh(&in_dev->lock); im->next=in_dev->mc_list; in_dev->mc_list=im; diff -u --recursive --new-file v2.4.0/linux/net/ipv4/tcp.c linux/net/ipv4/tcp.c --- v2.4.0/linux/net/ipv4/tcp.c Tue Nov 28 21:53:45 2000 +++ linux/net/ipv4/tcp.c Wed Jan 10 14:12:12 2001 @@ -954,7 +954,7 @@ */ skb = sk->write_queue.prev; if (tp->send_head && - (mss_now - skb->len) > 0) { + (mss_now > skb->len)) { copy = skb->len; if (skb_tailroom(skb) > 0) { int last_byte_was_odd = (copy % 4); diff -u --recursive --new-file v2.4.0/linux/net/ipv4/tcp_input.c linux/net/ipv4/tcp_input.c --- v2.4.0/linux/net/ipv4/tcp_input.c Fri Dec 29 14:07:24 2000 +++ linux/net/ipv4/tcp_input.c Wed Jan 10 14:12:12 2001 @@ -1705,7 +1705,7 @@ if ((__s32)when < (__s32)tp->rttvar) when = tp->rttvar; - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, when); + tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, min(when, TCP_RTO_MAX)); } } diff -u --recursive --new-file v2.4.0/linux/net/sunrpc/sunrpc_syms.c linux/net/sunrpc/sunrpc_syms.c --- v2.4.0/linux/net/sunrpc/sunrpc_syms.c Fri Apr 21 16:08:52 2000 +++ linux/net/sunrpc/sunrpc_syms.c Thu Jan 11 15:53:02 2001 @@ -36,6 +36,7 @@ EXPORT_SYMBOL(rpciod_up); EXPORT_SYMBOL(rpc_new_task); EXPORT_SYMBOL(rpc_wake_up_status); +EXPORT_SYMBOL(rpc_release_task); /* RPC client functions */ EXPORT_SYMBOL(rpc_create_client);
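
Three notes on the patterns above, for readers following along.

First, the FPU helpers: arch/i386/lib/mmx.c no longer open-codes the fnsave/clts dance before touching MMX registers; it brackets the work with the new kernel_fpu_begin()/kernel_fpu_end() pair from include/asm-i386/i387.h (kernel_fpu_end() is just stts()). A minimal usage sketch follows; the function below is illustrative only, not part of the patch:

	#include <asm/i387.h>	/* kernel_fpu_begin(), kernel_fpu_end() */

	/*
	 * Illustrative sketch: clear memory 8 bytes at a time with MMX.
	 * kernel_fpu_begin() saves the current task's lazy FPU state if
	 * PF_USEDFPU is set and clears CR0.TS; kernel_fpu_end() sets TS
	 * again, so the next user FPU instruction traps and restore_fpu()
	 * reloads the task's own registers.
	 */
	static void mmx_clear_qwords(void *dst, int n_qwords)
	{
		char *p = dst;

		kernel_fpu_begin();
		__asm__ __volatile__("pxor %%mm0, %%mm0" : : );
		while (n_qwords--) {
			__asm__ __volatile__("movq %%mm0, (%0)"
					     : : "r" (p) : "memory");
			p += 8;
		}
		kernel_fpu_end();
	}

As with the old open-coded version, the region between the two calls must not schedule, since nothing preserves the MMX registers across a context switch.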
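
Second, the new "nofxsr" boot option is spread over three setup.c hunks; pulled together (condensed from the hunks above, not new code), the pattern for a boot-time kill switch is:

	static int disable_x86_fxsr __initdata = 0;

	/* "nofxsr" on the kernel command line just sets a flag... */
	int __init x86_fxsr_setup(char * s)
	{
		disable_x86_fxsr = 1;
		return 1;
	}
	__setup("nofxsr", x86_fxsr_setup);

	/* ...and CPU identification later clears the feature bits, so the
	 * cpu_has_fxsr / cpu_has_xmm tests that now replace HAVE_FXSR in
	 * i387.c and bugs.h all see the features as absent: */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, &c->x86_capability);
		clear_bit(X86_FEATURE_XMM, &c->x86_capability);
	}

This run-time switch is what lets CONFIG_X86_FXSR and CONFIG_X86_XMM disappear from config.in, defconfig and the Makefiles: the decision moves from compile time to boot time.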
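
Third, the heart of the vmscan.c rework, reduced to its control flow: swap_out() no longer hunts for the task with the largest swap_cnt (that field is gone from mm_struct); it rotates through the global mmlist, pins one mm at a time, and takes a bounded bite out of it. Condensed from the hunks above:

	spin_lock(&mmlist_lock);
	p = init_mm.mmlist.next;
	if (p == &init_mm.mmlist)
		goto empty;			/* no other mm to scan */

	list_del(p);				/* round-robin: requeue this */
	list_add_tail(p, &init_mm.mmlist);	/* mm at the back of the list */
	mm = list_entry(p, struct mm_struct, mmlist);

	atomic_inc(&mm->mm_users);		/* pin it across the unlock */
	spin_unlock(&mmlist_lock);

	retval |= swap_out_mm(mm, swap_amount(mm));
	mmput(mm);

swap_amount() bounds each visit to mm->rss >> SWAP_SHIFT pages (at least SWAP_MIN), and the new mmlist_nr counter maintained in fork.c and exec.c bounds how many mm's one call walks, so no single allocator gets stuck scanning the whole system.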