From b36e4758dc1b9ff1f6d97e951edba22366230d11 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Aug 2006 12:26:34 +0100 Subject: [ARM] Fix kernel/fork.c for lockdep on ARM ARM has interrupts enabled over context switches (iow, has __ARCH_WANT_INTERRUPTS_ON_CTXSW defined.) The lockdep code in fork.c assumes that interrupts are always disabled. Fix this wrong assumption by making the initialisation of 'p->hardirqs_enabled' depend on __ARCH_WANT_INTERRUPTS_ON_CTXSW. Acked-by: Ingo Molnar Signed-off-by: Russell King --- kernel/fork.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index f9b014e3e700..8f76adf1c6a6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1056,7 +1056,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW + p->hardirqs_enabled = 1; +#else p->hardirqs_enabled = 0; +#endif p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; -- cgit v1.2.3 From fe4944e59c357f945f81bc67edb7ed1392e875ad Mon Sep 17 00:00:00 2001 From: Thomas Graf Date: Fri, 4 Aug 2006 23:03:05 -0700 Subject: [NETLINK]: Extend netlink messaging interface Adds: nlmsg_get_pos() return current position in message nlmsg_trim() trim part of message nla_reserve_nohdr(skb, len) reserve room for an attribute w/o hdr nla_put_nohdr(skb, len, data) add attribute w/o hdr nla_find_nested() find attribute in nested attributes Fixes nlmsg_new() to take allocation flags and consider size. Signed-off-by: Thomas Graf Signed-off-by: David S. Miller --- kernel/taskstats.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/taskstats.c b/kernel/taskstats.c index e78187657330..2ed4040d0dc5 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -75,7 +75,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, /* * If new attributes are added, please revisit this allocation */ - skb = nlmsg_new(size); + skb = nlmsg_new(size, GFP_KERNEL); if (!skb) return -ENOMEM; -- cgit v1.2.3 From 1cc5f7142eca352109895fe20b1fc6405dd17727 Mon Sep 17 00:00:00 2001 From: Ed Swierk Date: Mon, 25 Sep 2006 16:25:36 -0700 Subject: [PATCH] load_module: no BUG if module_subsys uninitialized Invoking load_module() before param_sysfs_init() is called crashes in mod_sysfs_setup(), since the kset in module_subsys is not initialized yet. In my case, net-pf-1 is getting modprobed as a result of hotplug trying to create a UNIX socket. Calls to hotplug begin after the topology_init initcall. Another patch for the same symptom (module_subsys-initialize-earlier.patch) moves param_sysfs_init() to the subsys initcalls, but this is still not early enough in the boot process in some cases. In particular, topology_init() causes /sbin/hotplug to run, which requests net-pf-1 (the UNIX socket protocol) which can be compiled as a module. Moving param_sysfs_init() to the postcore initcalls fixes this particular race, but there might well be other cases where a usermodehelper causes a module to load earlier still. The patch makes load_module() return an error rather than crashing the kernel if invoked before module_subsys is initialized. 
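For context on the initcall ordering discussed above: boot-time initcalls run in fixed levels (roughly core, postcore, arch, subsys, fs, device, late), so moving param_sysfs_init() from the subsys level to the postcore level only makes it run earlier within that sequence; a usermodehelper can still trigger a module load before a given level has run, which is why the defensive check below is still needed. A minimal sketch of how a function is attached to a level, using a hypothetical function name not taken from this patch:

	#include <linux/init.h>

	static int __init example_setup(void)	/* hypothetical init function */
	{
		/* ...set up state needed by later boot phases... */
		return 0;
	}
	/* postcore_initcall() runs before any subsys_initcall() during boot */
	postcore_initcall(example_setup);
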
Cc: Mark Huang Cc: Greg KH Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/module.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 2a19cd47c046..b7fe6e840963 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1054,6 +1054,12 @@ static int mod_sysfs_setup(struct module *mod, { int err; + if (!module_subsys.kset.subsys) { + printk(KERN_ERR "%s: module_subsys not initialized\n", + mod->name); + err = -EINVAL; + goto out; + } memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); err = kobject_set_name(&mod->mkobj.kobj, "%s", mod->name); if (err) -- cgit v1.2.3 From 7c8265f51073bc8632a99de78d5fd19117ed78b7 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 24 Jun 2006 14:50:29 -0700 Subject: Suspend infrastructure cleanup and extension Allow devices to participate in the suspend process more intimately, in particular, allow the final phase (with interrupts disabled) to also be open to normal devices, not just system devices. Also, allow classes to participate in device suspend. Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/power/main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/power/main.c b/kernel/power/main.c index 6d295c776794..0c3ed6ac938e 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -57,6 +57,10 @@ static int suspend_prepare(suspend_state_t state) if (!pm_ops || !pm_ops->enter) return -EPERM; + error = device_prepare_suspend(PMSG_SUSPEND); + if (error) + return error; + pm_prepare_console(); disable_nonboot_cpus(); -- cgit v1.2.3 From f1cc0a894c963923b766eb2d455747495e6e982d Mon Sep 17 00:00:00 2001 From: David Brownell Date: Mon, 14 Aug 2006 23:11:08 -0700 Subject: PM: issue PM_EVENT_PRETHAW This patch is the first of this series that should actually change any behavior ... by issuing the new event, now tha the rest of the kernel is prepared to receive it. This converts the PM core to issue the new PRETHAW message, which the rest of the kernel is now ready to receive. Signed-off-by: David Brownell Cc: "Rafael J. Wysocki" Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/power/disk.c | 4 ++-- kernel/power/swsusp.c | 9 ++++++++- kernel/power/user.c | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/power/disk.c b/kernel/power/disk.c index e13e74067845..a3c34fb14321 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -98,7 +98,7 @@ static void unprepare_processes(void) } /** - * pm_suspend_disk - The granpappy of power management. + * pm_suspend_disk - The granpappy of hibernation power management. * * If we're going through the firmware, then get it over with quickly. * @@ -207,7 +207,7 @@ static int software_resume(void) pr_debug("PM: Preparing devices for restore.\n"); - if ((error = device_suspend(PMSG_FREEZE))) { + if ((error = device_suspend(PMSG_PRETHAW))) { printk("Some devices failed to suspend\n"); swsusp_free(); goto Thaw; diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 17f669c83012..62752899b1a1 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -248,6 +248,9 @@ int swsusp_suspend(void) restore_processor_state(); Restore_highmem: restore_highmem(); + /* NOTE: device_power_up() is just a resume() for devices + * that suspended with irqs off ... no overall powerup. 
+ */ device_power_up(); Enable_irqs: local_irq_enable(); @@ -257,8 +260,12 @@ Enable_irqs: int swsusp_resume(void) { int error; + local_irq_disable(); - if (device_power_down(PMSG_FREEZE)) + /* NOTE: device_power_down() is just a suspend() with irqs off; + * it has no special "power things down" semantics + */ + if (device_power_down(PMSG_PRETHAW)) printk(KERN_ERR "Some devices failed to power down, very bad\n"); /* We'll ignore saved state, but this gets preempt count (etc) right */ save_processor_state(); diff --git a/kernel/power/user.c b/kernel/power/user.c index 3f1539fbe48a..5a8d060d7909 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -191,7 +191,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, } down(&pm_sem); pm_prepare_console(); - error = device_suspend(PMSG_FREEZE); + error = device_suspend(PMSG_PRETHAW); if (!error) { error = swsusp_resume(); device_resume(); -- cgit v1.2.3 From 2bca293e56b6a8cd16bb6e70a09b2adac9c723b5 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 30 Aug 2006 13:54:36 -0700 Subject: PM: add kconfig option for deprecated .../power/state files Add a new PM_SYSFS_DEPRECATED config option to control whether or not the /sys/devices/.../power/state files are provided. This will make it easier to get rid of that mechanism when the time comes, and to verify that userspace tools work right without it. Signed-off-by: David Brownell Acked-by: Pavel Machek Signed-off-by: Greg Kroah-Hartman --- kernel/power/Kconfig | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 619ecabf7c58..1ed972070d19 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -53,6 +53,17 @@ config PM_TRACE CAUTION: this option will cause your machine's real-time clock to be set to an invalid time after a resume. +config PM_SYSFS_DEPRECATED + bool "Driver model /sys/devices/.../power/state files (DEPRECATED)" + depends on PM && SYSFS + default n + help + The driver model started out with a sysfs file intended to provide + a userspace hook for device power management. This feature has never + worked very well, except for limited testing purposes, and so it will + be removed. It's not clear that a generic mechanism could really + handle the wide variability of device power states; any replacements + are likely to be bus or driver specific. config SOFTWARE_SUSPEND bool "Software Suspend" -- cgit v1.2.3 From 1d3a82af45428c5e8deaa119cdeb79611ae46371 Mon Sep 17 00:00:00 2001 From: David Brownell Date: Wed, 30 Aug 2006 14:09:47 -0700 Subject: PM: no suspend_prepare() phase Remove the new suspend_prepare() phase. It doesn't seem very usable, has never been tested, doesn't address fault cleanup, and would need a sibling resume_complete(); plus there are no real use cases. It could be restored later if those issues get resolved. 
Signed-off-by: David Brownell Cc: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- kernel/power/main.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'kernel') diff --git a/kernel/power/main.c b/kernel/power/main.c index 0c3ed6ac938e..6d295c776794 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -57,10 +57,6 @@ static int suspend_prepare(suspend_state_t state) if (!pm_ops || !pm_ops->enter) return -EPERM; - error = device_prepare_suspend(PMSG_SUSPEND); - if (error) - return error; - pm_prepare_console(); disable_nonboot_cpus(); -- cgit v1.2.3 From 2fbe7b25c8edaf2d10e6c1a4cc9f8afe714c4764 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 26 Sep 2006 10:52:27 +0200 Subject: [PATCH] i386/x86-64: Remove un/set_nmi_callback and reserve/release_lapic_nmi functions Removes the un/set_nmi_callback and reserve/release_lapic_nmi functions as they are no longer needed. The various subsystems are modified to register with the die_notifier instead. Also includes compile fixes by Andrew Morton. Signed-off-by: Don Zickus Signed-off-by: Andi Kleen --- kernel/sysctl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 362a0cc37138..83f168361624 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -76,8 +76,6 @@ extern int compat_log; #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) int unknown_nmi_panic; -extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *, - void __user *, size_t *, loff_t *); #endif /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ @@ -628,7 +626,7 @@ static ctl_table kern_table[] = { .data = &unknown_nmi_panic, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_unknown_nmi_panic, + .proc_handler = &proc_dointvec, }, #endif #if defined(CONFIG_X86) -- cgit v1.2.3 From 407984f1af259b31957c7c05075a454a751bb801 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 26 Sep 2006 10:52:27 +0200 Subject: [PATCH] x86: Add abilty to enable/disable nmi watchdog with sysctl Adds a new /proc/sys/kernel/nmi call that will enable/disable the nmi watchdog. Signed-off-by: Don Zickus Signed-off-by: Andi Kleen --- kernel/sysctl.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 83f168361624..040de6bd74dd 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -76,6 +76,9 @@ extern int compat_log; #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) int unknown_nmi_panic; +int nmi_watchdog_enabled; +extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, + void __user *, size_t *, loff_t *); #endif /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ @@ -628,6 +631,14 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, + { + .ctl_name = KERN_NMI_WATCHDOG, + .procname = "nmi_watchdog", + .data = &nmi_watchdog_enabled, + .maxlen = sizeof (int), + .mode = 0644, + .proc_handler = &proc_nmi_enabled, + }, #endif #if defined(CONFIG_X86) { -- cgit v1.2.3 From 8da5adda91df3d2fcc5300e68da491694c9af019 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 26 Sep 2006 10:52:27 +0200 Subject: [PATCH] x86: Allow users to force a panic on NMI To quote Alan Cox: The default Linux behaviour on an NMI of either memory or unknown is to continue operation. 
For many environments such as scientific computing it is preferable that the box is taken out and the error dealt with than an uncorrected parity/ECC error get propogated. A small number of systems do generate NMI's for bizarre random reasons such as power management so the default is unchanged. In other respects the new proc/sys entry works like the existing panic controls already in that directory. This is separate to the edac support - EDAC allows supported chipsets to handle ECC errors well, this change allows unsupported cases to at least panic rather than cause problems further down the line. Signed-off-by: Don Zickus Signed-off-by: Andi Kleen --- kernel/panic.c | 1 + kernel/sysctl.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 8010b9b17aca..d2db3e2209e0 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -21,6 +21,7 @@ #include int panic_on_oops; +int panic_on_unrecovered_nmi; int tainted; static int pause_on_oops; static int pause_on_oops_flag; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 040de6bd74dd..220e20564124 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -641,6 +641,14 @@ static ctl_table kern_table[] = { }, #endif #if defined(CONFIG_X86) + { + .ctl_name = KERN_PANIC_ON_NMI, + .procname = "panic_on_unrecovered_nmi", + .data = &panic_on_unrecovered_nmi, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, { .ctl_name = KERN_BOOTLOADER_TYPE, .procname = "bootloader_type", -- cgit v1.2.3 From 3cfc348bf90ffaa777c188652aa297f04eb94de8 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 26 Sep 2006 10:52:28 +0200 Subject: [PATCH] x86: Add portable getcpu call For NUMA optimization and some other algorithms it is useful to have a fast to get the current CPU and node numbers in user space. x86-64 added a fast way to do this in a vsyscall. This adds a generic syscall for other architectures to make it a generic portable facility. I expect some of them will also implement it as a faster vsyscall. The cache is an optimization for the x86-64 vsyscall optimization. Since what the syscall returns is an approximation anyways and user space often wants very fast results it can be cached for some time. The norma methods to get this information in user space are relatively slow The vsyscall is in a better position to manage the cache because it has direct access to a fast time stamp (jiffies). For the generic syscall optimization it doesn't help much, but enforce a valid argument to keep programs portable I only added an i386 syscall entry for now. Other architectures can follow as needed. AK: Also added some cleanups from Andrew Morton Signed-off-by: Andi Kleen --- kernel/sys.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index e236f98f7ec5..3f894775488d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -2062,3 +2063,33 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, } return error; } + +asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, + struct getcpu_cache __user *cache) +{ + int err = 0; + int cpu = raw_smp_processor_id(); + if (cpup) + err |= put_user(cpu, cpup); + if (nodep) + err |= put_user(cpu_to_node(cpu), nodep); + if (cache) { + /* + * The cache is not needed for this implementation, + * but make sure user programs pass something + * valid. 
vsyscall implementations can instead make + * good use of the cache. Only use t0 and t1 because + * these are available in both 32bit and 64bit ABI (no + * need for a compat_getcpu). 32bit has enough + * padding + */ + unsigned long t0, t1; + get_user(t0, &cache->t0); + get_user(t1, &cache->t1); + t0++; + t1++; + put_user(t0, &cache->t0); + put_user(t1, &cache->t1); + } + return err ? -EFAULT : 0; +} -- cgit v1.2.3 From 0cb91a2293648507886563ccb91979cfc94d6a4b Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 26 Sep 2006 10:52:28 +0200 Subject: [PATCH] i386: Account spinlocks to the caller during profiling for !FP kernels This ports the algorithm from x86-64 (with improvements) to i386. Previously this only worked for frame pointer enabled kernels. But spinlocks have a very simple stack frame that can be manually analyzed. Do this. Signed-off-by: Andi Kleen --- kernel/spinlock.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/spinlock.c b/kernel/spinlock.c index fb524b009eef..9644a41e0bef 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -7,6 +7,11 @@ * * This file contains the spinlock/rwlock implementations for the * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them) + * + * Note that some architectures have special knowledge about the + * stack frames of these functions in their profile_pc. If you + * change anything significant here that could change the stack + * frame contact the architecture maintainers. */ #include -- cgit v1.2.3 From 5a1b3999d6cb7ab87f1f3b1700bc91839fd6fa29 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 26 Sep 2006 10:52:34 +0200 Subject: [PATCH] x86: Some preparationary cleanup for stack trace - Remove unused all_contexts parameter No caller used it - Move skip argument into the structure (needed for followon patches) Cc: mingo@elte.hu Signed-off-by: Andi Kleen --- kernel/lockdep.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 9bad17884513..900b4cb1a024 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -224,7 +224,10 @@ static int save_trace(struct stack_trace *trace) trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; trace->entries = stack_trace + nr_stack_trace_entries; - save_stack_trace(trace, NULL, 0, 3); + trace->skip = 3; + trace->all_contexts = 0; + + save_stack_trace(trace, NULL); trace->max_entries = trace->nr_entries; -- cgit v1.2.3 From 3fa7c794fe4dc127f7fac3fad4d13628e68f89ce Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Tue, 26 Sep 2006 10:52:34 +0200 Subject: [PATCH] Avoid recursion in lockdep when stack tracer takes locks The new dwarf2 unwinder needs to take locks to do backtraces inside modules. This patch makes sure lockdep which calls stacktrace is not reentered. Thanks to Ingo for suggesting this simpler approach. Cc: mingo@elte.hu Signed-off-by: Andi Kleen --- kernel/lockdep.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 900b4cb1a024..c088e5542e84 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -227,7 +227,11 @@ static int save_trace(struct stack_trace *trace) trace->skip = 3; trace->all_contexts = 0; + /* Make sure to not recurse in case the the unwinder needs to tak +e locks. 
*/ + lockdep_off(); save_stack_trace(trace, NULL); + lockdep_on(); trace->max_entries = trace->nr_entries; -- cgit v1.2.3 From 0a4254058037eb172758961d0a5b94f4320a1425 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 26 Sep 2006 10:52:38 +0200 Subject: [PATCH] Add the canary field to the PDA area and the task struct This patch adds the per thread cookie field to the task struct and the PDA. Also it makes sure that the PDA value gets the new cookie value at context switch, and that a new task gets a new cookie at task creation time. Signed-off-by: Arjan van Ven Signed-off-by: Ingo Molnar Signed-off-by: Andi Kleen CC: Andi Kleen --- kernel/fork.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index f9b014e3e700..a0dad84567c9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -175,6 +176,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) tsk->thread_info = ti; setup_thread_stack(tsk, orig); +#ifdef CONFIG_CC_STACKPROTECTOR + tsk->stack_canary = get_random_int(); +#endif + /* One for us, one for whoever does the "release_task()" (usually parent) */ atomic_set(&tsk->usage,2); atomic_set(&tsk->fs_excl, 0); -- cgit v1.2.3 From 3162f751d04086a9d006342de63ac8f44fe0f72a Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Tue, 26 Sep 2006 10:52:39 +0200 Subject: [PATCH] Add the __stack_chk_fail() function GCC emits a call to a __stack_chk_fail() function when the stack canary is not matching the expected value. Since this is a bad security issue; lets panic the kernel rather than limping along; the kernel really can't be trusted anymore when this happens. Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Signed-off-by: Andi Kleen CC: Andi Kleen --- kernel/panic.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index d2db3e2209e0..6ceb664fb52a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -271,3 +271,15 @@ void oops_exit(void) { do_oops_enter_exit(); } + +#ifdef CONFIG_CC_STACKPROTECTOR +/* + * Called when gcc's -fstack-protector feature is used, and + * gcc detects corruption of the on-stack canary value + */ +void __stack_chk_fail(void) +{ + panic("stack-protector: Kernel stack is corrupted"); +} +EXPORT_SYMBOL(__stack_chk_fail); +#endif -- cgit v1.2.3 From adf1423698f00d00b267f7dca8231340ce7d65ef Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 26 Sep 2006 10:52:41 +0200 Subject: [PATCH] i386/x86-64: Work around gcc bug with noreturn functions in unwinder Current gcc generates calls not jumps to noreturn functions. When that happens the return address can point to the next function, which confuses the unwinder. This patch works around it by marking asynchronous exception frames in contrast normal call frames in the unwind information. Then teach the unwinder to decode this. For normal call frames the unwinder now subtracts one from the address which avoids this problem. The standard libgcc unwinder uses the same trick. It doesn't include adjustment of the printed address (i.e. for the original example, it'd still be kernel_math_error+0 that gets displayed, but the unwinder wouldn't get confused anymore. 
This only works with binutils 2.6.17+ and some versions of H.J.Lu's 2.6.16 unfortunately because earlier binutils don't support .cfi_signal_frame [AK: added automatic detection of the new binutils and wrote description] Signed-off-by: Jan Beulich Signed-off-by: Andi Kleen --- kernel/unwind.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/unwind.c b/kernel/unwind.c index f69c804c8e62..3430475fcd88 100644 --- a/kernel/unwind.c +++ b/kernel/unwind.c @@ -603,6 +603,7 @@ int unwind(struct unwind_frame_info *frame) #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) const u32 *fde = NULL, *cie = NULL; const u8 *ptr = NULL, *end = NULL; + unsigned long pc = UNW_PC(frame) - frame->call_frame; unsigned long startLoc = 0, endLoc = 0, cfa; unsigned i; signed ptrType = -1; @@ -612,7 +613,7 @@ int unwind(struct unwind_frame_info *frame) if (UNW_PC(frame) == 0) return -EINVAL; - if ((table = find_table(UNW_PC(frame))) != NULL + if ((table = find_table(pc)) != NULL && !(table->size & (sizeof(*fde) - 1))) { unsigned long tableSize = table->size; @@ -647,7 +648,7 @@ int unwind(struct unwind_frame_info *frame) ptrType & DW_EH_PE_indirect ? ptrType : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); - if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) + if (pc >= startLoc && pc < endLoc) break; cie = NULL; } @@ -657,16 +658,28 @@ int unwind(struct unwind_frame_info *frame) state.cieEnd = ptr; /* keep here temporarily */ ptr = (const u8 *)(cie + 2); end = (const u8 *)(cie + 1) + *cie; + frame->call_frame = 1; if ((state.version = *ptr) != 1) cie = NULL; /* unsupported version */ else if (*++ptr) { /* check if augmentation size is first (and thus present) */ if (*ptr == 'z') { - /* check for ignorable (or already handled) - * nul-terminated augmentation string */ - while (++ptr < end && *ptr) - if (strchr("LPR", *ptr) == NULL) + while (++ptr < end && *ptr) { + switch(*ptr) { + /* check for ignorable (or already handled) + * nul-terminated augmentation string */ + case 'L': + case 'P': + case 'R': + continue; + case 'S': + frame->call_frame = 0; + continue; + default: break; + } + break; + } } if (ptr >= end || *ptr) cie = NULL; @@ -755,7 +768,7 @@ int unwind(struct unwind_frame_info *frame) state.org = startLoc; memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); /* process instructions */ - if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) + if (!processCFI(ptr, end, pc, ptrType, &state) || state.loc > endLoc || state.regs[retAddrReg].where == Nowhere || state.cfa.reg >= ARRAY_SIZE(reg_info) @@ -763,6 +776,11 @@ int unwind(struct unwind_frame_info *frame) || state.cfa.offs % sizeof(unsigned long)) return -EIO; /* update frame */ +#ifndef CONFIG_AS_CFI_SIGNAL_FRAME + if(frame->call_frame + && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign)) + frame->call_frame = 0; +#endif cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; startLoc = min((unsigned long)UNW_SP(frame), cfa); endLoc = max((unsigned long)UNW_SP(frame), cfa); @@ -866,6 +884,7 @@ int unwind_init_frame_info(struct unwind_frame_info *info, /*const*/ struct pt_regs *regs) { info->task = tsk; + info->call_frame = 0; arch_unw_init_frame_info(info, regs); return 0; @@ -879,6 +898,7 @@ int unwind_init_blocked(struct unwind_frame_info *info, struct task_struct *tsk) { info->task = tsk; + info->call_frame = 0; arch_unw_init_blocked(info); return 0; @@ -894,6 +914,7 @@ int unwind_init_running(struct unwind_frame_info *info, void *arg) { 
info->task = current; + info->call_frame = 0; return arch_unwind_init_running(info, callback, arg); } -- cgit v1.2.3 From 0a2966b48fb784e437520e400ddc94874ddbd4e8 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:30:51 -0700 Subject: [PATCH] Fix longstanding load balancing bug in the scheduler The scheduler will stop load balancing if the most busy processor contains processes pinned via processor affinity. The scheduler currently only does one search for busiest cpu. If it cannot pull any tasks away from the busiest cpu because they were pinned then the scheduler goes into a corner and sulks leaving the idle processors idle. F.e. If you have processor 0 busy running four tasks pinned via taskset, there are none on processor 1 and one just started two processes on processor 2 then the scheduler will not move one of the two processes away from processor 2. This patch fixes that issue by forcing the scheduler to come out of its corner and retrying the load balancing by considering other processors for load balancing. This patch was originally developed by John Hawkes and discussed at http://marc.theaimsgroup.com/?l=linux-kernel&m=113901368523205&w=2. I have removed extraneous material and gone back to equipping struct rq with the cpu the queue is associated with since this makes the patch much easier and it is likely that others in the future will have the same difficulty of figuring out which processor owns which runqueue. The overhead added through these patches is a single word on the stack if the kernel is configured to support 32 cpus or less (32 bit). For 32 bit environments the maximum number of cpus that can be configued is 255 which would result in the use of 32 bytes additional on the stack. On IA64 up to 1k cpus can be configured which will result in the use of 128 additional bytes on the stack. The maximum additional cache footprint is one cacheline. Typically memory use will be much less than a cacheline and the additional cpumask will be placed on the stack in a cacheline that already contains other local variable. Signed-off-by: Christoph Lameter Cc: John Hawkes Cc: "Siddha, Suresh B" Cc: Ingo Molnar Cc: Nick Piggin Cc: Peter Williams Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 46 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index a234fbee1238..5c848fd4e461 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -238,6 +238,7 @@ struct rq { /* For active balancing */ int active_balance; int push_cpu; + int cpu; /* cpu of this runqueue */ struct task_struct *migration_thread; struct list_head migration_queue; @@ -267,6 +268,15 @@ struct rq { static DEFINE_PER_CPU(struct rq, runqueues); +static inline int cpu_of(struct rq *rq) +{ +#ifdef CONFIG_SMP + return rq->cpu; +#else + return 0; +#endif +} + /* * The domain tree (rq->sd) is protected by RCU's quiescent state transition. * See detach_destroy_domains: synchronize_sched for details. 
@@ -2211,7 +2221,8 @@ out: */ static struct sched_group * find_busiest_group(struct sched_domain *sd, int this_cpu, - unsigned long *imbalance, enum idle_type idle, int *sd_idle) + unsigned long *imbalance, enum idle_type idle, int *sd_idle, + cpumask_t *cpus) { struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; unsigned long max_load, avg_load, total_load, this_load, total_pwr; @@ -2248,7 +2259,12 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, sum_weighted_load = sum_nr_running = avg_load = 0; for_each_cpu_mask(i, group->cpumask) { - struct rq *rq = cpu_rq(i); + struct rq *rq; + + if (!cpu_isset(i, *cpus)) + continue; + + rq = cpu_rq(i); if (*sd_idle && !idle_cpu(i)) *sd_idle = 0; @@ -2466,13 +2482,17 @@ ret: */ static struct rq * find_busiest_queue(struct sched_group *group, enum idle_type idle, - unsigned long imbalance) + unsigned long imbalance, cpumask_t *cpus) { struct rq *busiest = NULL, *rq; unsigned long max_load = 0; int i; for_each_cpu_mask(i, group->cpumask) { + + if (!cpu_isset(i, *cpus)) + continue; + rq = cpu_rq(i); if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance) @@ -2511,6 +2531,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, struct sched_group *group; unsigned long imbalance; struct rq *busiest; + cpumask_t cpus = CPU_MASK_ALL; if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) @@ -2518,13 +2539,15 @@ static int load_balance(int this_cpu, struct rq *this_rq, schedstat_inc(sd, lb_cnt[idle]); - group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle); +redo: + group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle, + &cpus); if (!group) { schedstat_inc(sd, lb_nobusyg[idle]); goto out_balanced; } - busiest = find_busiest_queue(group, idle, imbalance); + busiest = find_busiest_queue(group, idle, imbalance, &cpus); if (!busiest) { schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; @@ -2549,8 +2572,12 @@ static int load_balance(int this_cpu, struct rq *this_rq, double_rq_unlock(this_rq, busiest); /* All tasks on this runqueue were pinned by CPU affinity */ - if (unlikely(all_pinned)) + if (unlikely(all_pinned)) { + cpu_clear(cpu_of(busiest), cpus); + if (!cpus_empty(cpus)) + goto redo; goto out_balanced; + } } if (!nr_moved) { @@ -2639,18 +2666,22 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) unsigned long imbalance; int nr_moved = 0; int sd_idle = 0; + cpumask_t cpus = CPU_MASK_ALL; if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings) sd_idle = 1; schedstat_inc(sd, lb_cnt[NEWLY_IDLE]); - group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, &sd_idle); +redo: + group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE, + &sd_idle, &cpus); if (!group) { schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]); goto out_balanced; } - busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance); + busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance, + &cpus); if (!busiest) { schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]); goto out_balanced; @@ -2668,6 +2699,12 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd) minus_1_or_zero(busiest->nr_running), imbalance, sd, NEWLY_IDLE, NULL); spin_unlock(&busiest->lock); + + if (!nr_moved) { + cpu_clear(cpu_of(busiest), cpus); + if (!cpus_empty(cpus)) + goto redo; + } } if (!nr_moved) { @@ -6747,6 +6784,7 @@ void __init sched_init(void) rq->cpu_load[j] = 0; rq->active_balance = 0; rq->push_cpu = 0; + rq->cpu = i; rq->migration_thread = NULL; 
INIT_LIST_HEAD(&rq->migration_queue); #endif -- cgit v1.2.3 From 9b819d204cf602eab1a53a9ec4b8d2ca51e02a1d Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:40 -0700 Subject: [PATCH] Add __GFP_THISNODE to avoid fallback to other nodes and ignore cpuset/memory policy restrictions Add a new gfp flag __GFP_THISNODE to avoid fallback to other nodes. This flag is essential if a kernel component requires memory to be located on a certain node. It will be needed for alloc_pages_node() to force allocation on the indicated node and for alloc_pages() to force allocation on the current node. Signed-off-by: Christoph Lameter Cc: Andy Whitcroft Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4ea6f0dc2fc5..76940361273e 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2316,7 +2316,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) const struct cpuset *cs; /* current cpuset ancestors */ int allowed; /* is allocation in zone z allowed? */ - if (in_interrupt()) + if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) return 1; node = z->zone_pgdat->node_id; might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); -- cgit v1.2.3 From fbd98167e653535c5816be154f2149c0efa7757d Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:45 -0700 Subject: [PATCH] Profiling: require buffer allocation on the correct node Profiling really suffers with off node buffers. Fail if no memory is available on the nodes. The profiling code can deal with these failures should they occur. Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/profile.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/profile.c b/kernel/profile.c index d5bd75e7501c..fb660c7d35ba 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -309,13 +309,17 @@ static int __devinit profile_cpu_callback(struct notifier_block *info, node = cpu_to_node(cpu); per_cpu(cpu_profile_flip, cpu) = 0; if (!per_cpu(cpu_profile_hits, cpu)[1]) { - page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + page = alloc_pages_node(node, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + 0); if (!page) return NOTIFY_BAD; per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); } if (!per_cpu(cpu_profile_hits, cpu)[0]) { - page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + page = alloc_pages_node(node, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + 0); if (!page) goto out_free; per_cpu(cpu_profile_hits, cpu)[0] = page_address(page); @@ -491,12 +495,16 @@ static int __init create_hash_tables(void) int node = cpu_to_node(cpu); struct page *page; - page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + page = alloc_pages_node(node, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + 0); if (!page) goto out_cleanup; per_cpu(cpu_profile_hits, cpu)[1] = (struct profile_hit *)page_address(page); - page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + page = alloc_pages_node(node, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + 0); if (!page) goto out_cleanup; per_cpu(cpu_profile_hits, cpu)[0] -- cgit v1.2.3 From 0ff38490c836dc379ff7ec45b10a15a662f4e5f6 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:52 -0700 Subject: [PATCH] zone_reclaim: dynamic slab reclaim Currently one can enable slab reclaim by setting an 
explicit option in /proc/sys/vm/zone_reclaim_mode. Slab reclaim is then used as a final option if the freeing of unmapped file backed pages is not enough to free enough pages to allow a local allocation. However, that means that the slab can grow excessively and that most memory of a node may be used by slabs. We have had a case where a machine with 46GB of memory was using 40-42GB for slab. Zone reclaim was effective in dealing with pagecache pages. However, slab reclaim was only done during global reclaim (which is a bit rare on NUMA systems). This patch implements slab reclaim during zone reclaim. Zone reclaim occurs if there is a danger of an off node allocation. At that point we 1. Shrink the per node page cache if the number of pagecache pages is more than min_unmapped_ratio percent of pages in a zone. 2. Shrink the slab cache if the number of the nodes reclaimable slab pages (patch depends on earlier one that implements that counter) are more than min_slab_ratio (a new /proc/sys/vm tunable). The shrinking of the slab cache is a bit problematic since it is not node specific. So we simply calculate what point in the slab we want to reach (current per node slab use minus the number of pages that neeed to be allocated) and then repeately run the global reclaim until that is unsuccessful or we have reached the limit. I hope we will have zone based slab reclaim at some point which will make that easier. The default for the min_slab_ratio is 5% Also remove the slab option from /proc/sys/vm/zone_reclaim_mode. [akpm@osdl.org: cleanups] Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 362a0cc37138..fd43c3e6786b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -943,6 +943,17 @@ static ctl_table vm_table[] = { .extra1 = &zero, .extra2 = &one_hundred, }, + { + .ctl_name = VM_MIN_SLAB, + .procname = "min_slab_ratio", + .data = &sysctl_min_slab_ratio, + .maxlen = sizeof(sysctl_min_slab_ratio), + .mode = 0644, + .proc_handler = &sysctl_min_slab_ratio_sysctl_handler, + .strategy = &sysctl_intvec, + .extra1 = &zero, + .extra2 = &one_hundred, + }, #endif #ifdef CONFIG_X86_32 { -- cgit v1.2.3 From 89fa30242facca249aead2aac03c4c69764f911c Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 25 Sep 2006 23:31:55 -0700 Subject: [PATCH] NUMA: Add zone_to_nid function There are many places where we need to determine the node of a zone. Currently we use a difficult to read sequence of pointer dereferencing. Put that into an inline function and use throughout VM. Maybe we can find a way to optimize the lookup in the future. 
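The helper itself is added outside kernel/, so its definition does not appear in the hunk below; reconstructed from the call sites it replaces (z->zone_pgdat->node_id becomes zone_to_nid(z)), a plausible sketch of the inline is:

	/* Sketch reconstructed from the call sites changed below; the actual
	 * definition lives in the mm headers and may differ in detail. */
	static inline int zone_to_nid(struct zone *zone)
	{
		return zone->zone_pgdat->node_id;
	}
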
Signed-off-by: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 76940361273e..cff41511269f 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2245,7 +2245,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) int i; for (i = 0; zl->zones[i]; i++) { - int nid = zl->zones[i]->zone_pgdat->node_id; + int nid = zone_to_nid(zl->zones[i]); if (node_isset(nid, current->mems_allowed)) return 1; @@ -2318,7 +2318,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) return 1; - node = z->zone_pgdat->node_id; + node = zone_to_nid(z); might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); if (node_isset(node, current->mems_allowed)) return 1; -- cgit v1.2.3 From 62bac0185ad3dfef11d9602980445c54d45199c6 Mon Sep 17 00:00:00 2001 From: Stephen Smalley Date: Mon, 25 Sep 2006 23:31:56 -0700 Subject: [PATCH] selinux: eliminate selinux_task_ctxid Eliminate selinux_task_ctxid since it duplicates selinux_task_get_sid. Signed-off-by: Stephen Smalley Acked-by: James Morris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/auditsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 1bd8827a0102..331e17010393 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -385,7 +385,7 @@ static int audit_filter_rules(struct task_struct *tsk, logged upon error */ if (f->se_rule) { if (need_sid) { - selinux_task_ctxid(tsk, &sid); + selinux_get_task_sid(tsk, &sid); need_sid = 0; } result = selinux_audit_rule_match(sid, f->type, -- cgit v1.2.3 From 1a70cd40cb291c25b67ec0da715a49d76719329d Mon Sep 17 00:00:00 2001 From: Stephen Smalley Date: Mon, 25 Sep 2006 23:31:57 -0700 Subject: [PATCH] selinux: rename selinux_ctxid_to_string Rename selinux_ctxid_to_string to selinux_sid_to_string to be consistent with other interfaces. 
Signed-off-by: Stephen Smalley Acked-by: James Morris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/audit.c | 14 +++++++------- kernel/auditfilter.c | 2 +- kernel/auditsc.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 963fd15c9621..f9889ee77825 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -244,7 +244,7 @@ static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sid) char *ctx = NULL; u32 len; int rc; - if ((rc = selinux_ctxid_to_string(sid, &ctx, &len))) + if ((rc = selinux_sid_to_string(sid, &ctx, &len))) return rc; else audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, @@ -267,7 +267,7 @@ static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid) char *ctx = NULL; u32 len; int rc; - if ((rc = selinux_ctxid_to_string(sid, &ctx, &len))) + if ((rc = selinux_sid_to_string(sid, &ctx, &len))) return rc; else audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, @@ -293,7 +293,7 @@ static int audit_set_enabled(int state, uid_t loginuid, u32 sid) char *ctx = NULL; u32 len; int rc; - if ((rc = selinux_ctxid_to_string(sid, &ctx, &len))) + if ((rc = selinux_sid_to_string(sid, &ctx, &len))) return rc; else audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, @@ -321,7 +321,7 @@ static int audit_set_failure(int state, uid_t loginuid, u32 sid) char *ctx = NULL; u32 len; int rc; - if ((rc = selinux_ctxid_to_string(sid, &ctx, &len))) + if ((rc = selinux_sid_to_string(sid, &ctx, &len))) return rc; else audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, @@ -538,7 +538,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (status_get->mask & AUDIT_STATUS_PID) { int old = audit_pid; if (sid) { - if ((err = selinux_ctxid_to_string( + if ((err = selinux_sid_to_string( sid, &ctx, &len))) return err; else @@ -576,7 +576,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) "user pid=%d uid=%u auid=%u", pid, uid, loginuid); if (sid) { - if (selinux_ctxid_to_string( + if (selinux_sid_to_string( sid, &ctx, &len)) { audit_log_format(ab, " ssid=%u", sid); @@ -614,7 +614,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) loginuid, sid); break; case AUDIT_SIGNAL_INFO: - err = selinux_ctxid_to_string(audit_sig_sid, &ctx, &len); + err = selinux_sid_to_string(audit_sig_sid, &ctx, &len); if (err) return err; sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index a44879b0c72f..1a58a81fb09d 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1398,7 +1398,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action, if (sid) { char *ctx = NULL; u32 len; - if (selinux_ctxid_to_string(sid, &ctx, &len)) + if (selinux_sid_to_string(sid, &ctx, &len)) audit_log_format(ab, " ssid=%u", sid); else audit_log_format(ab, " subj=%s", ctx); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 331e17010393..fb83c5cb8c32 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -898,7 +898,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts if (axi->osid != 0) { char *ctx = NULL; u32 len; - if (selinux_ctxid_to_string( + if (selinux_sid_to_string( axi->osid, &ctx, &len)) { audit_log_format(ab, " osid=%u", axi->osid); @@ -1005,7 +1005,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts if (n->osid != 0) { char *ctx = NULL; u32 len; - if (selinux_ctxid_to_string( + if 
(selinux_sid_to_string( n->osid, &ctx, &len)) { audit_log_format(ab, " osid=%u", n->osid); call_panic = 2; -- cgit v1.2.3 From af8c65b57aaa4ae321af34dbfc5ca7f5625263fe Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 25 Sep 2006 23:32:07 -0700 Subject: [PATCH] FRV: permit __do_IRQ() to be dispensed with Permit __do_IRQ() to be dispensed with based on a configuration option. Signed-off-by: David Howells Cc: Benjamin Herrenschmidt Cc: Thomas Gleixner Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/handle.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 48a53f68af96..4c6cdbaed661 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -154,6 +154,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs, return retval; } +#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ /** * __do_IRQ - original all in one highlevel IRQ handler * @irq: the interrupt number @@ -253,6 +254,7 @@ out: return 1; } +#endif #ifdef CONFIG_TRACE_IRQFLAGS -- cgit v1.2.3 From 3a4f7577c9ef393ca80c783f02ffbc125de771c7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 25 Sep 2006 23:32:41 -0700 Subject: [PATCH] swsusp: add write-speed instrumentation Add some instrumentation to the swsusp writeout code to show what bandwidth we're achieving. Cc: Pavel Machek Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swap.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index f1dd146bd64d..79b66e734bdb 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -146,6 +146,26 @@ static void release_swap_writer(struct swap_map_handle *handle) handle->bitmap = NULL; } +static void show_speed(struct timeval *start, struct timeval *stop, + unsigned nr_pages, char *msg) +{ + s64 elapsed_centisecs64; + int centisecs; + int k; + int kps; + + elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start); + do_div(elapsed_centisecs64, NSEC_PER_SEC / 100); + centisecs = elapsed_centisecs64; + if (centisecs == 0) + centisecs = 1; /* avoid div-by-zero */ + k = nr_pages * (PAGE_SIZE / 1024); + kps = (k * 100) / centisecs; + printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k, + centisecs / 100, centisecs % 100, + kps / 1000, (kps % 1000) / 10); +} + static int get_swap_writer(struct swap_map_handle *handle) { handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); @@ -206,17 +226,21 @@ static int flush_swap_writer(struct swap_map_handle *handle) static int save_image(struct swap_map_handle *handle, struct snapshot_handle *snapshot, - unsigned int nr_pages) + unsigned int nr_to_write) { unsigned int m; int ret; int error = 0; + int nr_pages; + struct timeval start; + struct timeval stop; - printk("Saving image data pages (%u pages) ... ", nr_pages); - m = nr_pages / 100; + printk("Saving image data pages (%u pages) ... 
", nr_to_write); + m = nr_to_write / 100; if (!m) m = 1; nr_pages = 0; + do_gettimeofday(&start); do { ret = snapshot_read_next(snapshot, PAGE_SIZE); if (ret > 0) { @@ -228,8 +252,10 @@ static int save_image(struct swap_map_handle *handle, nr_pages++; } } while (ret > 0); + do_gettimeofday(&stop); if (!error) printk("\b\b\b\bdone\n"); + show_speed(&start, &stop, nr_to_write, "Wrote"); return error; } -- cgit v1.2.3 From ab954160350c91c77ae03740ef90458c3ad5412c Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 25 Sep 2006 23:32:42 -0700 Subject: [PATCH] swsusp: write speedup Switch the swsusp writeout code from 4k-at-a-time to 4MB-at-a-time. Crufty old PIII testbox: 12.9 MB/s -> 20.9 MB/s Sony Vaio: 14.7 MB/s -> 26.5 MB/s The implementation is crude. A better one would use larger BIOs, but wouldn't gain any performance. The memcpys will be mostly pipelined with the IO and basically come for free. The ENOMEM path has not been tested. It should be. Cc: Pavel Machek Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swap.c | 93 ++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 75 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 79b66e734bdb..2dc883d361d5 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -49,18 +49,16 @@ static int mark_swapfiles(swp_entry_t start) { int error; - rw_swap_page_sync(READ, - swp_entry(root_swap, 0), - virt_to_page((unsigned long)&swsusp_header)); + rw_swap_page_sync(READ, swp_entry(root_swap, 0), + virt_to_page((unsigned long)&swsusp_header), NULL); if (!memcmp("SWAP-SPACE",swsusp_header.sig, 10) || !memcmp("SWAPSPACE2",swsusp_header.sig, 10)) { memcpy(swsusp_header.orig_sig,swsusp_header.sig, 10); memcpy(swsusp_header.sig,SWSUSP_SIG, 10); swsusp_header.image = start; - error = rw_swap_page_sync(WRITE, - swp_entry(root_swap, 0), - virt_to_page((unsigned long) - &swsusp_header)); + error = rw_swap_page_sync(WRITE, swp_entry(root_swap, 0), + virt_to_page((unsigned long)&swsusp_header), + NULL); } else { pr_debug("swsusp: Partition is not swap space.\n"); error = -ENODEV; @@ -88,16 +86,37 @@ static int swsusp_swap_check(void) /* This is called before saving image */ * write_page - Write one page to given swap location. * @buf: Address we're writing. * @offset: Offset of the swap page we're writing to. + * @bio_chain: Link the next write BIO here */ -static int write_page(void *buf, unsigned long offset) +static int write_page(void *buf, unsigned long offset, struct bio **bio_chain) { swp_entry_t entry; int error = -ENOSPC; if (offset) { + struct page *page = virt_to_page(buf); + + if (bio_chain) { + /* + * Whether or not we successfully allocated a copy page, + * we take a ref on the page here. It gets undone in + * wait_on_bio_chain(). 
+ */ + struct page *page_copy; + page_copy = alloc_page(GFP_ATOMIC); + if (page_copy == NULL) { + WARN_ON_ONCE(1); + bio_chain = NULL; /* Go synchronous */ + get_page(page); + } else { + memcpy(page_address(page_copy), + page_address(page), PAGE_SIZE); + page = page_copy; + } + } entry = swp_entry(root_swap, offset); - error = rw_swap_page_sync(WRITE, entry, virt_to_page(buf)); + error = rw_swap_page_sync(WRITE, entry, page, bio_chain); } return error; } @@ -185,37 +204,68 @@ static int get_swap_writer(struct swap_map_handle *handle) return 0; } -static int swap_write_page(struct swap_map_handle *handle, void *buf) +static int wait_on_bio_chain(struct bio **bio_chain) { - int error; + struct bio *bio; + struct bio *next_bio; + int ret = 0; + + if (bio_chain == NULL) + return 0; + + bio = *bio_chain; + while (bio) { + struct page *page; + + next_bio = bio->bi_private; + page = bio->bi_io_vec[0].bv_page; + wait_on_page_locked(page); + if (!PageUptodate(page) || PageError(page)) + ret = -EIO; + put_page(page); + bio_put(bio); + bio = next_bio; + } + *bio_chain = NULL; + return ret; +} + +static int swap_write_page(struct swap_map_handle *handle, void *buf, + struct bio **bio_chain) +{ + int error = 0; unsigned long offset; if (!handle->cur) return -EINVAL; offset = alloc_swap_page(root_swap, handle->bitmap); - error = write_page(buf, offset); + error = write_page(buf, offset, bio_chain); if (error) return error; handle->cur->entries[handle->k++] = offset; if (handle->k >= MAP_PAGE_ENTRIES) { + error = wait_on_bio_chain(bio_chain); + if (error) + goto out; offset = alloc_swap_page(root_swap, handle->bitmap); if (!offset) return -ENOSPC; handle->cur->next_swap = offset; - error = write_page(handle->cur, handle->cur_swap); + error = write_page(handle->cur, handle->cur_swap, NULL); if (error) - return error; + goto out; memset(handle->cur, 0, PAGE_SIZE); handle->cur_swap = offset; handle->k = 0; } - return 0; +out: + return error; } static int flush_swap_writer(struct swap_map_handle *handle) { if (handle->cur && handle->cur_swap) - return write_page(handle->cur, handle->cur_swap); + return write_page(handle->cur, handle->cur_swap, NULL); else return -EINVAL; } @@ -232,6 +282,8 @@ static int save_image(struct swap_map_handle *handle, int ret; int error = 0; int nr_pages; + int err2; + struct bio *bio; struct timeval start; struct timeval stop; @@ -240,11 +292,13 @@ static int save_image(struct swap_map_handle *handle, if (!m) m = 1; nr_pages = 0; + bio = NULL; do_gettimeofday(&start); do { ret = snapshot_read_next(snapshot, PAGE_SIZE); if (ret > 0) { - error = swap_write_page(handle, data_of(*snapshot)); + error = swap_write_page(handle, data_of(*snapshot), + &bio); if (error) break; if (!(nr_pages % m)) @@ -252,7 +306,10 @@ static int save_image(struct swap_map_handle *handle, nr_pages++; } } while (ret > 0); + err2 = wait_on_bio_chain(&bio); do_gettimeofday(&stop); + if (!error) + error = err2; if (!error) printk("\b\b\b\bdone\n"); show_speed(&start, &stop, nr_to_write, "Wrote"); @@ -307,7 +364,7 @@ int swsusp_write(void) error = get_swap_writer(&handle); if (!error) { unsigned long start = handle.cur_swap; - error = swap_write_page(&handle, header); + error = swap_write_page(&handle, header, NULL); if (!error) error = save_image(&handle, &snapshot, header->pages - 1); -- cgit v1.2.3 From 8c002494b55119a3fd1dddee83b4fb75cfda47e5 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 25 Sep 2006 23:32:43 -0700 Subject: [PATCH] swsusp: add read-speed instrumentation Add some instrumentation to 
the swsusp readin code to show what bandwidth we're achieving. Cc: Pavel Machek Cc: "Rafael J. Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/swap.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 2dc883d361d5..9ab989572164 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -522,12 +522,15 @@ static int load_image(struct swap_map_handle *handle, unsigned int m; int ret; int error = 0; + struct timeval start; + struct timeval stop; printk("Loading image data pages (%u pages) ... ", nr_pages); m = nr_pages / 100; if (!m) m = 1; nr_pages = 0; + do_gettimeofday(&start); do { ret = snapshot_write_next(snapshot, PAGE_SIZE); if (ret > 0) { @@ -539,11 +542,13 @@ static int load_image(struct swap_map_handle *handle, nr_pages++; } } while (ret > 0); + do_gettimeofday(&stop); if (!error) { printk("\b\b\b\bdone\n"); if (!snapshot_image_loaded(snapshot)) error = -ENODATA; } + show_speed(&start, &stop, nr_pages, "Read"); return error; } -- cgit v1.2.3 From 546e0d271941dd1ff6961e2a1f7eac75f1fc277e Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 25 Sep 2006 23:32:44 -0700 Subject: [PATCH] swsusp: read speedup Implement async reads for swsusp resuming. Crufty old PIII testbox: 15.7 MB/s -> 20.3 MB/s Sony Vaio: 14.6 MB/s -> 33.3 MB/s I didn't implement the post-resume bio_set_pages_dirty(). I don't really understand why resume needs to run set_page_dirty() against these pages. It might be a worry that this code modifies PG_Uptodate, PG_Error and PG_Locked against the image pages. Can this possibly affect the resumed-into kernel? Hopefully not, if we're atomically restoring its mem_map? Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Jens Axboe Cc: Laurent Riffard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/power.h | 1 + kernel/power/snapshot.c | 11 ++-- kernel/power/swap.c | 138 ++++++++++++++++++++++++------------------------ 3 files changed, 79 insertions(+), 71 deletions(-) (limited to 'kernel') diff --git a/kernel/power/power.h b/kernel/power/power.h index 57a792982fb9..59ce712f082d 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -58,6 +58,7 @@ struct snapshot_handle { struct pbe *pbe, *last_pbe; void *buffer; unsigned int buf_offset; + int sync_read; }; #define data_of(handle) ((handle).buffer + (handle).buf_offset) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 75d4886e648e..591301ae8b7d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -314,7 +314,7 @@ static unsigned int unsafe_pages; * and we count them using unsafe_pages */ -static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed) +static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) { void *res; @@ -828,13 +828,16 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) } if (!handle->offset) handle->buffer = buffer; + handle->sync_read = 1; if (handle->prev < handle->page) { if (!handle->prev) { - error = load_header(handle, (struct swsusp_info *)buffer); + error = load_header(handle, + (struct swsusp_info *)buffer); if (error) return error; } else if (handle->prev <= nr_meta_pages) { - handle->pbe = unpack_orig_addresses(buffer, handle->pbe); + handle->pbe = unpack_orig_addresses(buffer, + handle->pbe); if (!handle->pbe) { error = prepare_image(handle); if (error) @@ -842,10 +845,12 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) handle->pbe = pagedir_nosave; 
handle->last_pbe = NULL; handle->buffer = get_buffer(handle); + handle->sync_read = 0; } } else { handle->pbe = handle->pbe->next; handle->buffer = get_buffer(handle); + handle->sync_read = 0; } handle->prev = handle->page; } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 9ab989572164..8309d20b2563 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -214,6 +215,8 @@ static int wait_on_bio_chain(struct bio **bio_chain) return 0; bio = *bio_chain; + if (bio == NULL) + return 0; while (bio) { struct page *page; @@ -349,7 +352,8 @@ int swsusp_write(void) int error; if ((error = swsusp_swap_check())) { - printk(KERN_ERR "swsusp: Cannot find swap device, try swapon -a.\n"); + printk(KERN_ERR "swsusp: Cannot find swap device, try " + "swapon -a.\n"); return error; } memset(&snapshot, 0, sizeof(struct snapshot_handle)); @@ -381,27 +385,6 @@ int swsusp_write(void) return error; } -/* - * Using bio to read from swap. - * This code requires a bit more work than just using buffer heads - * but, it is the recommended way for 2.5/2.6. - * The following are to signal the beginning and end of I/O. Bios - * finish asynchronously, while we want them to happen synchronously. - * A simple atomic_t, and a wait loop take care of this problem. - */ - -static atomic_t io_done = ATOMIC_INIT(0); - -static int end_io(struct bio *bio, unsigned int num, int err) -{ - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { - printk(KERN_ERR "I/O error reading swsusp image.\n"); - return -EIO; - } - atomic_set(&io_done, 0); - return 0; -} - static struct block_device *resume_bdev; /** @@ -409,15 +392,15 @@ static struct block_device *resume_bdev; * @rw: READ or WRITE. * @off physical offset of page. * @page: page we're reading or writing. + * @bio_chain: list of pending biod (for async reading) * * Straight from the textbook - allocate and initialize the bio. - * If we're writing, make sure the page is marked as dirty. - * Then submit it and wait. + * If we're reading, make sure the page is marked as dirty. + * Then submit it and, if @bio_chain == NULL, wait. 
*/ - -static int submit(int rw, pgoff_t page_off, void *page) +static int submit(int rw, pgoff_t page_off, struct page *page, + struct bio **bio_chain) { - int error = 0; struct bio *bio; bio = bio_alloc(GFP_ATOMIC, 1); @@ -425,33 +408,40 @@ static int submit(int rw, pgoff_t page_off, void *page) return -ENOMEM; bio->bi_sector = page_off * (PAGE_SIZE >> 9); bio->bi_bdev = resume_bdev; - bio->bi_end_io = end_io; + bio->bi_end_io = end_swap_bio_read; - if (bio_add_page(bio, virt_to_page(page), PAGE_SIZE, 0) < PAGE_SIZE) { - printk("swsusp: ERROR: adding page to bio at %ld\n",page_off); - error = -EFAULT; - goto Done; + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { + printk("swsusp: ERROR: adding page to bio at %ld\n", page_off); + bio_put(bio); + return -EFAULT; } - atomic_set(&io_done, 1); - submit_bio(rw | (1 << BIO_RW_SYNC), bio); - while (atomic_read(&io_done)) - yield(); - if (rw == READ) - bio_set_pages_dirty(bio); - Done: - bio_put(bio); - return error; + lock_page(page); + bio_get(bio); + + if (bio_chain == NULL) { + submit_bio(rw | (1 << BIO_RW_SYNC), bio); + wait_on_page_locked(page); + if (rw == READ) + bio_set_pages_dirty(bio); + bio_put(bio); + } else { + get_page(page); + bio->bi_private = *bio_chain; + *bio_chain = bio; + submit_bio(rw | (1 << BIO_RW_SYNC), bio); + } + return 0; } -static int bio_read_page(pgoff_t page_off, void *page) +static int bio_read_page(pgoff_t page_off, void *addr, struct bio **bio_chain) { - return submit(READ, page_off, page); + return submit(READ, page_off, virt_to_page(addr), bio_chain); } -static int bio_write_page(pgoff_t page_off, void *page) +static int bio_write_page(pgoff_t page_off, void *addr) { - return submit(WRITE, page_off, page); + return submit(WRITE, page_off, virt_to_page(addr), NULL); } /** @@ -476,7 +466,7 @@ static int get_swap_reader(struct swap_map_handle *handle, handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_ATOMIC); if (!handle->cur) return -ENOMEM; - error = bio_read_page(swp_offset(start), handle->cur); + error = bio_read_page(swp_offset(start), handle->cur, NULL); if (error) { release_swap_reader(handle); return error; @@ -485,7 +475,8 @@ static int get_swap_reader(struct swap_map_handle *handle, return 0; } -static int swap_read_page(struct swap_map_handle *handle, void *buf) +static int swap_read_page(struct swap_map_handle *handle, void *buf, + struct bio **bio_chain) { unsigned long offset; int error; @@ -495,16 +486,17 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf) offset = handle->cur->entries[handle->k]; if (!offset) return -EFAULT; - error = bio_read_page(offset, buf); + error = bio_read_page(offset, buf, bio_chain); if (error) return error; if (++handle->k >= MAP_PAGE_ENTRIES) { + error = wait_on_bio_chain(bio_chain); handle->k = 0; offset = handle->cur->next_swap; if (!offset) release_swap_reader(handle); - else - error = bio_read_page(offset, handle->cur); + else if (!error) + error = bio_read_page(offset, handle->cur, NULL); } return error; } @@ -517,38 +509,48 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf) static int load_image(struct swap_map_handle *handle, struct snapshot_handle *snapshot, - unsigned int nr_pages) + unsigned int nr_to_read) { unsigned int m; - int ret; int error = 0; struct timeval start; struct timeval stop; + struct bio *bio; + int err2; + unsigned nr_pages; - printk("Loading image data pages (%u pages) ... ", nr_pages); - m = nr_pages / 100; + printk("Loading image data pages (%u pages) ... 
", nr_to_read); + m = nr_to_read / 100; if (!m) m = 1; nr_pages = 0; + bio = NULL; do_gettimeofday(&start); - do { - ret = snapshot_write_next(snapshot, PAGE_SIZE); - if (ret > 0) { - error = swap_read_page(handle, data_of(*snapshot)); - if (error) - break; - if (!(nr_pages % m)) - printk("\b\b\b\b%3d%%", nr_pages / m); - nr_pages++; - } - } while (ret > 0); + for ( ; ; ) { + error = snapshot_write_next(snapshot, PAGE_SIZE); + if (error <= 0) + break; + error = swap_read_page(handle, data_of(*snapshot), &bio); + if (error) + break; + if (snapshot->sync_read) + error = wait_on_bio_chain(&bio); + if (error) + break; + if (!(nr_pages % m)) + printk("\b\b\b\b%3d%%", nr_pages / m); + nr_pages++; + } + err2 = wait_on_bio_chain(&bio); do_gettimeofday(&stop); + if (!error) + error = err2; if (!error) { printk("\b\b\b\bdone\n"); if (!snapshot_image_loaded(snapshot)) error = -ENODATA; } - show_speed(&start, &stop, nr_pages, "Read"); + show_speed(&start, &stop, nr_to_read, "Read"); return error; } @@ -571,7 +573,7 @@ int swsusp_read(void) header = (struct swsusp_info *)data_of(snapshot); error = get_swap_reader(&handle, swsusp_header.image); if (!error) - error = swap_read_page(&handle, header); + error = swap_read_page(&handle, header, NULL); if (!error) error = load_image(&handle, &snapshot, header->pages - 1); release_swap_reader(&handle); @@ -597,7 +599,7 @@ int swsusp_check(void) if (!IS_ERR(resume_bdev)) { set_blocksize(resume_bdev, PAGE_SIZE); memset(&swsusp_header, 0, sizeof(swsusp_header)); - if ((error = bio_read_page(0, &swsusp_header))) + if ((error = bio_read_page(0, &swsusp_header, NULL))) return error; if (!memcmp(SWSUSP_SIG, swsusp_header.sig, 10)) { memcpy(swsusp_header.sig, swsusp_header.orig_sig, 10); -- cgit v1.2.3 From ae83c5eef59ffe6eb61110b8c8fd1ea3e0881712 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:45 -0700 Subject: [PATCH] swsusp: clean up browsing of pfns Clean up some loops over pfns for each zone in snapshot.c: reduce the number of additions to perform, rework detection of saveable pages and make the code a bit less difficult to understand, hopefully. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 62 +++++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 591301ae8b7d..979096c27773 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -156,7 +156,7 @@ static inline int save_highmem(void) {return 0;} static inline int restore_highmem(void) {return 0;} #endif -static int pfn_is_nosave(unsigned long pfn) +static inline int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; @@ -167,43 +167,43 @@ static int pfn_is_nosave(unsigned long pfn) * saveable - Determine whether a page should be cloned or not. * @pfn: The page * - * We save a page if it's Reserved, and not in the range of pages - * statically defined as 'unsaveable', or if it isn't reserved, and - * isn't part of a free chunk of pages. + * We save a page if it isn't Nosave, and is not in the range of pages + * statically defined as 'unsaveable', and it + * isn't a part of a free chunk of pages. 
*/ -static int saveable(struct zone *zone, unsigned long *zone_pfn) +static struct page *saveable_page(unsigned long pfn) { - unsigned long pfn = *zone_pfn + zone->zone_start_pfn; struct page *page; if (!pfn_valid(pfn)) - return 0; + return NULL; page = pfn_to_page(pfn); - BUG_ON(PageReserved(page) && PageNosave(page)); + if (PageNosave(page)) - return 0; + return NULL; if (PageReserved(page) && pfn_is_nosave(pfn)) - return 0; + return NULL; if (PageNosaveFree(page)) - return 0; + return NULL; - return 1; + return page; } unsigned int count_data_pages(void) { struct zone *zone; - unsigned long zone_pfn; + unsigned long pfn, max_zone_pfn; unsigned int n = 0; for_each_zone (zone) { if (is_highmem(zone)) continue; mark_free_pages(zone); - for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) - n += saveable(zone, &zone_pfn); + max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) + n += !!saveable_page(pfn); } return n; } @@ -211,7 +211,7 @@ unsigned int count_data_pages(void) static void copy_data_pages(struct pbe *pblist) { struct zone *zone; - unsigned long zone_pfn; + unsigned long pfn, max_zone_pfn; struct pbe *pbe, *p; pbe = pblist; @@ -224,13 +224,14 @@ static void copy_data_pages(struct pbe *pblist) SetPageNosaveFree(virt_to_page(p)); for_each_pbe (p, pblist) SetPageNosaveFree(virt_to_page(p->address)); - for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) { - if (saveable(zone, &zone_pfn)) { - struct page *page; + max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { + struct page *page = saveable_page(pfn); + + if (page) { long *src, *dst; int n; - page = pfn_to_page(zone_pfn + zone->zone_start_pfn); BUG_ON(!pbe); pbe->orig_address = (unsigned long)page_address(page); /* copy_page and memcpy are not usable for copying task structs. */ @@ -383,13 +384,14 @@ static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, void swsusp_free(void) { struct zone *zone; - unsigned long zone_pfn; + unsigned long pfn, max_zone_pfn; for_each_zone(zone) { - for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) - if (pfn_valid(zone_pfn + zone->zone_start_pfn)) { - struct page *page; - page = pfn_to_page(zone_pfn + zone->zone_start_pfn); + max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) + if (pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + if (PageNosave(page) && PageNosaveFree(page)) { ClearPageNosave(page); ClearPageNosaveFree(page); @@ -598,7 +600,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) static int mark_unsafe_pages(struct pbe *pblist) { struct zone *zone; - unsigned long zone_pfn; + unsigned long pfn, max_zone_pfn; struct pbe *p; if (!pblist) /* a sanity check */ @@ -606,10 +608,10 @@ static int mark_unsafe_pages(struct pbe *pblist) /* Clear page flags */ for_each_zone (zone) { - for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) - if (pfn_valid(zone_pfn + zone->zone_start_pfn)) - ClearPageNosaveFree(pfn_to_page(zone_pfn + - zone->zone_start_pfn)); + max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) + if (pfn_valid(pfn)) + ClearPageNosaveFree(pfn_to_page(pfn)); } /* Mark orig addresses */ -- cgit v1.2.3 From fb13a28b0f5ada60861868c4fa48a12bd0cb8dea Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 25 Sep 2006 23:32:46 -0700 Subject: [PATCH] swsusp: struct snapshot_handle cleanup Add comments describing struct snapshot_handle and its members, change the confusing name of its member 'page' to 'cur'. Signed-off-by: Rafael J. Wysocki Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/power.h | 64 ++++++++++++++++++++++++++++++++++++++++++------- kernel/power/snapshot.c | 40 +++++++++++++++---------------- 2 files changed, 76 insertions(+), 28 deletions(-) (limited to 'kernel') diff --git a/kernel/power/power.h b/kernel/power/power.h index 59ce712f082d..1cefcf87a694 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -50,17 +50,65 @@ extern asmlinkage int swsusp_arch_resume(void); extern unsigned int count_data_pages(void); +/** + * Auxiliary structure used for reading the snapshot image data and + * metadata from and writing them to the list of page backup entries + * (PBEs) which is the main data structure of swsusp. + * + * Using struct snapshot_handle we can transfer the image, including its + * metadata, as a continuous sequence of bytes with the help of + * snapshot_read_next() and snapshot_write_next(). + * + * The code that writes the image to a storage or transfers it to + * the user land is required to use snapshot_read_next() for this + * purpose and it should not make any assumptions regarding the internal + * structure of the image. Similarly, the code that reads the image from + * a storage or transfers it from the user land is required to use + * snapshot_write_next(). + * + * This may allow us to change the internal structure of the image + * in the future with considerably less effort. + */ + struct snapshot_handle { - loff_t offset; - unsigned int page; - unsigned int page_offset; - unsigned int prev; - struct pbe *pbe, *last_pbe; - void *buffer; - unsigned int buf_offset; - int sync_read; + loff_t offset; /* number of the last byte ready for reading + * or writing in the sequence + */ + unsigned int cur; /* number of the block of PAGE_SIZE bytes the + * next operation will refer to (ie. current) + */ + unsigned int cur_offset; /* offset with respect to the current + * block (for the next operation) + */ + unsigned int prev; /* number of the block of PAGE_SIZE bytes that + * was the current one previously + */ + struct pbe *pbe; /* PBE that corresponds to 'buffer' */ + struct pbe *last_pbe; /* When the image is restored (eg. read + * from disk) we can store some image + * data directly in the page frames + * in which they were before suspend. + * In such a case the PBEs that + * correspond to them will be unused. + * This is the last PBE, so far, that + * does not correspond to such data. 
+ */ + void *buffer; /* address of the block to read from + * or write to + */ + unsigned int buf_offset; /* location to read from or write to, + * given as a displacement from 'buffer' + */ + int sync_read; /* Set to one to notify the caller of + * snapshot_write_next() that it may + * need to call wait_on_bio_chain() + */ }; +/* This macro returns the address from/to which the caller of + * snapshot_read_next()/snapshot_write_next() is allowed to + * read/write data after the function returns + */ #define data_of(handle) ((handle).buffer + (handle).buf_offset) extern int snapshot_read_next(struct snapshot_handle *handle, size_t count); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 979096c27773..81fe8de9e604 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -555,7 +555,7 @@ static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pb int snapshot_read_next(struct snapshot_handle *handle, size_t count) { - if (handle->page > nr_meta_pages + nr_copy_pages) + if (handle->cur > nr_meta_pages + nr_copy_pages) return 0; if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ @@ -568,8 +568,8 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) handle->buffer = buffer; handle->pbe = pagedir_nosave; } - if (handle->prev < handle->page) { - if (handle->page <= nr_meta_pages) { + if (handle->prev < handle->cur) { + if (handle->cur <= nr_meta_pages) { handle->pbe = pack_orig_addresses(buffer, handle->pbe); if (!handle->pbe) handle->pbe = pagedir_nosave; @@ -577,15 +577,15 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) handle->buffer = (void *)handle->pbe->address; handle->pbe = handle->pbe->next; } - handle->prev = handle->page; + handle->prev = handle->cur; } - handle->buf_offset = handle->page_offset; - if (handle->page_offset + count >= PAGE_SIZE) { - count = PAGE_SIZE - handle->page_offset; - handle->page_offset = 0; - handle->page++; + handle->buf_offset = handle->cur_offset; + if (handle->cur_offset + count >= PAGE_SIZE) { + count = PAGE_SIZE - handle->cur_offset; + handle->cur_offset = 0; + handle->cur++; } else { - handle->page_offset += count; + handle->cur_offset += count; } handle->offset += count; return count; @@ -820,7 +820,7 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) { int error = 0; - if (handle->prev && handle->page > nr_meta_pages + nr_copy_pages) + if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) return 0; if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ @@ -831,7 +831,7 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) if (!handle->offset) handle->buffer = buffer; handle->sync_read = 1; - if (handle->prev < handle->page) { + if (handle->prev < handle->cur) { if (!handle->prev) { error = load_header(handle, (struct swsusp_info *)buffer); @@ -854,15 +854,15 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) handle->buffer = get_buffer(handle); handle->sync_read = 0; } - handle->prev = handle->page; + handle->prev = handle->cur; } - handle->buf_offset = handle->page_offset; - if (handle->page_offset + count >= PAGE_SIZE) { - count = PAGE_SIZE - handle->page_offset; - handle->page_offset = 0; - handle->page++; + handle->buf_offset = handle->cur_offset; + if (handle->cur_offset + count >= PAGE_SIZE) { + count = PAGE_SIZE - handle->cur_offset; + handle->cur_offset = 0; + handle->cur++; } else { - handle->page_offset += count; + handle->cur_offset += count; } 
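/*
 * Editorial aside, not part of the diff: the cur/cur_offset/offset fields
 * documented above just map a continuous byte stream onto PAGE_SIZE blocks,
 * clamping each transfer at a block boundary exactly as the code a few
 * lines up does.  Below is a stand-alone model of that arithmetic; struct
 * handle and advance() are illustrative names only, and PAGE_SIZE is shrunk
 * to 16 so the printed numbers are easy to follow.
 */
#include <stdio.h>

#define PAGE_SIZE 16u

struct handle {
	unsigned int cur;		/* current PAGE_SIZE block            */
	unsigned int cur_offset;	/* offset inside the current block    */
	unsigned long offset;		/* bytes consumed so far, in total    */
};

/* Return how many of 'count' bytes fit before the next block boundary. */
static unsigned int advance(struct handle *h, unsigned int count)
{
	if (h->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - h->cur_offset;
		h->cur_offset = 0;
		h->cur++;
	} else {
		h->cur_offset += count;
	}
	h->offset += count;
	return count;
}

int main(void)
{
	struct handle h = { 0, 0, 0 };
	unsigned int sizes[] = { 10, 10, 10 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int got = advance(&h, sizes[i]);

		printf("asked %u, got %u, now in block %u at offset %u\n",
		       sizes[i], got, h.cur, h.cur_offset);
	}
	return 0;
}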
handle->offset += count; return count; @@ -871,5 +871,5 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) int snapshot_image_loaded(struct snapshot_handle *handle) { return !(!handle->pbe || handle->pbe->next || !nr_copy_pages || - handle->page <= nr_meta_pages + nr_copy_pages); + handle->cur <= nr_meta_pages + nr_copy_pages); } -- cgit v1.2.3 From e3920fb42c8ddfe63befb54d95c0e13eabacea9b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:48 -0700 Subject: [PATCH] Disable CPU hotplug during suspend The current suspend code has to be run on one CPU, so we use the CPU hotplug to take the non-boot CPUs offline on SMP machines. However, we should also make sure that these CPUs will not be enabled by someone else after we have disabled them. The functions disable_nonboot_cpus() and enable_nonboot_cpus() are moved to kernel/cpu.c, because they now refer to some stuff in there that should better be static. Also it's better if disable_nonboot_cpus() returns an error instead of panicking if something goes wrong, and enable_nonboot_cpus() has no reason to panic(), because the CPUs may have been enabled by the userland before it tries to take them online. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpu.c | 138 ++++++++++++++++++++++++++++++++++++++++++-------- kernel/power/Makefile | 2 - kernel/power/disk.c | 7 ++- kernel/power/main.c | 10 ++-- kernel/power/smp.c | 62 ----------------------- kernel/power/user.c | 14 +++-- 6 files changed, 137 insertions(+), 96 deletions(-) delete mode 100644 kernel/power/smp.c (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index f230f9ae01c2..32c96628463e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -21,6 +21,11 @@ static DEFINE_MUTEX(cpu_bitmask_lock); static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); +/* If set, cpu_up and cpu_down will return -EBUSY and do nothing. + * Should always be manipulated under cpu_add_remove_lock + */ +static int cpu_hotplug_disabled; + #ifdef CONFIG_HOTPLUG_CPU /* Crappy recursive lock-takers in cpufreq! 
Complain loudly about idiots */ @@ -108,30 +113,25 @@ static int take_cpu_down(void *unused) return 0; } -int cpu_down(unsigned int cpu) +/* Requires cpu_add_remove_lock to be held */ +static int _cpu_down(unsigned int cpu) { int err; struct task_struct *p; cpumask_t old_allowed, tmp; - mutex_lock(&cpu_add_remove_lock); - if (num_online_cpus() == 1) { - err = -EBUSY; - goto out; - } + if (num_online_cpus() == 1) + return -EBUSY; - if (!cpu_online(cpu)) { - err = -EINVAL; - goto out; - } + if (!cpu_online(cpu)) + return -EINVAL; err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, (void *)(long)cpu); if (err == NOTIFY_BAD) { printk("%s: attempt to take down CPU %u failed\n", __FUNCTION__, cpu); - err = -EINVAL; - goto out; + return -EINVAL; } /* Ensure that we are not runnable on dying cpu */ @@ -179,22 +179,32 @@ out_thread: err = kthread_stop(p); out_allowed: set_cpus_allowed(current, old_allowed); -out: + return err; +} + +int cpu_down(unsigned int cpu) +{ + int err = 0; + + mutex_lock(&cpu_add_remove_lock); + if (cpu_hotplug_disabled) + err = -EBUSY; + else + err = _cpu_down(cpu); + mutex_unlock(&cpu_add_remove_lock); return err; } #endif /*CONFIG_HOTPLUG_CPU*/ -int __devinit cpu_up(unsigned int cpu) +/* Requires cpu_add_remove_lock to be held */ +static int __devinit _cpu_up(unsigned int cpu) { int ret; void *hcpu = (void *)(long)cpu; - mutex_lock(&cpu_add_remove_lock); - if (cpu_online(cpu) || !cpu_present(cpu)) { - ret = -EINVAL; - goto out; - } + if (cpu_online(cpu) || !cpu_present(cpu)) + return -EINVAL; ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); if (ret == NOTIFY_BAD) { @@ -219,7 +229,95 @@ out_notify: if (ret != 0) blocking_notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); + + return ret; +} + +int __devinit cpu_up(unsigned int cpu) +{ + int err = 0; + + mutex_lock(&cpu_add_remove_lock); + if (cpu_hotplug_disabled) + err = -EBUSY; + else + err = _cpu_up(cpu); + + mutex_unlock(&cpu_add_remove_lock); + return err; +} + +#ifdef CONFIG_SUSPEND_SMP +static cpumask_t frozen_cpus; + +int disable_nonboot_cpus(void) +{ + int cpu, first_cpu, error; + + mutex_lock(&cpu_add_remove_lock); + first_cpu = first_cpu(cpu_present_map); + if (!cpu_online(first_cpu)) { + error = _cpu_up(first_cpu); + if (error) { + printk(KERN_ERR "Could not bring CPU%d up.\n", + first_cpu); + goto out; + } + } + error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu)); + if (error) { + printk(KERN_ERR "Could not run on CPU%d\n", first_cpu); + goto out; + } + /* We take down all of the non-boot CPUs in one shot to avoid races + * with the userspace trying to use the CPU hotplug at the same time + */ + cpus_clear(frozen_cpus); + printk("Disabling non-boot CPUs ...\n"); + for_each_online_cpu(cpu) { + if (cpu == first_cpu) + continue; + error = _cpu_down(cpu); + if (!error) { + cpu_set(cpu, frozen_cpus); + printk("CPU%d is down\n", cpu); + } else { + printk(KERN_ERR "Error taking CPU%d down: %d\n", + cpu, error); + break; + } + } + if (!error) { + BUG_ON(num_online_cpus() > 1); + /* Make sure the CPUs won't be enabled by someone else */ + cpu_hotplug_disabled = 1; + } else { + printk(KERN_ERR "Non-boot CPUs are not disabled"); + } out: mutex_unlock(&cpu_add_remove_lock); - return ret; + return error; +} + +void enable_nonboot_cpus(void) +{ + int cpu, error; + + /* Allow everyone to use the CPU hotplug again */ + mutex_lock(&cpu_add_remove_lock); + cpu_hotplug_disabled = 0; + mutex_unlock(&cpu_add_remove_lock); + + printk("Enabling non-boot CPUs ...\n"); + 
for_each_cpu_mask(cpu, frozen_cpus) { + error = cpu_up(cpu); + if (!error) { + printk("CPU%d is up\n", cpu); + continue; + } + printk(KERN_WARNING "Error taking CPU%d up: %d\n", + cpu, error); + } + cpus_clear(frozen_cpus); } +#endif diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 8d0af3d37a4b..38725f526afc 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -7,6 +7,4 @@ obj-y := main.o process.o console.o obj-$(CONFIG_PM_LEGACY) += pm.o obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o disk.o snapshot.o swap.o user.o -obj-$(CONFIG_SUSPEND_SMP) += smp.o - obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/disk.c b/kernel/power/disk.c index e13e74067845..7c7b9b65e365 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "power.h" @@ -72,7 +73,10 @@ static int prepare_processes(void) int error; pm_prepare_console(); - disable_nonboot_cpus(); + + error = disable_nonboot_cpus(); + if (error) + goto enable_cpus; if (freeze_processes()) { error = -EBUSY; @@ -84,6 +88,7 @@ static int prepare_processes(void) return 0; thaw: thaw_processes(); +enable_cpus: enable_nonboot_cpus(); pm_restore_console(); return error; diff --git a/kernel/power/main.c b/kernel/power/main.c index 6d295c776794..4d403323a7bb 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "power.h" @@ -51,7 +52,7 @@ void pm_set_ops(struct pm_ops * ops) static int suspend_prepare(suspend_state_t state) { - int error = 0; + int error; unsigned int free_pages; if (!pm_ops || !pm_ops->enter) @@ -59,12 +60,9 @@ static int suspend_prepare(suspend_state_t state) pm_prepare_console(); - disable_nonboot_cpus(); - - if (num_online_cpus() != 1) { - error = -EPERM; + error = disable_nonboot_cpus(); + if (error) goto Enable_cpu; - } if (freeze_processes()) { error = -EAGAIN; diff --git a/kernel/power/smp.c b/kernel/power/smp.c deleted file mode 100644 index 5957312b2d68..000000000000 --- a/kernel/power/smp.c +++ /dev/null @@ -1,62 +0,0 @@ -/* - * drivers/power/smp.c - Functions for stopping other CPUs. - * - * Copyright 2004 Pavel Machek - * Copyright (C) 2002-2003 Nigel Cunningham - * - * This file is released under the GPLv2. 
- */ - -#undef DEBUG - -#include -#include -#include -#include -#include -#include -#include - -/* This is protected by pm_sem semaphore */ -static cpumask_t frozen_cpus; - -void disable_nonboot_cpus(void) -{ - int cpu, error; - - error = 0; - cpus_clear(frozen_cpus); - printk("Freezing cpus ...\n"); - for_each_online_cpu(cpu) { - if (cpu == 0) - continue; - error = cpu_down(cpu); - if (!error) { - cpu_set(cpu, frozen_cpus); - printk("CPU%d is down\n", cpu); - continue; - } - printk("Error taking cpu %d down: %d\n", cpu, error); - } - BUG_ON(raw_smp_processor_id() != 0); - if (error) - panic("cpus not sleeping"); -} - -void enable_nonboot_cpus(void) -{ - int cpu, error; - - printk("Thawing cpus ...\n"); - for_each_cpu_mask(cpu, frozen_cpus) { - error = cpu_up(cpu); - if (!error) { - printk("CPU%d is up\n", cpu); - continue; - } - printk("Error taking cpu %d up: %d\n", cpu, error); - panic("Not enough cpus"); - } - cpus_clear(frozen_cpus); -} - diff --git a/kernel/power/user.c b/kernel/power/user.c index 3f1539fbe48a..0ef5e4ba39e5 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -139,12 +140,15 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, if (data->frozen) break; down(&pm_sem); - disable_nonboot_cpus(); - if (freeze_processes()) { - thaw_processes(); - enable_nonboot_cpus(); - error = -EBUSY; + error = disable_nonboot_cpus(); + if (!error) { + error = freeze_processes(); + if (error) { + thaw_processes(); + error = -EBUSY; + } } + enable_nonboot_cpus(); up(&pm_sem); if (!error) data->frozen = 1; -- cgit v1.2.3 From f623f0db8e6aa86a37be86167e4ff478821a9f4f Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:49 -0700 Subject: [PATCH] swsusp: Fix mark_free_pages Clean up mm/page_alloc.c#mark_free_pages() and make it avoid clearing PageNosaveFree for PageNosave pages. This allows us to get rid of an ugly hack in kernel/power/snapshot.c#copy_data_pages(). Additionally, the page-copying loop in copy_data_pages() is moved to an inline function. Signed-off-by: Rafael J. Wysocki Cc: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 81fe8de9e604..4afb7900002b 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -208,37 +208,36 @@ unsigned int count_data_pages(void) return n; } +static inline void copy_data_page(long *dst, long *src) +{ + int n; + + /* copy_page and memcpy are not usable for copying task structs. 
*/ + for (n = PAGE_SIZE / sizeof(long); n; n--) + *dst++ = *src++; +} + static void copy_data_pages(struct pbe *pblist) { struct zone *zone; unsigned long pfn, max_zone_pfn; - struct pbe *pbe, *p; + struct pbe *pbe; pbe = pblist; for_each_zone (zone) { if (is_highmem(zone)) continue; mark_free_pages(zone); - /* This is necessary for swsusp_free() */ - for_each_pb_page (p, pblist) - SetPageNosaveFree(virt_to_page(p)); - for_each_pbe (p, pblist) - SetPageNosaveFree(virt_to_page(p->address)); max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { struct page *page = saveable_page(pfn); if (page) { - long *src, *dst; - int n; + void *ptr = page_address(page); BUG_ON(!pbe); - pbe->orig_address = (unsigned long)page_address(page); - /* copy_page and memcpy are not usable for copying task structs. */ - dst = (long *)pbe->address; - src = (long *)pbe->orig_address; - for (n = PAGE_SIZE / sizeof(long); n; n--) - *dst++ = *src++; + copy_data_page((void *)pbe->address, ptr); + pbe->orig_address = (unsigned long)ptr; pbe = pbe->next; } } -- cgit v1.2.3 From f6143aa60ed71e58578bc92cc64d98158a694d99 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:50 -0700 Subject: [PATCH] swsusp: Reorder memory-allocating functions Move some functions in kernel/power/snapshot.c to a better place (in the same file) and introduce free_image_page() (will be necessary in the future). Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 93 ++++++++++++++++++++++++++++--------------------- 1 file changed, 53 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 4afb7900002b..7ad0c0465524 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -156,6 +156,58 @@ static inline int save_highmem(void) {return 0;} static inline int restore_highmem(void) {return 0;} #endif +/** + * @safe_needed - on resume, for storing the PBE list and the image, + * we can only use memory pages that do not conflict with the pages + * used before suspend. 
+ * + * The unsafe pages are marked with the PG_nosave_free flag + * and we count them using unsafe_pages + */ + +static unsigned int unsafe_pages; + +static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) +{ + void *res; + + res = (void *)get_zeroed_page(gfp_mask); + if (safe_needed) + while (res && PageNosaveFree(virt_to_page(res))) { + /* The page is unsafe, mark it for swsusp_free() */ + SetPageNosave(virt_to_page(res)); + unsafe_pages++; + res = (void *)get_zeroed_page(gfp_mask); + } + if (res) { + SetPageNosave(virt_to_page(res)); + SetPageNosaveFree(virt_to_page(res)); + } + return res; +} + +unsigned long get_safe_page(gfp_t gfp_mask) +{ + return (unsigned long)alloc_image_page(gfp_mask, 1); +} + +/** + * free_image_page - free page represented by @addr, allocated with + * alloc_image_page (page flags set by it must be cleared) + */ + +static inline void free_image_page(void *addr, int clear_nosave_free) +{ + ClearPageNosave(virt_to_page(addr)); + if (clear_nosave_free) + ClearPageNosaveFree(virt_to_page(addr)); + free_page((unsigned long)addr); +} + +/** + * pfn_is_nosave - check if given pfn is in the 'nosave' section + */ + static inline int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; @@ -245,7 +297,6 @@ static void copy_data_pages(struct pbe *pblist) BUG_ON(pbe); } - /** * free_pagedir - free pages allocated with alloc_pagedir() */ @@ -256,10 +307,7 @@ static void free_pagedir(struct pbe *pblist, int clear_nosave_free) while (pblist) { pbe = (pblist + PB_PAGE_SKIP)->next; - ClearPageNosave(virt_to_page(pblist)); - if (clear_nosave_free) - ClearPageNosaveFree(virt_to_page(pblist)); - free_page((unsigned long)pblist); + free_image_page(pblist, clear_nosave_free); pblist = pbe; } } @@ -303,41 +351,6 @@ static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) } } -static unsigned int unsafe_pages; - -/** - * @safe_needed - on resume, for storing the PBE list and the image, - * we can only use memory pages that do not conflict with the pages - * used before suspend. - * - * The unsafe pages are marked with the PG_nosave_free flag - * and we count them using unsafe_pages - */ - -static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) -{ - void *res; - - res = (void *)get_zeroed_page(gfp_mask); - if (safe_needed) - while (res && PageNosaveFree(virt_to_page(res))) { - /* The page is unsafe, mark it for swsusp_free() */ - SetPageNosave(virt_to_page(res)); - unsafe_pages++; - res = (void *)get_zeroed_page(gfp_mask); - } - if (res) { - SetPageNosave(virt_to_page(res)); - SetPageNosaveFree(virt_to_page(res)); - } - return res; -} - -unsigned long get_safe_page(gfp_t gfp_mask) -{ - return (unsigned long)alloc_image_page(gfp_mask, 1); -} - /** * alloc_pagedir - Allocate the page directory. * -- cgit v1.2.3 From cd560bb2f9e2cd451bb3942af43da19632ba4a8e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:50 -0700 Subject: [PATCH] swsusp: Fix alloc_pagedir Get rid of the FIXME in kernel/power/snapshot.c#alloc_pagedir() and simplify the functions called by it. Signed-off-by: Rafael J. 
Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 7ad0c0465524..4ca372f2bc14 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -316,12 +316,12 @@ static void free_pagedir(struct pbe *pblist, int clear_nosave_free) * fill_pb_page - Create a list of PBEs on a given memory page */ -static inline void fill_pb_page(struct pbe *pbpage) +static inline void fill_pb_page(struct pbe *pbpage, unsigned int n) { struct pbe *p; p = pbpage; - pbpage += PB_PAGE_SKIP; + pbpage += n - 1; do p->next = p + 1; while (++p < pbpage); @@ -330,24 +330,26 @@ static inline void fill_pb_page(struct pbe *pbpage) /** * create_pbe_list - Create a list of PBEs on top of a given chain * of memory pages allocated with alloc_pagedir() + * + * This function assumes that pages allocated by alloc_image_page() will + * always be zeroed. */ static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) { - struct pbe *pbpage, *p; + struct pbe *pbpage; unsigned int num = PBES_PER_PAGE; for_each_pb_page (pbpage, pblist) { if (num >= nr_pages) break; - fill_pb_page(pbpage); + fill_pb_page(pbpage, PBES_PER_PAGE); num += PBES_PER_PAGE; } if (pbpage) { - for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++) - p->next = p + 1; - p->next = NULL; + num -= PBES_PER_PAGE; + fill_pb_page(pbpage, nr_pages - num); } } @@ -374,17 +376,17 @@ static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, return NULL; pblist = alloc_image_page(gfp_mask, safe_needed); - /* FIXME: rewrite this ugly loop */ - for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages; - pbe = pbe->next, num += PBES_PER_PAGE) { + pbe = pblist; + for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) { + if (!pbe) { + free_pagedir(pblist, 1); + return NULL; + } pbe += PB_PAGE_SKIP; pbe->next = alloc_image_page(gfp_mask, safe_needed); + pbe = pbe->next; } - if (!pbe) { /* get_zeroed_page() failed */ - free_pagedir(pblist, 1); - pblist = NULL; - } else - create_pbe_list(pblist, nr_pages); + create_pbe_list(pblist, nr_pages); return pblist; } -- cgit v1.2.3 From 75534b50cc658e951bcb213c2763c81e9f7b0b48 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:52 -0700 Subject: [PATCH] Change the name of pagedir_nosave The name of the pagedir_nosave variable does not make sense any more, so it seems reasonable to change it to something more meaningful. Signed-off-by: Rafael J. 
Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/power.h | 2 -- kernel/power/snapshot.c | 26 ++++++++++++++------------ 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/power/power.h b/kernel/power/power.h index 1cefcf87a694..e18ba207e784 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -38,8 +38,6 @@ extern struct subsystem power_subsys; /* References to section boundaries */ extern const void __nosave_begin, __nosave_end; -extern struct pbe *pagedir_nosave; - /* Preferred image size in bytes (default 500 MB) */ extern unsigned long image_size; extern int in_suspend; diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 4ca372f2bc14..1d276b3ae152 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -34,7 +34,9 @@ #include "power.h" -struct pbe *pagedir_nosave; +/* List of PBEs used for creating and restoring the suspend image */ +struct pbe *restore_pblist; + static unsigned int nr_copy_pages; static unsigned int nr_meta_pages; static unsigned long *buffer; @@ -415,7 +417,7 @@ void swsusp_free(void) } nr_copy_pages = 0; nr_meta_pages = 0; - pagedir_nosave = NULL; + restore_pblist = NULL; buffer = NULL; } @@ -490,15 +492,15 @@ asmlinkage int swsusp_save(void) return -ENOMEM; } - pagedir_nosave = swsusp_alloc(nr_pages); - if (!pagedir_nosave) + restore_pblist = swsusp_alloc(nr_pages); + if (!restore_pblist) return -ENOMEM; /* During allocating of suspend pagedir, new cold pages may appear. * Kill them. */ drain_local_pages(); - copy_data_pages(pagedir_nosave); + copy_data_pages(restore_pblist); /* * End of critical section. From now on, we can write to memory, @@ -580,13 +582,13 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) if (!handle->offset) { init_header((struct swsusp_info *)buffer); handle->buffer = buffer; - handle->pbe = pagedir_nosave; + handle->pbe = restore_pblist; } if (handle->prev < handle->cur) { if (handle->cur <= nr_meta_pages) { handle->pbe = pack_orig_addresses(buffer, handle->pbe); if (!handle->pbe) - handle->pbe = pagedir_nosave; + handle->pbe = restore_pblist; } else { handle->buffer = (void *)handle->pbe->address; handle->pbe = handle->pbe->next; @@ -689,7 +691,7 @@ static int load_header(struct snapshot_handle *handle, pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0); if (!pblist) return -ENOMEM; - pagedir_nosave = pblist; + restore_pblist = pblist; handle->pbe = pblist; nr_copy_pages = info->image_pages; nr_meta_pages = info->pages - info->image_pages - 1; @@ -716,7 +718,7 @@ static inline struct pbe *unpack_orig_addresses(unsigned long *buf, /** * prepare_image - use metadata contained in the PBE list - * pointed to by pagedir_nosave to mark the pages that will + * pointed to by restore_pblist to mark the pages that will * be overwritten in the process of restoring the system * memory state from the image ("unsafe" pages) and allocate * memory for the image @@ -741,7 +743,7 @@ static int prepare_image(struct snapshot_handle *handle) unsigned int nr_pages = nr_copy_pages; struct pbe *p, *pblist = NULL; - p = pagedir_nosave; + p = restore_pblist; error = mark_unsafe_pages(p); if (!error) { pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1); @@ -773,7 +775,7 @@ static int prepare_image(struct snapshot_handle *handle) } } if (!error) { - pagedir_nosave = pblist; + restore_pblist = pblist; } else { handle->pbe = NULL; swsusp_free(); @@ -858,7 +860,7 @@ int snapshot_write_next(struct 
snapshot_handle *handle, size_t count) error = prepare_image(handle); if (error) return error; - handle->pbe = pagedir_nosave; + handle->pbe = restore_pblist; handle->last_pbe = NULL; handle->buffer = get_buffer(handle); handle->sync_read = 0; -- cgit v1.2.3 From 0bcd888d64684f896ffa70c1d16a42b00753c184 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:52 -0700 Subject: [PATCH] swsusp: Introduce some helpful constants Introduce some constants that hopefully will help improve the readability of code in kernel/power/snapshot.c. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/snapshot.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 1d276b3ae152..d0d691f976d8 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -167,6 +167,11 @@ static inline int restore_highmem(void) {return 0;} * and we count them using unsafe_pages */ +#define PG_ANY 0 +#define PG_SAFE 1 +#define PG_UNSAFE_CLEAR 1 +#define PG_UNSAFE_KEEP 0 + static unsigned int unsafe_pages; static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) @@ -190,7 +195,7 @@ static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) unsigned long get_safe_page(gfp_t gfp_mask) { - return (unsigned long)alloc_image_page(gfp_mask, 1); + return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE); } /** @@ -381,7 +386,7 @@ static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, pbe = pblist; for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) { if (!pbe) { - free_pagedir(pblist, 1); + free_pagedir(pblist, PG_UNSAFE_CLEAR); return NULL; } pbe += PB_PAGE_SKIP; @@ -458,12 +463,13 @@ static struct pbe *swsusp_alloc(unsigned int nr_pages) { struct pbe *pblist; - if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) { + pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, PG_ANY); + if (!pblist) { printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); return NULL; } - if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) { + if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, PG_ANY)) { printk(KERN_ERR "suspend: Allocating image pages failed.\n"); swsusp_free(); return NULL; @@ -575,7 +581,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) return 0; if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ - buffer = alloc_image_page(GFP_ATOMIC, 0); + buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); if (!buffer) return -ENOMEM; } @@ -688,7 +694,7 @@ static int load_header(struct snapshot_handle *handle, error = check_header(info); if (!error) { - pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, 0); + pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, PG_ANY); if (!pblist) return -ENOMEM; restore_pblist = pblist; @@ -746,10 +752,10 @@ static int prepare_image(struct snapshot_handle *handle) p = restore_pblist; error = mark_unsafe_pages(p); if (!error) { - pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, 1); + pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, PG_SAFE); if (pblist) copy_page_backup_list(pblist, p); - free_pagedir(p, 0); + free_pagedir(p, PG_UNSAFE_KEEP); if (!pblist) error = -ENOMEM; } @@ -840,7 +846,7 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) return 0; if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ - buffer = alloc_image_page(GFP_ATOMIC, 0); + 
buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); if (!buffer) return -ENOMEM; } -- cgit v1.2.3 From b788db79896ef2a5817b9395ad63573b254a6d93 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:54 -0700 Subject: [PATCH] swsusp: Introduce memory bitmaps Introduce the memory bitmap data structure and make swsusp use in the suspend phase. The current swsusp's internal data structure is not very efficient from the memory usage point of view, so it seems reasonable to replace it with a data structure that will require less memory, such as a pair of bitmaps. The idea is to use bitmaps that may be allocated as sets of individual pages, so that we can avoid making allocations of order greater than 0. For this reason the memory bitmap structure consists of several linked lists of objects that contain pointers to memory pages with the actual bitmap data. Still, for a typical system all of these lists fit in a single page, so it's reasonable to introduce an additional mechanism allowing us to allocate all of them efficiently without sacrificing the generality of the design. This is done with the help of the chain_allocator structure and associated functions. We need to use two memory bitmaps during the suspend phase of the suspend-resume cycle. One of them is necessary for marking the saveable pages, and the second is used to mark the pages in which to store the copies of them (aka image pages). First, the bitmaps are created and we allocate as many image pages as needed (the corresponding bits in the second bitmap are set as soon as the pages are allocated). Second, the bits corresponding to the saveable pages are set in the first bitmap and the saveable pages are copied to the image pages. Finally, the first bitmap is used to save the kernel virtual addresses of the saveable pages and the second one is used to save the contents of the image pages. Signed-off-by: Rafael J. 
Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/power.h | 3 +- kernel/power/snapshot.c | 602 ++++++++++++++++++++++++++++++++++++++++++------ kernel/power/swsusp.c | 5 +- 3 files changed, 540 insertions(+), 70 deletions(-) (limited to 'kernel') diff --git a/kernel/power/power.h b/kernel/power/power.h index e18ba207e784..6e9e2acc34f8 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -109,9 +109,10 @@ struct snapshot_handle { */ #define data_of(handle) ((handle).buffer + (handle).buf_offset) +extern unsigned int snapshot_additional_pages(struct zone *zone); extern int snapshot_read_next(struct snapshot_handle *handle, size_t count); extern int snapshot_write_next(struct snapshot_handle *handle, size_t count); -int snapshot_image_loaded(struct snapshot_handle *handle); +extern int snapshot_image_loaded(struct snapshot_handle *handle); #define SNAPSHOT_IOC_MAGIC '3' #define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index d0d691f976d8..852e0df41719 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -211,6 +211,467 @@ static inline void free_image_page(void *addr, int clear_nosave_free) free_page((unsigned long)addr); } +/* struct linked_page is used to build chains of pages */ + +#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *)) + +struct linked_page { + struct linked_page *next; + char data[LINKED_PAGE_DATA_SIZE]; +} __attribute__((packed)); + +static inline void +free_list_of_pages(struct linked_page *list, int clear_page_nosave) +{ + while (list) { + struct linked_page *lp = list->next; + + free_image_page(list, clear_page_nosave); + list = lp; + } +} + +/** + * struct chain_allocator is used for allocating small objects out of + * a linked list of pages called 'the chain'. + * + * The chain grows each time when there is no room for a new object in + * the current page. The allocated objects cannot be freed individually. + * It is only possible to free them all at once, by freeing the entire + * chain. + * + * NOTE: The chain allocator may be inefficient if the allocated objects + * are not much smaller than PAGE_SIZE. + */ + +struct chain_allocator { + struct linked_page *chain; /* the chain */ + unsigned int used_space; /* total size of objects allocated out + * of the current page + */ + gfp_t gfp_mask; /* mask for allocating pages */ + int safe_needed; /* if set, only "safe" pages are allocated */ +}; + +static void +chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed) +{ + ca->chain = NULL; + ca->used_space = LINKED_PAGE_DATA_SIZE; + ca->gfp_mask = gfp_mask; + ca->safe_needed = safe_needed; +} + +static void *chain_alloc(struct chain_allocator *ca, unsigned int size) +{ + void *ret; + + if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { + struct linked_page *lp; + + lp = alloc_image_page(ca->gfp_mask, ca->safe_needed); + if (!lp) + return NULL; + + lp->next = ca->chain; + ca->chain = lp; + ca->used_space = 0; + } + ret = ca->chain->data + ca->used_space; + ca->used_space += size; + return ret; +} + +static void chain_free(struct chain_allocator *ca, int clear_page_nosave) +{ + free_list_of_pages(ca->chain, clear_page_nosave); + memset(ca, 0, sizeof(struct chain_allocator)); +} + +/** + * Data types related to memory bitmaps. + * + * Memory bitmap is a structure consiting of many linked lists of + * objects. 
The main list's elements are of type struct zone_bitmap + * and each of them corresonds to one zone. For each zone bitmap + * object there is a list of objects of type struct bm_block that + * represent each blocks of bit chunks in which information is + * stored. + * + * struct memory_bitmap contains a pointer to the main list of zone + * bitmap objects, a struct bm_position used for browsing the bitmap, + * and a pointer to the list of pages used for allocating all of the + * zone bitmap objects and bitmap block objects. + * + * NOTE: It has to be possible to lay out the bitmap in memory + * using only allocations of order 0. Additionally, the bitmap is + * designed to work with arbitrary number of zones (this is over the + * top for now, but let's avoid making unnecessary assumptions ;-). + * + * struct zone_bitmap contains a pointer to a list of bitmap block + * objects and a pointer to the bitmap block object that has been + * most recently used for setting bits. Additionally, it contains the + * pfns that correspond to the start and end of the represented zone. + * + * struct bm_block contains a pointer to the memory page in which + * information is stored (in the form of a block of bit chunks + * of type unsigned long each). It also contains the pfns that + * correspond to the start and end of the represented memory area and + * the number of bit chunks in the block. + * + * NOTE: Memory bitmaps are used for two types of operations only: + * "set a bit" and "find the next bit set". Moreover, the searching + * is always carried out after all of the "set a bit" operations + * on given bitmap. + */ + +#define BM_END_OF_MAP (~0UL) + +#define BM_CHUNKS_PER_BLOCK (PAGE_SIZE / sizeof(long)) +#define BM_BITS_PER_CHUNK (sizeof(long) << 3) +#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) + +struct bm_block { + struct bm_block *next; /* next element of the list */ + unsigned long start_pfn; /* pfn represented by the first bit */ + unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ + unsigned int size; /* number of bit chunks */ + unsigned long *data; /* chunks of bits representing pages */ +}; + +struct zone_bitmap { + struct zone_bitmap *next; /* next element of the list */ + unsigned long start_pfn; /* minimal pfn in this zone */ + unsigned long end_pfn; /* maximal pfn in this zone plus 1 */ + struct bm_block *bm_blocks; /* list of bitmap blocks */ + struct bm_block *cur_block; /* recently used bitmap block */ +}; + +/* strcut bm_position is used for browsing memory bitmaps */ + +struct bm_position { + struct zone_bitmap *zone_bm; + struct bm_block *block; + int chunk; + int bit; +}; + +struct memory_bitmap { + struct zone_bitmap *zone_bm_list; /* list of zone bitmaps */ + struct linked_page *p_list; /* list of pages used to store zone + * bitmap objects and bitmap block + * objects + */ + struct bm_position cur; /* most recently used bit position */ +}; + +/* Functions that operate on memory bitmaps */ + +static inline void memory_bm_reset_chunk(struct memory_bitmap *bm) +{ + bm->cur.chunk = 0; + bm->cur.bit = -1; +} + +static void memory_bm_position_reset(struct memory_bitmap *bm) +{ + struct zone_bitmap *zone_bm; + + zone_bm = bm->zone_bm_list; + bm->cur.zone_bm = zone_bm; + bm->cur.block = zone_bm->bm_blocks; + memory_bm_reset_chunk(bm); +} + +static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); + +/** + * create_bm_block_list - create a list of block bitmap objects + */ + +static inline struct bm_block * +create_bm_block_list(unsigned int nr_blocks, 
struct chain_allocator *ca) +{ + struct bm_block *bblist = NULL; + + while (nr_blocks-- > 0) { + struct bm_block *bb; + + bb = chain_alloc(ca, sizeof(struct bm_block)); + if (!bb) + return NULL; + + bb->next = bblist; + bblist = bb; + } + return bblist; +} + +/** + * create_zone_bm_list - create a list of zone bitmap objects + */ + +static inline struct zone_bitmap * +create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca) +{ + struct zone_bitmap *zbmlist = NULL; + + while (nr_zones-- > 0) { + struct zone_bitmap *zbm; + + zbm = chain_alloc(ca, sizeof(struct zone_bitmap)); + if (!zbm) + return NULL; + + zbm->next = zbmlist; + zbmlist = zbm; + } + return zbmlist; +} + +/** + * memory_bm_create - allocate memory for a memory bitmap + */ + +static int +memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) +{ + struct chain_allocator ca; + struct zone *zone; + struct zone_bitmap *zone_bm; + struct bm_block *bb; + unsigned int nr; + + chain_init(&ca, gfp_mask, safe_needed); + + /* Compute the number of zones */ + nr = 0; + for_each_zone (zone) + if (populated_zone(zone) && !is_highmem(zone)) + nr++; + + /* Allocate the list of zones bitmap objects */ + zone_bm = create_zone_bm_list(nr, &ca); + bm->zone_bm_list = zone_bm; + if (!zone_bm) { + chain_free(&ca, PG_UNSAFE_CLEAR); + return -ENOMEM; + } + + /* Initialize the zone bitmap objects */ + for_each_zone (zone) { + unsigned long pfn; + + if (!populated_zone(zone) || is_highmem(zone)) + continue; + + zone_bm->start_pfn = zone->zone_start_pfn; + zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages; + /* Allocate the list of bitmap block objects */ + nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); + bb = create_bm_block_list(nr, &ca); + zone_bm->bm_blocks = bb; + zone_bm->cur_block = bb; + if (!bb) + goto Free; + + nr = zone->spanned_pages; + pfn = zone->zone_start_pfn; + /* Initialize the bitmap block objects */ + while (bb) { + unsigned long *ptr; + + ptr = alloc_image_page(gfp_mask, safe_needed); + bb->data = ptr; + if (!ptr) + goto Free; + + bb->start_pfn = pfn; + if (nr >= BM_BITS_PER_BLOCK) { + pfn += BM_BITS_PER_BLOCK; + bb->size = BM_CHUNKS_PER_BLOCK; + nr -= BM_BITS_PER_BLOCK; + } else { + /* This is executed only once in the loop */ + pfn += nr; + bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK); + } + bb->end_pfn = pfn; + bb = bb->next; + } + zone_bm = zone_bm->next; + } + bm->p_list = ca.chain; + memory_bm_position_reset(bm); + return 0; + +Free: + bm->p_list = ca.chain; + memory_bm_free(bm, PG_UNSAFE_CLEAR); + return -ENOMEM; +} + +/** + * memory_bm_free - free memory occupied by the memory bitmap @bm + */ + +static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) +{ + struct zone_bitmap *zone_bm; + + /* Free the list of bit blocks for each zone_bitmap object */ + zone_bm = bm->zone_bm_list; + while (zone_bm) { + struct bm_block *bb; + + bb = zone_bm->bm_blocks; + while (bb) { + if (bb->data) + free_image_page(bb->data, clear_nosave_free); + bb = bb->next; + } + zone_bm = zone_bm->next; + } + free_list_of_pages(bm->p_list, clear_nosave_free); + bm->zone_bm_list = NULL; +} + +/** + * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds + * to given pfn. The cur_zone_bm member of @bm and the cur_block member + * of @bm->cur_zone_bm are updated. + * + * If the bit cannot be set, the function returns -EINVAL . 
+ */ + +static int +memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) +{ + struct zone_bitmap *zone_bm; + struct bm_block *bb; + + /* Check if the pfn is from the current zone */ + zone_bm = bm->cur.zone_bm; + if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { + zone_bm = bm->zone_bm_list; + /* We don't assume that the zones are sorted by pfns */ + while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { + zone_bm = zone_bm->next; + if (unlikely(!zone_bm)) + return -EINVAL; + } + bm->cur.zone_bm = zone_bm; + } + /* Check if the pfn corresponds to the current bitmap block */ + bb = zone_bm->cur_block; + if (pfn < bb->start_pfn) + bb = zone_bm->bm_blocks; + + while (pfn >= bb->end_pfn) { + bb = bb->next; + if (unlikely(!bb)) + return -EINVAL; + } + zone_bm->cur_block = bb; + pfn -= bb->start_pfn; + set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK); + return 0; +} + +/* Two auxiliary functions for memory_bm_next_pfn */ + +/* Find the first set bit in the given chunk, if there is one */ + +static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p) +{ + bit++; + while (bit < BM_BITS_PER_CHUNK) { + if (test_bit(bit, chunk_p)) + return bit; + + bit++; + } + return -1; +} + +/* Find a chunk containing some bits set in given block of bits */ + +static inline int next_chunk_in_block(int n, struct bm_block *bb) +{ + n++; + while (n < bb->size) { + if (bb->data[n]) + return n; + + n++; + } + return -1; +} + +/** + * memory_bm_next_pfn - find the pfn that corresponds to the next set bit + * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is + * returned. + * + * It is required to run memory_bm_position_reset() before the first call to + * this function. + */ + +static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) +{ + struct zone_bitmap *zone_bm; + struct bm_block *bb; + int chunk; + int bit; + + do { + bb = bm->cur.block; + do { + chunk = bm->cur.chunk; + bit = bm->cur.bit; + do { + bit = next_bit_in_chunk(bit, bb->data + chunk); + if (bit >= 0) + goto Return_pfn; + + chunk = next_chunk_in_block(chunk, bb); + bit = -1; + } while (chunk >= 0); + bb = bb->next; + bm->cur.block = bb; + memory_bm_reset_chunk(bm); + } while (bb); + zone_bm = bm->cur.zone_bm->next; + if (zone_bm) { + bm->cur.zone_bm = zone_bm; + bm->cur.block = zone_bm->bm_blocks; + memory_bm_reset_chunk(bm); + } + } while (zone_bm); + memory_bm_position_reset(bm); + return BM_END_OF_MAP; + +Return_pfn: + bm->cur.chunk = chunk; + bm->cur.bit = bit; + return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit; +} + +/** + * snapshot_additional_pages - estimate the number of additional pages + * be needed for setting up the suspend image data structures for given + * zone (usually the returned value is greater than the exact number) + */ + +unsigned int snapshot_additional_pages(struct zone *zone) +{ + unsigned int res; + + res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); + res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE); + return res; +} + /** * pfn_is_nosave - check if given pfn is in the 'nosave' section */ @@ -276,32 +737,38 @@ static inline void copy_data_page(long *dst, long *src) *dst++ = *src++; } -static void copy_data_pages(struct pbe *pblist) +static void +copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm) { struct zone *zone; - unsigned long pfn, max_zone_pfn; - struct pbe *pbe; + unsigned long pfn; - pbe = pblist; for_each_zone (zone) { + unsigned long max_zone_pfn; + if (is_highmem(zone)) continue; + 
mark_free_pages(zone); max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; - for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { - struct page *page = saveable_page(pfn); - - if (page) { - void *ptr = page_address(page); - - BUG_ON(!pbe); - copy_data_page((void *)pbe->address, ptr); - pbe->orig_address = (unsigned long)ptr; - pbe = pbe->next; - } - } + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) + if (saveable_page(pfn)) + memory_bm_set_bit(orig_bm, pfn); } - BUG_ON(pbe); + memory_bm_position_reset(orig_bm); + memory_bm_position_reset(copy_bm); + do { + pfn = memory_bm_next_pfn(orig_bm); + if (likely(pfn != BM_END_OF_MAP)) { + struct page *page; + void *src; + + page = pfn_to_page(pfn); + src = page_address(page); + page = pfn_to_page(memory_bm_next_pfn(copy_bm)); + copy_data_page(page_address(page), src); + } + } while (pfn != BM_END_OF_MAP); } /** @@ -447,37 +914,43 @@ static int enough_free_mem(unsigned int nr_pages) (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); } -static int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed) +static int +swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, + unsigned int nr_pages) { - struct pbe *p; + int error; - for_each_pbe (p, pblist) { - p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed); - if (!p->address) - return -ENOMEM; - } - return 0; -} + error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); + if (error) + goto Free; -static struct pbe *swsusp_alloc(unsigned int nr_pages) -{ - struct pbe *pblist; + error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); + if (error) + goto Free; - pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, PG_ANY); - if (!pblist) { - printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); - return NULL; - } + while (nr_pages-- > 0) { + struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (!page) + goto Free; - if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, PG_ANY)) { - printk(KERN_ERR "suspend: Allocating image pages failed.\n"); - swsusp_free(); - return NULL; + SetPageNosave(page); + SetPageNosaveFree(page); + memory_bm_set_bit(copy_bm, page_to_pfn(page)); } + return 0; - return pblist; +Free: + swsusp_free(); + return -ENOMEM; } +/* Memory bitmap used for marking saveable pages */ +static struct memory_bitmap orig_bm; +/* Memory bitmap used for marking allocated pages that will contain the copies + * of saveable pages + */ +static struct memory_bitmap copy_bm; + asmlinkage int swsusp_save(void) { unsigned int nr_pages; @@ -498,15 +971,14 @@ asmlinkage int swsusp_save(void) return -ENOMEM; } - restore_pblist = swsusp_alloc(nr_pages); - if (!restore_pblist) + if (swsusp_alloc(&orig_bm, ©_bm, nr_pages)) return -ENOMEM; /* During allocating of suspend pagedir, new cold pages may appear. * Kill them. */ drain_local_pages(); - copy_data_pages(restore_pblist); + copy_data_pages(©_bm, &orig_bm); /* * End of critical section. 
From now on, we can write to memory, @@ -535,22 +1007,23 @@ static void init_header(struct swsusp_info *info) } /** - * pack_orig_addresses - the .orig_address fields of the PBEs from the - * list starting at @pbe are stored in the array @buf[] (1 page) + * pack_addresses - the addresses corresponding to pfns found in the + * bitmap @bm are stored in the array @buf[] (1 page) */ -static inline struct pbe *pack_orig_addresses(unsigned long *buf, struct pbe *pbe) +static inline void +pack_addresses(unsigned long *buf, struct memory_bitmap *bm) { int j; - for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { - buf[j] = pbe->orig_address; - pbe = pbe->next; + for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { + unsigned long pfn = memory_bm_next_pfn(bm); + + if (unlikely(pfn == BM_END_OF_MAP)) + break; + + buf[j] = (unsigned long)page_address(pfn_to_page(pfn)); } - if (!pbe) - for (; j < PAGE_SIZE / sizeof(long); j++) - buf[j] = 0; - return pbe; } /** @@ -579,6 +1052,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) { if (handle->cur > nr_meta_pages + nr_copy_pages) return 0; + if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); @@ -588,16 +1062,17 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) if (!handle->offset) { init_header((struct swsusp_info *)buffer); handle->buffer = buffer; - handle->pbe = restore_pblist; + memory_bm_position_reset(&orig_bm); + memory_bm_position_reset(©_bm); } if (handle->prev < handle->cur) { if (handle->cur <= nr_meta_pages) { - handle->pbe = pack_orig_addresses(buffer, handle->pbe); - if (!handle->pbe) - handle->pbe = restore_pblist; + memset(buffer, 0, PAGE_SIZE); + pack_addresses(buffer, &orig_bm); } else { - handle->buffer = (void *)handle->pbe->address; - handle->pbe = handle->pbe->next; + unsigned long pfn = memory_bm_next_pfn(©_bm); + + handle->buffer = page_address(pfn_to_page(pfn)); } handle->prev = handle->cur; } @@ -736,12 +1211,7 @@ static inline struct pbe *unpack_orig_addresses(unsigned long *buf, * of "safe" which will be used later */ -struct safe_page { - struct safe_page *next; - char padding[PAGE_SIZE - sizeof(void *)]; -}; - -static struct safe_page *safe_pages; +static struct linked_page *safe_pages; static int prepare_image(struct snapshot_handle *handle) { @@ -763,9 +1233,9 @@ static int prepare_image(struct snapshot_handle *handle) if (!error && nr_pages > unsafe_pages) { nr_pages -= unsafe_pages; while (nr_pages--) { - struct safe_page *ptr; + struct linked_page *ptr; - ptr = (struct safe_page *)get_zeroed_page(GFP_ATOMIC); + ptr = (void *)get_zeroed_page(GFP_ATOMIC); if (!ptr) { error = -ENOMEM; break; diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 17f669c83012..8ef677ea0cea 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -193,14 +193,13 @@ int swsusp_shrink_memory(void) printk("Shrinking memory... "); do { size = 2 * count_highmem_pages(); - size += size / 50 + count_data_pages(); - size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE + - PAGES_FOR_IO; + size += size / 50 + count_data_pages() + PAGES_FOR_IO; tmp = size; for_each_zone (zone) if (!is_highmem(zone) && populated_zone(zone)) { tmp -= zone->free_pages; tmp += zone->lowmem_reserve[ZONE_NORMAL]; + tmp += snapshot_additional_pages(zone); } if (tmp > 0) { tmp = __shrink_memory(tmp); -- cgit v1.2.3 From 940864ddabdb180e02041c4dcd46ba6f9eee732f Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Mon, 25 Sep 2006 23:32:55 -0700 Subject: [PATCH] swsusp: Use memory bitmaps during resume Make swsusp use memory bitmaps to store its internal information during the resume phase of the suspend-resume cycle. If the pfns of saveable pages are saved during the suspend phase instead of the kernel virtual addresses of these pages, we can use them during the resume phase directly to set the corresponding bits in a memory bitmap. Then, this bitmap is used to mark the page frames corresponding to the pages that were saveable before the suspend (aka "unsafe" page frames). Next, we allocate as many page frames as needed to store the entire suspend image and make sure that there will be some extra free "safe" page frames for the list of PBEs constructed later. Subsequently, the image is loaded and, if possible, the data loaded from it are written into their "original" page frames (ie. the ones they had occupied before the suspend). The image data that cannot be written into their "original" page frames are loaded into "safe" page frames and their "original" kernel virtual addresses, as well as the addresses of the "safe" pages containing their copies, are stored in a list of PBEs. Finally, the list of PBEs is used to copy the remaining image data into their "original" page frames (this is done atomically, by the architecture-dependent parts of swsusp). Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/power.h | 11 +- kernel/power/snapshot.c | 416 +++++++++++++++++++++--------------------------- kernel/power/swap.c | 4 +- kernel/power/user.c | 1 + 4 files changed, 186 insertions(+), 246 deletions(-) (limited to 'kernel') diff --git a/kernel/power/power.h b/kernel/power/power.h index 6e9e2acc34f8..bfe999f7b272 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -81,16 +81,6 @@ struct snapshot_handle { unsigned int prev; /* number of the block of PAGE_SIZE bytes that * was the current one previously */ - struct pbe *pbe; /* PBE that corresponds to 'buffer' */ - struct pbe *last_pbe; /* When the image is restored (eg. read - * from disk) we can store some image - * data directly in the page frames - * in which they were before suspend. - * In such a case the PBEs that - * correspond to them will be unused. - * This is the last PBE, so far, that - * does not correspond to such data. 
- */ void *buffer; /* address of the block to read from * or write to */ @@ -113,6 +103,7 @@ extern unsigned int snapshot_additional_pages(struct zone *zone); extern int snapshot_read_next(struct snapshot_handle *handle, size_t count); extern int snapshot_write_next(struct snapshot_handle *handle, size_t count); extern int snapshot_image_loaded(struct snapshot_handle *handle); +extern void snapshot_free_unused_memory(struct snapshot_handle *handle); #define SNAPSHOT_IOC_MAGIC '3' #define SNAPSHOT_FREEZE _IO(SNAPSHOT_IOC_MAGIC, 1) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 852e0df41719..1b84313cbab5 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -39,7 +39,7 @@ struct pbe *restore_pblist; static unsigned int nr_copy_pages; static unsigned int nr_meta_pages; -static unsigned long *buffer; +static void *buffer; #ifdef CONFIG_HIGHMEM unsigned int count_highmem_pages(void) @@ -172,7 +172,7 @@ static inline int restore_highmem(void) {return 0;} #define PG_UNSAFE_CLEAR 1 #define PG_UNSAFE_KEEP 0 -static unsigned int unsafe_pages; +static unsigned int allocated_unsafe_pages; static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) { @@ -183,7 +183,7 @@ static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) while (res && PageNosaveFree(virt_to_page(res))) { /* The page is unsafe, mark it for swsusp_free() */ SetPageNosave(virt_to_page(res)); - unsafe_pages++; + allocated_unsafe_pages++; res = (void *)get_zeroed_page(gfp_mask); } if (res) { @@ -772,101 +772,10 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm) } /** - * free_pagedir - free pages allocated with alloc_pagedir() - */ - -static void free_pagedir(struct pbe *pblist, int clear_nosave_free) -{ - struct pbe *pbe; - - while (pblist) { - pbe = (pblist + PB_PAGE_SKIP)->next; - free_image_page(pblist, clear_nosave_free); - pblist = pbe; - } -} - -/** - * fill_pb_page - Create a list of PBEs on a given memory page - */ - -static inline void fill_pb_page(struct pbe *pbpage, unsigned int n) -{ - struct pbe *p; - - p = pbpage; - pbpage += n - 1; - do - p->next = p + 1; - while (++p < pbpage); -} - -/** - * create_pbe_list - Create a list of PBEs on top of a given chain - * of memory pages allocated with alloc_pagedir() + * swsusp_free - free pages allocated for the suspend. * - * This function assumes that pages allocated by alloc_image_page() will - * always be zeroed. - */ - -static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages) -{ - struct pbe *pbpage; - unsigned int num = PBES_PER_PAGE; - - for_each_pb_page (pbpage, pblist) { - if (num >= nr_pages) - break; - - fill_pb_page(pbpage, PBES_PER_PAGE); - num += PBES_PER_PAGE; - } - if (pbpage) { - num -= PBES_PER_PAGE; - fill_pb_page(pbpage, nr_pages - num); - } -} - -/** - * alloc_pagedir - Allocate the page directory. - * - * First, determine exactly how many pages we need and - * allocate them. - * - * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE - * struct pbe elements (pbes) and the last element in the page points - * to the next page. - * - * On each page we set up a list of struct_pbe elements. 
- */ - -static struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, - int safe_needed) -{ - unsigned int num; - struct pbe *pblist, *pbe; - - if (!nr_pages) - return NULL; - - pblist = alloc_image_page(gfp_mask, safe_needed); - pbe = pblist; - for (num = PBES_PER_PAGE; num < nr_pages; num += PBES_PER_PAGE) { - if (!pbe) { - free_pagedir(pblist, PG_UNSAFE_CLEAR); - return NULL; - } - pbe += PB_PAGE_SKIP; - pbe->next = alloc_image_page(gfp_mask, safe_needed); - pbe = pbe->next; - } - create_pbe_list(pblist, nr_pages); - return pblist; -} - -/** - * Free pages we allocated for suspend. Suspend pages are alocated - * before atomic copy, so we need to free them after resume. + * Suspend pages are alocated before the atomic copy is made, so we + * need to release them after the resume. */ void swsusp_free(void) @@ -904,14 +813,18 @@ void swsusp_free(void) static int enough_free_mem(unsigned int nr_pages) { struct zone *zone; - unsigned int n = 0; + unsigned int free = 0, meta = 0; for_each_zone (zone) - if (!is_highmem(zone)) - n += zone->free_pages; - pr_debug("swsusp: available memory: %u pages\n", n); - return n > (nr_pages + PAGES_FOR_IO + - (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); + if (!is_highmem(zone)) { + free += zone->free_pages; + meta += snapshot_additional_pages(zone); + } + + pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n", + nr_pages, PAGES_FOR_IO, meta, free); + + return free > nr_pages + PAGES_FOR_IO + meta; } static int @@ -961,11 +874,6 @@ asmlinkage int swsusp_save(void) nr_pages = count_data_pages(); printk("swsusp: Need to copy %u pages\n", nr_pages); - pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n", - nr_pages, - (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE, - PAGES_FOR_IO, nr_free_pages()); - if (!enough_free_mem(nr_pages)) { printk(KERN_ERR "swsusp: Not enough free memory\n"); return -ENOMEM; @@ -1007,22 +915,19 @@ static void init_header(struct swsusp_info *info) } /** - * pack_addresses - the addresses corresponding to pfns found in the - * bitmap @bm are stored in the array @buf[] (1 page) + * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm + * are stored in the array @buf[] (1 page at a time) */ static inline void -pack_addresses(unsigned long *buf, struct memory_bitmap *bm) +pack_pfns(unsigned long *buf, struct memory_bitmap *bm) { int j; for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { - unsigned long pfn = memory_bm_next_pfn(bm); - - if (unlikely(pfn == BM_END_OF_MAP)) + buf[j] = memory_bm_next_pfn(bm); + if (unlikely(buf[j] == BM_END_OF_MAP)) break; - - buf[j] = (unsigned long)page_address(pfn_to_page(pfn)); } } @@ -1068,7 +973,7 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) if (handle->prev < handle->cur) { if (handle->cur <= nr_meta_pages) { memset(buffer, 0, PAGE_SIZE); - pack_addresses(buffer, &orig_bm); + pack_pfns(buffer, &orig_bm); } else { unsigned long pfn = memory_bm_next_pfn(©_bm); @@ -1094,14 +999,10 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) * had been used before suspend */ -static int mark_unsafe_pages(struct pbe *pblist) +static int mark_unsafe_pages(struct memory_bitmap *bm) { struct zone *zone; unsigned long pfn, max_zone_pfn; - struct pbe *p; - - if (!pblist) /* a sanity check */ - return -EINVAL; /* Clear page flags */ for_each_zone (zone) { @@ -1111,30 +1012,37 @@ static int mark_unsafe_pages(struct pbe *pblist) ClearPageNosaveFree(pfn_to_page(pfn)); } - /* Mark orig addresses */ - for_each_pbe (p, pblist) { - 
if (virt_addr_valid(p->orig_address)) - SetPageNosaveFree(virt_to_page(p->orig_address)); - else - return -EFAULT; - } + /* Mark pages that correspond to the "original" pfns as "unsafe" */ + memory_bm_position_reset(bm); + do { + pfn = memory_bm_next_pfn(bm); + if (likely(pfn != BM_END_OF_MAP)) { + if (likely(pfn_valid(pfn))) + SetPageNosaveFree(pfn_to_page(pfn)); + else + return -EFAULT; + } + } while (pfn != BM_END_OF_MAP); - unsafe_pages = 0; + allocated_unsafe_pages = 0; return 0; } -static void copy_page_backup_list(struct pbe *dst, struct pbe *src) +static void +duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src) { - /* We assume both lists contain the same number of elements */ - while (src) { - dst->orig_address = src->orig_address; - dst = dst->next; - src = src->next; + unsigned long pfn; + + memory_bm_position_reset(src); + pfn = memory_bm_next_pfn(src); + while (pfn != BM_END_OF_MAP) { + memory_bm_set_bit(dst, pfn); + pfn = memory_bm_next_pfn(src); } } -static int check_header(struct swsusp_info *info) +static inline int check_header(struct swsusp_info *info) { char *reason = NULL; @@ -1161,19 +1069,14 @@ static int check_header(struct swsusp_info *info) * load header - check the image header and copy data from it */ -static int load_header(struct snapshot_handle *handle, - struct swsusp_info *info) +static int +load_header(struct swsusp_info *info) { int error; - struct pbe *pblist; + restore_pblist = NULL; error = check_header(info); if (!error) { - pblist = alloc_pagedir(info->image_pages, GFP_ATOMIC, PG_ANY); - if (!pblist) - return -ENOMEM; - restore_pblist = pblist; - handle->pbe = pblist; nr_copy_pages = info->image_pages; nr_meta_pages = info->pages - info->image_pages - 1; } @@ -1181,108 +1084,137 @@ static int load_header(struct snapshot_handle *handle, } /** - * unpack_orig_addresses - copy the elements of @buf[] (1 page) to - * the PBEs in the list starting at @pbe + * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set + * the corresponding bit in the memory bitmap @bm */ -static inline struct pbe *unpack_orig_addresses(unsigned long *buf, - struct pbe *pbe) +static inline void +unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) { int j; - for (j = 0; j < PAGE_SIZE / sizeof(long) && pbe; j++) { - pbe->orig_address = buf[j]; - pbe = pbe->next; + for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { + if (unlikely(buf[j] == BM_END_OF_MAP)) + break; + + memory_bm_set_bit(bm, buf[j]); } - return pbe; } /** - * prepare_image - use metadata contained in the PBE list - * pointed to by restore_pblist to mark the pages that will - * be overwritten in the process of restoring the system - * memory state from the image ("unsafe" pages) and allocate - * memory for the image + * prepare_image - use the memory bitmap @bm to mark the pages that will + * be overwritten in the process of restoring the system memory state + * from the suspend image ("unsafe" pages) and allocate memory for the + * image. * - * The idea is to allocate the PBE list first and then - * allocate as many pages as it's needed for the image data, - * but not to assign these pages to the PBEs initially. - * Instead, we just mark them as allocated and create a list - * of "safe" which will be used later + * The idea is to allocate a new memory bitmap first and then allocate + * as many pages as needed for the image data, but not to assign these + * pages to specific tasks initially. 
Instead, we just mark them as + * allocated and create a list of "safe" pages that will be used later. */ -static struct linked_page *safe_pages; +#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe)) + +static struct linked_page *safe_pages_list; -static int prepare_image(struct snapshot_handle *handle) +static int +prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) { - int error = 0; - unsigned int nr_pages = nr_copy_pages; - struct pbe *p, *pblist = NULL; + unsigned int nr_pages; + struct linked_page *sp_list, *lp; + int error; - p = restore_pblist; - error = mark_unsafe_pages(p); - if (!error) { - pblist = alloc_pagedir(nr_pages, GFP_ATOMIC, PG_SAFE); - if (pblist) - copy_page_backup_list(pblist, p); - free_pagedir(p, PG_UNSAFE_KEEP); - if (!pblist) + error = mark_unsafe_pages(bm); + if (error) + goto Free; + + error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE); + if (error) + goto Free; + + duplicate_memory_bitmap(new_bm, bm); + memory_bm_free(bm, PG_UNSAFE_KEEP); + /* Reserve some safe pages for potential later use. + * + * NOTE: This way we make sure there will be enough safe pages for the + * chain_alloc() in get_buffer(). It is a bit wasteful, but + * nr_copy_pages cannot be greater than 50% of the memory anyway. + */ + sp_list = NULL; + /* nr_copy_pages cannot be lesser than allocated_unsafe_pages */ + nr_pages = nr_copy_pages - allocated_unsafe_pages; + nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); + while (nr_pages > 0) { + lp = alloc_image_page(GFP_ATOMIC, PG_SAFE); + if (!lp) { error = -ENOMEM; + goto Free; + } + lp->next = sp_list; + sp_list = lp; + nr_pages--; } - safe_pages = NULL; - if (!error && nr_pages > unsafe_pages) { - nr_pages -= unsafe_pages; - while (nr_pages--) { - struct linked_page *ptr; - - ptr = (void *)get_zeroed_page(GFP_ATOMIC); - if (!ptr) { - error = -ENOMEM; - break; - } - if (!PageNosaveFree(virt_to_page(ptr))) { - /* The page is "safe", add it to the list */ - ptr->next = safe_pages; - safe_pages = ptr; - } - /* Mark the page as allocated */ - SetPageNosave(virt_to_page(ptr)); - SetPageNosaveFree(virt_to_page(ptr)); + /* Preallocate memory for the image */ + safe_pages_list = NULL; + nr_pages = nr_copy_pages - allocated_unsafe_pages; + while (nr_pages > 0) { + lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); + if (!lp) { + error = -ENOMEM; + goto Free; + } + if (!PageNosaveFree(virt_to_page(lp))) { + /* The page is "safe", add it to the list */ + lp->next = safe_pages_list; + safe_pages_list = lp; } + /* Mark the page as allocated */ + SetPageNosave(virt_to_page(lp)); + SetPageNosaveFree(virt_to_page(lp)); + nr_pages--; } - if (!error) { - restore_pblist = pblist; - } else { - handle->pbe = NULL; - swsusp_free(); + /* Free the reserved safe pages so that chain_alloc() can use them */ + while (sp_list) { + lp = sp_list->next; + free_image_page(sp_list, PG_UNSAFE_CLEAR); + sp_list = lp; } + return 0; + +Free: + swsusp_free(); return error; } -static void *get_buffer(struct snapshot_handle *handle) +/** + * get_buffer - compute the address that snapshot_write_next() should + * set for its caller to write to. 
+ */ + +static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) { - struct pbe *pbe = handle->pbe, *last = handle->last_pbe; - struct page *page = virt_to_page(pbe->orig_address); + struct pbe *pbe; + struct page *page = pfn_to_page(memory_bm_next_pfn(bm)); - if (PageNosave(page) && PageNosaveFree(page)) { - /* - * We have allocated the "original" page frame and we can - * use it directly to store the read page + if (PageNosave(page) && PageNosaveFree(page)) + /* We have allocated the "original" page frame and we can + * use it directly to store the loaded page. */ - pbe->address = 0; - if (last && last->next) - last->next = NULL; - return (void *)pbe->orig_address; - } - /* - * The "original" page frame has not been allocated and we have to - * use a "safe" page frame to store the read page + return page_address(page); + + /* The "original" page frame has not been allocated and we have to + * use a "safe" page frame to store the loaded page. */ - pbe->address = (unsigned long)safe_pages; - safe_pages = safe_pages->next; - if (last) - last->next = pbe; - handle->last_pbe = pbe; + pbe = chain_alloc(ca, sizeof(struct pbe)); + if (!pbe) { + swsusp_free(); + return NULL; + } + pbe->orig_address = (unsigned long)page_address(page); + pbe->address = (unsigned long)safe_pages_list; + safe_pages_list = safe_pages_list->next; + pbe->next = restore_pblist; + restore_pblist = pbe; return (void *)pbe->address; } @@ -1310,10 +1242,13 @@ static void *get_buffer(struct snapshot_handle *handle) int snapshot_write_next(struct snapshot_handle *handle, size_t count) { + static struct chain_allocator ca; int error = 0; + /* Check if we have already loaded the entire image */ if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) return 0; + if (!buffer) { /* This makes the buffer be freed by swsusp_free() */ buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); @@ -1324,26 +1259,32 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) handle->buffer = buffer; handle->sync_read = 1; if (handle->prev < handle->cur) { - if (!handle->prev) { - error = load_header(handle, - (struct swsusp_info *)buffer); + if (handle->prev == 0) { + error = load_header(buffer); + if (error) + return error; + + error = memory_bm_create(©_bm, GFP_ATOMIC, PG_ANY); if (error) return error; + } else if (handle->prev <= nr_meta_pages) { - handle->pbe = unpack_orig_addresses(buffer, - handle->pbe); - if (!handle->pbe) { - error = prepare_image(handle); + unpack_orig_pfns(buffer, ©_bm); + if (handle->prev == nr_meta_pages) { + error = prepare_image(&orig_bm, ©_bm); if (error) return error; - handle->pbe = restore_pblist; - handle->last_pbe = NULL; - handle->buffer = get_buffer(handle); + + chain_init(&ca, GFP_ATOMIC, PG_SAFE); + memory_bm_position_reset(&orig_bm); + restore_pblist = NULL; + handle->buffer = get_buffer(&orig_bm, &ca); handle->sync_read = 0; + if (!handle->buffer) + return -ENOMEM; } } else { - handle->pbe = handle->pbe->next; - handle->buffer = get_buffer(handle); + handle->buffer = get_buffer(&orig_bm, &ca); handle->sync_read = 0; } handle->prev = handle->cur; @@ -1362,6 +1303,13 @@ int snapshot_write_next(struct snapshot_handle *handle, size_t count) int snapshot_image_loaded(struct snapshot_handle *handle) { - return !(!handle->pbe || handle->pbe->next || !nr_copy_pages || - handle->cur <= nr_meta_pages + nr_copy_pages); + return !(!nr_copy_pages || + handle->cur <= nr_meta_pages + nr_copy_pages); +} + +void snapshot_free_unused_memory(struct snapshot_handle *handle) +{ + /* 
Free only if we have loaded the image entirely */ + if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) + memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 8309d20b2563..9b2ee5344dee 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -331,8 +331,7 @@ static int enough_swap(unsigned int nr_pages) unsigned int free_swap = count_swap_pages(root_swap, 1); pr_debug("swsusp: free swap pages: %u\n", free_swap); - return free_swap > (nr_pages + PAGES_FOR_IO + - (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE); + return free_swap > nr_pages + PAGES_FOR_IO; } /** @@ -547,6 +546,7 @@ static int load_image(struct swap_map_handle *handle, error = err2; if (!error) { printk("\b\b\b\bdone\n"); + snapshot_free_unused_memory(snapshot); if (!snapshot_image_loaded(snapshot)) error = -ENODATA; } diff --git a/kernel/power/user.c b/kernel/power/user.c index 0ef5e4ba39e5..2e4499f3e4d9 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -193,6 +193,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, error = -EPERM; break; } + snapshot_free_unused_memory(&data->handle); down(&pm_sem); pm_prepare_console(); error = device_suspend(PMSG_FREEZE); -- cgit v1.2.3 From c8eb8b4025175f967af0ba8e933f23aa9954dc35 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:56 -0700 Subject: [PATCH] PM: make it possible to disable console suspending Change suspend_console() so that it waits for all consoles to flush the remaining messages and make it possible to switch the console suspending off with the help of a Kconfig option. Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Cc: Stefan Seyfried Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/Kconfig | 11 +++++++++++ kernel/printk.c | 3 +++ 2 files changed, 14 insertions(+) (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 619ecabf7c58..4b6e2f18e056 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -36,6 +36,17 @@ config PM_DEBUG code. This is helpful when debugging and reporting various PM bugs, like suspend support. +config DISABLE_CONSOLE_SUSPEND + bool "Keep console(s) enabled during suspend/resume (DANGEROUS)" + depends on PM && PM_DEBUG + default n + ---help--- + This option turns off the console suspend mechanism that prevents + debug messages from reaching the console during the suspend/resume + operations. This may be helpful when debugging device drivers' + suspend/resume routines, but may itself lead to problems, for example + if netconsole is used. + config PM_TRACE bool "Suspend/resume event tracing" depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL diff --git a/kernel/printk.c b/kernel/printk.c index 1149365e989e..771f5e861bcd 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -721,6 +721,7 @@ int __init add_preferred_console(char *name, int idx, char *options) return 0; } +#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND /** * suspend_console - suspend the console subsystem * @@ -728,6 +729,7 @@ int __init add_preferred_console(char *name, int idx, char *options) */ void suspend_console(void) { + printk("Suspending console(s)\n"); acquire_console_sem(); console_suspended = 1; } @@ -737,6 +739,7 @@ void resume_console(void) console_suspended = 0; release_console_sem(); } +#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */ /** * acquire_console_sem - lock the console system for exclusive use. 
-- cgit v1.2.3 From c5c6ba4e08ab9c9e390a0f3a7d9a5c332f5cc6ef Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Sep 2006 23:32:58 -0700 Subject: [PATCH] PM: Add pm_trace switch Add the pm_trace attribute in /sys/power which has to be explicitly set to one to really enable the "PM tracing" code compiled in when CONFIG_PM_TRACE is set (which modifies the machine's CMOS clock in unpredictable ways). Signed-off-by: Rafael J. Wysocki Acked-by: Pavel Machek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/power/main.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'kernel') diff --git a/kernel/power/main.c b/kernel/power/main.c index 4d403323a7bb..873228c71dab 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "power.h" @@ -281,10 +282,39 @@ static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n power_attr(state); +#ifdef CONFIG_PM_TRACE +int pm_trace_enabled; + +static ssize_t pm_trace_show(struct subsystem * subsys, char * buf) +{ + return sprintf(buf, "%d\n", pm_trace_enabled); +} + +static ssize_t +pm_trace_store(struct subsystem * subsys, const char * buf, size_t n) +{ + int val; + + if (sscanf(buf, "%d", &val) == 1) { + pm_trace_enabled = !!val; + return n; + } + return -EINVAL; +} + +power_attr(pm_trace); + +static struct attribute * g[] = { + &state_attr.attr, + &pm_trace_attr.attr, + NULL, +}; +#else static struct attribute * g[] = { &state_attr.attr, NULL, }; +#endif /* CONFIG_PM_TRACE */ static struct attribute_group attr_group = { .attrs = g, -- cgit v1.2.3 From d33b6fba2c4350651f3f61ff2ab858a2f116e9a4 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 30 Jun 2006 02:31:24 -0700 Subject: Resources: insert identical resources above existing resources If you have two resources which are exactly the same size, insert_resource() currently inserts the new one below the existing one. This is wrong because there's no way to insert a resource of the same size above an existing one. I took this opportunity to rewrite the initial loop to be a for-loop instead of a goto-loop and fix the documentation. Signed-off-by: Matthew Wilcox Cc: Ivan Kokshaysky Cc: Dominik Brodowski Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/resource.c | 32 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 46286434af80..9db38a1a7520 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -344,12 +344,11 @@ EXPORT_SYMBOL(allocate_resource); * * Returns 0 on success, -EBUSY if the resource can't be inserted. * - * This function is equivalent of request_resource when no conflict + * This function is equivalent to request_resource when no conflict * happens. If a conflict happens, and the conflicting resources * entirely fit within the range of the new resource, then the new - * resource is inserted and the conflicting resources become childs of - * the new resource. Otherwise the new resource becomes the child of - * the conflicting resource + * resource is inserted and the conflicting resources become children of + * the new resource.
*/ int insert_resource(struct resource *parent, struct resource *new) { @@ -357,20 +356,21 @@ int insert_resource(struct resource *parent, struct resource *new) struct resource *first, *next; write_lock(&resource_lock); - begin: - result = 0; - first = __request_resource(parent, new); - if (!first) - goto out; - result = -EBUSY; - if (first == parent) - goto out; + for (;; parent = first) { + result = 0; + first = __request_resource(parent, new); + if (!first) + goto out; - /* Resource fully contained by the clashing resource? Recurse into it */ - if (first->start <= new->start && first->end >= new->end) { - parent = first; - goto begin; + result = -EBUSY; + if (first == parent) + goto out; + + if ((first->start > new->start) || (first->end < new->end)) + break; + if ((first->start == new->start) && (first->end == new->end)) + break; } for (next = first; ; next = next->sibling) { -- cgit v1.2.3 From 0ec76a110f432e98277e464b82ace8dd66571689 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 27 Sep 2006 01:50:15 -0700 Subject: [PATCH] NOMMU: Check that access_process_vm() has a valid target Check that access_process_vm() is accessing a valid mapping in the target process. This limits ptrace() accesses and accesses through /proc//maps to only those regions actually mapped by a program. Signed-off-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/ptrace.c | 54 ------------------------------------------------------ 1 file changed, 54 deletions(-) (limited to 'kernel') diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 9a111f70145c..8aad0331d82e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -241,60 +241,6 @@ int ptrace_detach(struct task_struct *child, unsigned int data) return 0; } -/* - * Access another process' address space. - * Source/target buffer must be kernel space, - * Do not walk the page table directly, use get_user_pages - */ - -int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) -{ - struct mm_struct *mm; - struct vm_area_struct *vma; - struct page *page; - void *old_buf = buf; - - mm = get_task_mm(tsk); - if (!mm) - return 0; - - down_read(&mm->mmap_sem); - /* ignore errors, just check how much was sucessfully transfered */ - while (len) { - int bytes, ret, offset; - void *maddr; - - ret = get_user_pages(tsk, mm, addr, 1, - write, 1, &page, &vma); - if (ret <= 0) - break; - - bytes = len; - offset = addr & (PAGE_SIZE-1); - if (bytes > PAGE_SIZE-offset) - bytes = PAGE_SIZE-offset; - - maddr = kmap(page); - if (write) { - copy_to_user_page(vma, page, addr, - maddr + offset, buf, bytes); - set_page_dirty_lock(page); - } else { - copy_from_user_page(vma, page, addr, - buf, maddr + offset, bytes); - } - kunmap(page); - page_cache_release(page); - len -= bytes; - buf += bytes; - addr += bytes; - } - up_read(&mm->mmap_sem); - mmput(mm); - - return buf - old_buf; -} - int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) { int copied = 0; -- cgit v1.2.3 From f269fdd1829acc5e53bf57b145003e5733133f2b Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 27 Sep 2006 01:50:23 -0700 Subject: [PATCH] NOMMU: move the fallback arch_vma_name() to a sensible place Move the fallback arch_vma_name() to a sensible place (kernel/signal.c). Currently it's in fs/proc/task_mmu.c, a file that is dependent on both CONFIG_PROC_FS and CONFIG_MMU being enabled, but it's used from kernel/signal.c from where it is called unconditionally. 
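As a minimal illustrative sketch (not part of this patch), once the weak fallback below is in place an architecture can override it simply by providing an ordinary, non-weak definition; the "[vdso]" naming and the mm->context.vdso field used here are assumptions for illustration only:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical architecture-specific override of the weak fallback;
 * assumes this architecture records its vDSO base in mm->context.vdso.
 */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm &&
	    vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}

Because the fallback is declared __attribute__((weak)), the linker prefers such a strong definition whenever one is present.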
[akpm@osdl.org: build fix] Signed-off-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/signal.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index bfdb5686fa3e..05853a7337e3 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2577,6 +2577,11 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) } #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ +__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) +{ + return NULL; +} + void __init signals_init(void) { sigqueue_cachep = -- cgit v1.2.3 From 8e18e2941c53416aa219708e7dcad21fb4bd6794 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 27 Sep 2006 01:50:46 -0700 Subject: [PATCH] inode_diet: Replace inode.u.generic_ip with inode.i_private The following patches reduce the size of the VFS inode structure by 28 bytes on a UP x86. (It would be more on an x86_64 system). This is a 10% reduction in the inode size on a UP kernel that is configured in a production mode (i.e., with no spinlock or other debugging functions enabled; if you want to save memory taken up by in-core inodes, the first thing you should do is disable the debugging options; they are responsible for a huge amount of bloat in the VFS inode structure). This patch: The filesystem or device-specific pointer in the inode is inside a union, which is pretty pointless given that all 30+ users of this field have been using the void pointer. Get rid of the union and rename it to i_private, with a comment to explain who is allowed to use the void pointer. This is just a cleanup, but it allows us to reuse the union 'u' for something where the union will actually be used. [judith@osdl.org: powerpc build fix] Signed-off-by: "Theodore Ts'o" Signed-off-by: Judith Lebzelter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/relay.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index 33345e73485c..85786ff2a4f9 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -669,7 +669,7 @@ EXPORT_SYMBOL_GPL(relay_flush); */ static int relay_file_open(struct inode *inode, struct file *filp) { - struct rchan_buf *buf = inode->u.generic_ip; + struct rchan_buf *buf = inode->i_private; kref_get(&buf->kref); filp->private_data = buf; -- cgit v1.2.3 From ba52de123d454b57369f291348266d86f4b35070 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 27 Sep 2006 01:50:49 -0700 Subject: [PATCH] inode-diet: Eliminate i_blksize from the inode structure This eliminates the i_blksize field from struct inode. Filesystems that want to provide a per-inode st_blksize can do so by providing their own getattr routine instead of using the generic_fillattr() function. Note that some filesystems were providing pretty much random (and incorrect) values for i_blksize.
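For instance, a filesystem that still wants to report a specific st_blksize after this change could wire up its own getattr along these lines (an illustrative sketch, not taken from this patch; examplefs_getattr and EXAMPLEFS_BLKSIZE are hypothetical names):

#include <linux/fs.h>
#include <linux/stat.h>

#define EXAMPLEFS_BLKSIZE 4096	/* hypothetical per-filesystem block size */

static int examplefs_getattr(struct vfsmount *mnt, struct dentry *dentry,
			     struct kstat *stat)
{
	/* Fill in the common fields, then override the block size that
	 * generic_fillattr() would otherwise report. */
	generic_fillattr(dentry->d_inode, stat);
	stat->blksize = EXAMPLEFS_BLKSIZE;
	return 0;
}

The routine would then be installed as the .getattr method in the filesystem's inode_operations.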
[bunk@stusta.de: cleanup] [akpm@osdl.org: generic_fillattr() fix] Signed-off-by: "Theodore Ts'o" Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index cff41511269f..1b32c2c04c15 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -289,7 +289,6 @@ static struct inode *cpuset_new_inode(mode_t mode) inode->i_mode = mode; inode->i_uid = current->fsuid; inode->i_gid = current->fsgid; - inode->i_blksize = PAGE_CACHE_SIZE; inode->i_blocks = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info; -- cgit v1.2.3 From b89a81712f486e4f7a606987413e387605fdeaf4 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 27 Sep 2006 01:51:04 -0700 Subject: [PATCH] sysctl: Allow /proc/sys without sys_sysctl Since sys_sysctl is deprecated start allow it to be compiled out. This should catch any remaining user space code that cares, and paves the way for further sysctl cleanups. [akpm@osdl.org: If sys_sysctl() is not compiled-in, emit a warning] Signed-off-by: Eric W. Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 113 +++++++++++++++++++------------------------------------- 1 file changed, 38 insertions(+), 75 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bcb3a181dbb2..8bfa7d117c54 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -137,8 +137,11 @@ extern int no_unaligned_warning; extern int max_lock_depth; #endif -static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t, - ctl_table *, void **); +#ifdef CONFIG_SYSCTL_SYSCALL +static int parse_table(int __user *, int, void __user *, size_t __user *, + void __user *, size_t, ctl_table *, void **); +#endif + static int proc_doutsstring(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); @@ -165,7 +168,7 @@ int sysctl_legacy_va_layout; /* /proc declarations: */ -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_PROC_SYSCTL static ssize_t proc_readsys(struct file *, char __user *, size_t, loff_t *); static ssize_t proc_writesys(struct file *, const char __user *, size_t, loff_t *); @@ -1166,12 +1169,13 @@ static void start_unregistering(struct ctl_table_header *p) void __init sysctl_init(void) { -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_PROC_SYSCTL register_proc_table(root_table, proc_sys_root, &root_table_header); init_irq_proc(); #endif } +#ifdef CONFIG_SYSCTL_SYSCALL int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { @@ -1225,6 +1229,7 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args) unlock_kernel(); return error; } +#endif /* CONFIG_SYSCTL_SYSCALL */ /* * ctl_perm does NOT grant the superuser all rights automatically, because @@ -1251,6 +1256,7 @@ static inline int ctl_perm(ctl_table *table, int op) return test_perm(table->mode, op); } +#ifdef CONFIG_SYSCTL_SYSCALL static int parse_table(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen, @@ -1340,6 +1346,7 @@ int do_sysctl_strategy (ctl_table *table, } return 0; } +#endif /* CONFIG_SYSCTL_SYSCALL */ /** * register_sysctl_table - register a sysctl hierarchy @@ -1427,7 +1434,7 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table, else 
list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry); spin_unlock(&sysctl_lock); -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_PROC_SYSCTL register_proc_table(table, proc_sys_root, tmp); #endif return tmp; @@ -1445,18 +1452,31 @@ void unregister_sysctl_table(struct ctl_table_header * header) might_sleep(); spin_lock(&sysctl_lock); start_unregistering(header); -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_PROC_SYSCTL unregister_proc_table(header->ctl_table, proc_sys_root); #endif spin_unlock(&sysctl_lock); kfree(header); } +#else /* !CONFIG_SYSCTL */ +struct ctl_table_header * register_sysctl_table(ctl_table * table, + int insert_at_head) +{ + return NULL; +} + +void unregister_sysctl_table(struct ctl_table_header * table) +{ +} + +#endif /* CONFIG_SYSCTL */ + /* * /proc/sys support */ -#ifdef CONFIG_PROC_FS +#ifdef CONFIG_PROC_SYSCTL /* Scan the sysctl entries in table and add them all into /proc */ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root, void *set) @@ -2318,6 +2338,7 @@ int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, #endif /* CONFIG_PROC_FS */ +#ifdef CONFIG_SYSCTL_SYSCALL /* * General sysctl support routines */ @@ -2460,11 +2481,19 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, return 1; } -#else /* CONFIG_SYSCTL */ +#else /* CONFIG_SYSCTL_SYSCALL */ asmlinkage long sys_sysctl(struct __sysctl_args __user *args) { + static int msg_count; + + if (msg_count < 5) { + msg_count++; + printk(KERN_INFO + "warning: process `%s' used the removed sysctl " + "system call\n", current->comm); + } return -ENOSYS; } @@ -2496,73 +2525,7 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, return -ENOSYS; } -int proc_dostring(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_dointvec(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp, - void __user *buffer, size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, - struct file *filp, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - return -ENOSYS; -} - -struct ctl_table_header * register_sysctl_table(ctl_table * table, - int insert_at_head) -{ - return NULL; -} - -void unregister_sysctl_table(struct ctl_table_header * table) -{ -} - -#endif /* CONFIG_SYSCTL */ +#endif /* CONFIG_SYSCTL_SYSCALL */ /* * No sense putting this after each symbol definition, twice, -- cgit v1.2.3 From c18258c6f0848f97e85287f6271c511a092bb784 Mon Sep 17 00:00:00 2001 From: "Eric W. 
Biederman" Date: Wed, 27 Sep 2006 01:51:06 -0700 Subject: [PATCH] pid: Implement transfer_pid and use it to simplify de_thread In de_thread we move pids from one process to another, a rather ugly case. The function transfer_pid makes it clear what we are doing, and makes the action atomic. This is useful we ever want to atomically traverse the process group and session lists, in a rcu safe manner. Even if the atomic properties this change should be a win as transfer_pid should be less code to execute than executing both attach_pid and detach_pid, and this should make de_thread slightly smaller as only a single function call needs to be emitted. The only downside is that the code might be slower to execute as the odds are against transfer_pid being in cache. Signed-off-by: Eric W. Biederman Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/pid.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index 93e212f20671..6db82b68e2f8 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -252,6 +252,15 @@ void fastcall detach_pid(struct task_struct *task, enum pid_type type) free_pid(pid); } +/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ +void fastcall transfer_pid(struct task_struct *old, struct task_struct *new, + enum pid_type type) +{ + new->pids[type].pid = old->pids[type].pid; + hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node); + old->pids[type].pid = NULL; +} + struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) { struct task_struct *result = NULL; -- cgit v1.2.3 From 65800ac77e080cf159d6c1207b6886e18f22bc08 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 27 Sep 2006 01:51:11 -0700 Subject: [PATCH] pid: remove temporary debug code in attach_pid With the patches flying between Oleg and myself somehow this temporary debug code got left in pid.c. It was never intended to make it to the stable kernel. Signed-off-by: Eric W. Biederman Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/pid.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'kernel') diff --git a/kernel/pid.c b/kernel/pid.c index 6db82b68e2f8..8387e8c68193 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -223,9 +223,6 @@ int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr) struct pid_link *link; struct pid *pid; - WARN_ON(!task->pid); /* to be removed soon */ - WARN_ON(!nr); /* to be removed soon */ - link = &task->pids[type]; link->pid = pid = find_pid(nr); hlist_add_head_rcu(&link->node, &pid->tasks[type]); -- cgit v1.2.3 From 910067d188d56d80801b71b0ca1f73aa400c7b8c Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 29 Sep 2006 01:58:36 -0700 Subject: [PATCH] remove generic__raw_read_trylock() If the cpu has the lock held for write, is interrupted, and the interrupt handler calls read_trylock(), it's an instant deadlock. Now, Dave Miller has subsequently pointed out that we don't have any situations where this can occur. Nevertheless, we should delete generic__raw_read_lock (and its associated EXPORT to make Arjan happy) so that nobody thinks they can use it. Acked-by: "David S. 
Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/spinlock.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'kernel') diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 9644a41e0bef..d48143eafbfd 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -21,17 +21,6 @@ #include #include -/* - * Generic declaration of the raw read_trylock() function, - * architectures are supposed to optimize this: - */ -int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock) -{ - __raw_read_lock(lock); - return 1; -} -EXPORT_SYMBOL(generic__raw_read_trylock); - int __lockfunc _spin_trylock(spinlock_t *lock) { preempt_disable(); -- cgit v1.2.3 From d8c7649e99e4b081b624aefe1e77caa30b53cb18 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 29 Sep 2006 01:58:55 -0700 Subject: [PATCH] kernel/params: driver layer error checking Check driver layer return values in kernel/params.c Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/params.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 91aea7aa532e..63d432d0ebc0 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -547,6 +547,7 @@ static void __init kernel_param_sysfs_setup(const char *name, unsigned int name_skip) { struct module_kobject *mk; + int ret; mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL); BUG_ON(!mk); @@ -554,7 +555,8 @@ static void __init kernel_param_sysfs_setup(const char *name, mk->mod = THIS_MODULE; kobj_set_kset_s(mk, module_subsys); kobject_set_name(&mk->kobj, name); - kobject_register(&mk->kobj); + ret = kobject_register(&mk->kobj); + BUG_ON(ret < 0); /* no need to keep the kobject if no parameter is exported */ if (!param_sysfs_setup(mk, kparam, num_params, name_skip)) { @@ -684,7 +686,14 @@ decl_subsys(module, &module_ktype, NULL); */ static int __init param_sysfs_init(void) { - subsystem_register(&module_subsys); + int ret; + + ret = subsystem_register(&module_subsys); + if (ret < 0) { + printk(KERN_WARNING "%s (%d): subsystem_register error: %d\n", + __FILE__, __LINE__, ret); + return ret; + } param_sysfs_builtin(); -- cgit v1.2.3 From 4c78a6639386f9e7399fbc0d0a173d4cc1a3e9bf Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 29 Sep 2006 01:59:10 -0700 Subject: [PATCH] kernel-doc for relay interface Add relay interface support to DocBook/kernel-api.tmpl. Fix typos etc. in relay.c and relayfs.txt. Signed-off-by: Randy Dunlap Acked-by: Tom Zanussi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/relay.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index 85786ff2a4f9..1d63ecddfa70 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -95,7 +95,7 @@ int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) * @buf: the buffer struct * @size: total size of the buffer * - * Returns a pointer to the resulting buffer, NULL if unsuccessful. The + * Returns a pointer to the resulting buffer, %NULL if unsuccessful. The * passed in size will get page aligned, if it isn't already. 
*/ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size) @@ -132,10 +132,9 @@ depopulate: /** * relay_create_buf - allocate and initialize a channel buffer - * @alloc_size: size of the buffer to allocate - * @n_subbufs: number of sub-buffers in the channel + * @chan: the relay channel * - * Returns channel buffer if successful, NULL otherwise + * Returns channel buffer if successful, %NULL otherwise. */ struct rchan_buf *relay_create_buf(struct rchan *chan) { @@ -163,6 +162,7 @@ free_buf: /** * relay_destroy_channel - free the channel struct + * @kref: target kernel reference that contains the relay channel * * Should only be called from kref_put(). */ @@ -194,6 +194,7 @@ void relay_destroy_buf(struct rchan_buf *buf) /** * relay_remove_buf - remove a channel buffer + * @kref: target kernel reference that contains the relay buffer * * Removes the file from the fileystem, which also frees the * rchan_buf_struct and the channel buffer. Should only be called from @@ -374,7 +375,7 @@ void relay_reset(struct rchan *chan) } EXPORT_SYMBOL_GPL(relay_reset); -/** +/* * relay_open_buf - create a new relay channel buffer * * Internal - used by relay_open(). @@ -448,12 +449,12 @@ static inline void setup_callbacks(struct rchan *chan, /** * relay_open - create a new relay channel * @base_filename: base name of files to create - * @parent: dentry of parent directory, NULL for root directory + * @parent: dentry of parent directory, %NULL for root directory * @subbuf_size: size of sub-buffers * @n_subbufs: number of sub-buffers * @cb: client callback functions * - * Returns channel pointer if successful, NULL otherwise. + * Returns channel pointer if successful, %NULL otherwise. * * Creates a channel buffer for each cpu using the sizes and * attributes specified. The created channel buffer files @@ -585,7 +586,7 @@ EXPORT_SYMBOL_GPL(relay_switch_subbuf); * subbufs_consumed should be the number of sub-buffers newly consumed, * not the total consumed. * - * NOTE: kernel clients don't need to call this function if the channel + * NOTE: Kernel clients don't need to call this function if the channel * mode is 'overwrite'. */ void relay_subbufs_consumed(struct rchan *chan, @@ -641,7 +642,7 @@ EXPORT_SYMBOL_GPL(relay_close); * relay_flush - close the channel * @chan: the channel * - * Flushes all channel buffers i.e. forces buffer switch. + * Flushes all channel buffers, i.e. forces buffer switch. */ void relay_flush(struct rchan *chan) { @@ -729,7 +730,7 @@ static int relay_file_release(struct inode *inode, struct file *filp) return 0; } -/** +/* * relay_file_read_consume - update the consumed count for the buffer */ static void relay_file_read_consume(struct rchan_buf *buf, @@ -756,7 +757,7 @@ static void relay_file_read_consume(struct rchan_buf *buf, } } -/** +/* * relay_file_read_avail - boolean, are there unconsumed bytes available? 
*/ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) @@ -793,6 +794,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) /** * relay_file_read_subbuf_avail - return bytes available in sub-buffer + * @read_pos: file read position + * @buf: relay channel buffer */ static size_t relay_file_read_subbuf_avail(size_t read_pos, struct rchan_buf *buf) @@ -818,6 +821,8 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos, /** * relay_file_read_start_pos - find the first available byte to read + * @read_pos: file read position + * @buf: relay channel buffer * * If the read_pos is in the middle of padding, return the * position of the first actually available byte, otherwise @@ -844,6 +849,9 @@ static size_t relay_file_read_start_pos(size_t read_pos, /** * relay_file_read_end_pos - return the new read position + * @read_pos: file read position + * @buf: relay channel buffer + * @count: number of bytes to be read */ static size_t relay_file_read_end_pos(struct rchan_buf *buf, size_t read_pos, @@ -865,7 +873,7 @@ static size_t relay_file_read_end_pos(struct rchan_buf *buf, return end_pos; } -/** +/* * subbuf_read_actor - read up to one subbuf's worth of data */ static int subbuf_read_actor(size_t read_start, @@ -890,7 +898,7 @@ static int subbuf_read_actor(size_t read_start, return ret; } -/** +/* * subbuf_send_actor - send up to one subbuf's worth of data */ static int subbuf_send_actor(size_t read_start, @@ -933,7 +941,7 @@ typedef int (*subbuf_actor_t) (size_t read_start, read_descriptor_t *desc, read_actor_t actor); -/** +/* * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries */ static inline ssize_t relay_file_read_subbufs(struct file *filp, -- cgit v1.2.3 From 2ff6fd8f4a40b72ff35dbb1e08eb9ed6b64b6028 Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Fri, 29 Sep 2006 01:59:21 -0700 Subject: [PATCH] irq: fixed coding style Signed-off-by: Yoichi Yuasa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index ac1f850d4937..3c3fec55f6d1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -146,7 +146,7 @@ static void default_disable(unsigned int irq) struct irq_desc *desc = irq_desc + irq; if (!(desc->status & IRQ_DELAYED_DISABLE)) - irq_desc[irq].chip->mask(irq); + desc->chip->mask(irq); } /* -- cgit v1.2.3 From 538d9d532b0e0320c9dd326a560b5a72d73f910d Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Fri, 29 Sep 2006 01:59:22 -0700 Subject: [PATCH] irq: remove a extra line Signed-off-by: Yoichi Yuasa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/irq/chip.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'kernel') diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 3c3fec55f6d1..736cb0bd498f 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -40,10 +40,6 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) spin_lock_irqsave(&desc->lock, flags); irq_chip_set_defaults(chip); desc->chip = chip; - /* - * For compatibility only: - */ - desc->chip = chip; spin_unlock_irqrestore(&desc->lock, flags); return 0; -- cgit v1.2.3 From a49a4af759c0193d42aeaeefb4df7de8973dd713 Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Fri, 29 Sep 2006 01:59:30 -0700 Subject: [PATCH] rcu: add lock annotations to rcu{,_bh}_torture_read_{lock,unlock} rcu_torture_read_lock and rcu_bh_torture_read_lock acquire locks without releasing them, and the 
matching functions rcu_torture_read_unlock and rcu_bh_torture_read_unlock get called with the corresponding locks held and release them. Add lock annotations to these four functions so that sparse can check callers for lock pairing, and so that sparse will not complain about these functions since they intentionally use locks in this manner. Signed-off-by: Josh Triplett Acked-by: Paul McKenney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/rcutorture.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 4d1c3d247127..4f2c4272d59c 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -192,13 +192,13 @@ static struct rcu_torture_ops *cur_ops = NULL; * Definitions for rcu torture testing. */ -static int rcu_torture_read_lock(void) +static int rcu_torture_read_lock(void) __acquires(RCU) { rcu_read_lock(); return 0; } -static void rcu_torture_read_unlock(int idx) +static void rcu_torture_read_unlock(int idx) __releases(RCU) { rcu_read_unlock(); } @@ -250,13 +250,13 @@ static struct rcu_torture_ops rcu_ops = { * Definitions for rcu_bh torture testing. */ -static int rcu_bh_torture_read_lock(void) +static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH) { rcu_read_lock_bh(); return 0; } -static void rcu_bh_torture_read_unlock(int idx) +static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH) { rcu_read_unlock_bh(); } -- cgit v1.2.3 From d10be6d1bdb0c901b78244872de3cc1c1b6c3fb2 Mon Sep 17 00:00:00 2001 From: Mark Huang Date: Fri, 29 Sep 2006 01:59:34 -0700 Subject: [PATCH] module_subsys: initialize earlier Initialize module_subsys earlier (or at least earlier than devices) since it could be used very early in the boot process if kmod loads a module before the device initcalls. Otherwise, kmod will crash in kernel/module.c:mod_sysfs_setup() since the kset in module_subsys is not initialized yet. I only noticed this problem because occasionally, kmod loads the modules for my SCSI and Ethernet adapters very early, during the boot process itself. I don't quite understand why it loads them sometimes and doesn't load them other times. Or who is telling kmod to do so. Can someone explain? Signed-off-by: Mark Huang Cc: Greg KH Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/params.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/params.c b/kernel/params.c index 63d432d0ebc0..f406655d6653 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -699,7 +699,7 @@ static int __init param_sysfs_init(void) return 0; } -__initcall(param_sysfs_init); +subsys_initcall(param_sysfs_init); EXPORT_SYMBOL(param_set_byte); EXPORT_SYMBOL(param_get_byte); -- cgit v1.2.3 From 89e7e374dde1015d69d2d70797ae4053b14fa9db Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Fri, 29 Sep 2006 01:59:36 -0700 Subject: [PATCH] timer: add lock annotation to lock_timer_base lock_timer_base acquires a lock and returns with that lock held. Add a lock annotation to this function so that sparse can check callers for lock pairing, and so that sparse will not complain about this function since it intentionally uses the lock in this manner. 
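The annotation mechanism itself is small: when the file is run through sparse (which defines __CHECKER__), __acquires()/__releases() expand to context-tracking attributes, and in a normal compile they expand to nothing. A minimal sketch of the idea, using hypothetical lock and function names, with fallback macro definitions in case the compiler.h ones are not in scope:

#ifndef __acquires
#define __acquires(x)	/* no-op outside sparse */
#define __releases(x)
#endif

static DEFINE_SPINLOCK(example_lock);

static void example_take(void) __acquires(example_lock)
{
	spin_lock(&example_lock);	/* returns with example_lock held */
}

static void example_drop(void) __releases(example_lock)
{
	spin_unlock(&example_lock);	/* caller must already hold example_lock */
}

With the annotations in place, sparse can warn when a caller acquires the lock on one path without releasing it on another.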
Signed-off-by: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 1d7dd6267c2d..6c9fa80088ed 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -175,6 +175,7 @@ static inline void detach_timer(struct timer_list *timer, */ static tvec_base_t *lock_timer_base(struct timer_list *timer, unsigned long *flags) + __acquires(timer->base->lock) { tvec_base_t *base; -- cgit v1.2.3 From 6c5c934153513dc72e2d6464f39e8ef1f27c0a3e Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 29 Sep 2006 01:59:40 -0700 Subject: [PATCH] ifdef blktrace debugging fields Signed-off-by: Alexey Dobriyan Acked-by: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 802b1cf0e63f..bca6ce6d3ded 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -183,7 +183,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) /* One for us, one for whoever does the "release_task()" (usually parent) */ atomic_set(&tsk->usage,2); atomic_set(&tsk->fs_excl, 0); +#ifdef CONFIG_BLK_DEV_IO_TRACE tsk->btrace_seq = 0; +#endif tsk->splice_pipe = NULL; return tsk; } -- cgit v1.2.3 From db630637b2f192bea2ba1c000e9cbe4e542a03ea Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 29 Sep 2006 01:59:44 -0700 Subject: [PATCH] clean up and remove some extra spinlocks from rtmutex Oleg brought up some interesting points about grabbing the pi_lock for some protections. In this discussion, I realized that there are some places where the pi_lock was being grabbed when it really wasn't necessary. This patch also does a little bit of cleanup. It basically does three things: 1) renames the "boost" variable to "chain_walk", since it is also used in the debugging case when nothing is going to be boosted; the new name better describes what the test is going to do if it succeeds. 2) moves get_task_struct to just before the unlocking of the wait_lock. This removes duplicate code and makes it a little easier to read. The owner won't go away while either the pi_lock or the wait_lock is held. 3) removes the pi_lock taking and the owner-blocked check completely from the debugging case, because grabbing the lock, doing the check, and then releasing the lock is just full of races. It's just as good to go ahead and call the chain-walk function, since after releasing the lock the owner can block anyway, and we would have missed that. For the debug case, we really do want to do the chain walk to test for deadlocks anyway.
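The effect of point 2 can be condensed into a short sketch (hypothetical wrapper name; the actual call matches the remove_waiter() path in the diff below). The task reference is taken while lock->wait_lock still pins the owner, and only then is the lock released for the chain walk:

static void example_unlock_and_walk(struct rt_mutex *lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	get_task_struct(owner);		/* safe: wait_lock is still held */
	spin_unlock(&lock->wait_lock);
	/* the reference taken above is dropped inside the chain walk */
	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
}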
[oleg@tv-sign.ru: more of the same] Signed-off-by: Steven Rostedt Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Oleg Nesterov Cc: Esben Nielsen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/rtmutex.c | 51 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 3e13a1e5856f..4ab17da46fd8 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -251,6 +251,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* Grab the next task */ task = rt_mutex_owner(lock); + get_task_struct(task); spin_lock_irqsave(&task->pi_lock, flags); if (waiter == rt_mutex_top_waiter(lock)) { @@ -269,7 +270,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, __rt_mutex_adjust_prio(task); } - get_task_struct(task); spin_unlock_irqrestore(&task->pi_lock, flags); top_waiter = rt_mutex_top_waiter(lock); @@ -409,7 +409,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct task_struct *owner = rt_mutex_owner(lock); struct rt_mutex_waiter *top_waiter = waiter; unsigned long flags; - int boost = 0, res; + int chain_walk = 0, res; spin_lock_irqsave(&current->pi_lock, flags); __rt_mutex_adjust_prio(current); @@ -433,25 +433,23 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, plist_add(&waiter->pi_list_entry, &owner->pi_waiters); __rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) { - boost = 1; - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(owner); - } - spin_unlock_irqrestore(&owner->pi_lock, flags); - } - else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) { - spin_lock_irqsave(&owner->pi_lock, flags); - if (owner->pi_blocked_on) { - boost = 1; - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(owner); - } + if (owner->pi_blocked_on) + chain_walk = 1; spin_unlock_irqrestore(&owner->pi_lock, flags); } - if (!boost) + else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) + chain_walk = 1; + + if (!chain_walk) return 0; + /* + * The owner can't disappear while holding a lock, + * so the owner struct is protected by wait_lock. + * Gets dropped in rt_mutex_adjust_prio_chain()! + */ + get_task_struct(owner); + spin_unlock(&lock->wait_lock); res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, @@ -532,7 +530,7 @@ static void remove_waiter(struct rt_mutex *lock, int first = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); unsigned long flags; - int boost = 0; + int chain_walk = 0; spin_lock_irqsave(&current->pi_lock, flags); plist_del(&waiter->list_entry, &lock->wait_list); @@ -554,19 +552,20 @@ static void remove_waiter(struct rt_mutex *lock, } __rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) { - boost = 1; - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(owner); - } + if (owner->pi_blocked_on) + chain_walk = 1; + spin_unlock_irqrestore(&owner->pi_lock, flags); } WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); - if (!boost) + if (!chain_walk) return; + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(owner); + spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); @@ -592,10 +591,10 @@ void rt_mutex_adjust_pi(struct task_struct *task) return; } - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(task); spin_unlock_irqrestore(&task->pi_lock, flags); + /* gets dropped in rt_mutex_adjust_prio_chain()!
*/ + get_task_struct(task); rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task); } -- cgit v1.2.3 From 2aae4a108dab8b8bc92270335f6e4b5c146b32ae Mon Sep 17 00:00:00 2001 From: Rolf Eike Beer Date: Fri, 29 Sep 2006 01:59:46 -0700 Subject: [PATCH] Fix kerneldoc comments in kernel/timer.c Some of the kerneldoc comments in this file are ignored since the lead-in is malformed, using either "/*" or "/***" instead of "/**". [rdunlap@xenotime.net: kerneldoc fixes] Signed-off-by: Rolf Eike Beer Acked-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 6c9fa80088ed..d644f4e9ca0c 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -136,7 +136,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) list_add_tail(&timer->entry, vec); } -/*** +/** * init_timer - initialize a timer. * @timer: the timer to be initialized * @@ -236,7 +236,7 @@ int __mod_timer(struct timer_list *timer, unsigned long expires) EXPORT_SYMBOL(__mod_timer); -/*** +/** * add_timer_on - start a timer on a particular CPU * @timer: the timer to be added * @cpu: the CPU to start it on @@ -256,9 +256,10 @@ void add_timer_on(struct timer_list *timer, int cpu) } -/*** +/** * mod_timer - modify a timer's timeout * @timer: the timer to be modified + * @expires: new timeout in jiffies * * mod_timer is a more efficient way to update the expire field of an * active timer (if the timer is inactive it will be activated) @@ -292,7 +293,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires) EXPORT_SYMBOL(mod_timer); -/*** +/** * del_timer - deactive a timer. * @timer: the timer to be deactivated * @@ -324,7 +325,10 @@ int del_timer(struct timer_list *timer) EXPORT_SYMBOL(del_timer); #ifdef CONFIG_SMP -/* +/** + * try_to_del_timer_sync - Try to deactivate a timer + * @timer: timer do del + * * This function tries to deactivate a timer. Upon successful (ret >= 0) * exit the timer is not queued and the handler is not running on any CPU. * @@ -352,7 +356,7 @@ out: return ret; } -/*** +/** * del_timer_sync - deactivate a timer and wait for the handler to finish. * @timer: the timer to be deactivated * @@ -402,15 +406,15 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index) return index; } -/*** +#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) + +/** * __run_timers - run all expired timers (if any) on this CPU. * @base: the timer vector to be processed. * * This function cascades all vectors and executes all expired timer * vectors. */ -#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) - static inline void __run_timers(tvec_base_t *base) { struct timer_list *timer; @@ -971,7 +975,7 @@ void __init timekeeping_init(void) static int timekeeping_suspended; -/* +/** * timekeeping_resume - Resumes the generic timekeeping subsystem. * @dev: unused * @@ -1107,7 +1111,7 @@ static void clocksource_adjust(struct clocksource *clock, s64 offset) clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift); } -/* +/** * update_wall_time - Uses the current clocksource to increment the wall time * * Called from the timer interrupt, must hold a write on xtime_lock. 
@@ -1471,8 +1475,9 @@ asmlinkage long sys_gettid(void) return current->pid; } -/* +/** * sys_sysinfo - fill in sysinfo struct + * @info: pointer to buffer to fill */ asmlinkage long sys_sysinfo(struct sysinfo __user *info) { -- cgit v1.2.3 From e6cab99bb478e067b1a7a120333ff326954a2412 Mon Sep 17 00:00:00 2001 From: Chuck Ebbert <76306.1226@compuserve.com> Date: Fri, 29 Sep 2006 01:59:57 -0700 Subject: [PATCH] unwind: fix unused variable warning when !CONFIG_MODULES Fix "variable defined but not used" compiler warning in unwind.c when CONFIG_MODULES is not set. Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com> Cc: Jan Beulich Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/unwind.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/unwind.c b/kernel/unwind.c index 3430475fcd88..2e2368607aab 100644 --- a/kernel/unwind.c +++ b/kernel/unwind.c @@ -102,7 +102,7 @@ static struct unwind_table { unsigned long size; struct unwind_table *link; const char *name; -} root_table, *last_table; +} root_table; struct unwind_item { enum item_location { @@ -174,6 +174,8 @@ void __init unwind_init(void) #ifdef CONFIG_MODULES +static struct unwind_table *last_table; + /* Must be called with module_mutex held. */ void *unwind_add_table(struct module *module, const void *table_start, -- cgit v1.2.3 From 3b9b8ab65d8eed784b9164d03807cb2bda7b5cd6 Mon Sep 17 00:00:00 2001 From: Kirill Korotaev Date: Fri, 29 Sep 2006 02:00:05 -0700 Subject: [PATCH] Fix unserialized task->files changing Fixed race on put_files_struct on exec with proc. Restoring files on current on error path may lead to proc having a pointer to already kfree-d files_struct. ->files changing at exit.c and khtread.c are safe as exit_files() makes all things under lock. Found during OpenVZ stress testing. [akpm@osdl.org: add export] Signed-off-by: Pavel Emelianov Signed-off-by: Kirill Korotaev Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index d891883420f7..4b6fb054b25d 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -487,6 +487,18 @@ void fastcall put_files_struct(struct files_struct *files) EXPORT_SYMBOL(put_files_struct); +void reset_files_struct(struct task_struct *tsk, struct files_struct *files) +{ + struct files_struct *old; + + old = tsk->files; + task_lock(tsk); + tsk->files = files; + task_unlock(tsk); + put_files_struct(old); +} +EXPORT_SYMBOL(reset_files_struct); + static inline void __exit_files(struct task_struct *tsk) { struct files_struct * files = tsk->files; -- cgit v1.2.3 From f400e198b2ed26ce55b22a1412ded0896e7516ac Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 29 Sep 2006 02:00:07 -0700 Subject: [PATCH] pidspace: is_init() This is an updated version of Eric Biederman's is_init() patch. (http://lkml.org/lkml/2006/2/6/280). It applies cleanly to 2.6.18-rc3 and replaces a few more instances of ->pid == 1 with is_init(). Further, is_init() checks pid and thus removes dependency on Eric's other patches for now. Eric's original description: There are a lot of places in the kernel where we test for init because we give it special properties. Most significantly init must not die. This results in code all over the kernel test ->pid == 1. Introduce is_init to capture this case. 
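The helper itself is a one-line predicate; as described here it just checks the pid, so its definition amounts to something like the sketch below (shown for illustration; the real definition lives in a header alongside task_struct):

static inline int is_init(struct task_struct *tsk)
{
	return tsk->pid == 1;	/* pid 1 is init */
}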
With multiple pid spaces for all of the cases affected we are looking for only the first process on the system, not some other process that has pid == 1. Signed-off-by: Eric W. Biederman Signed-off-by: Sukadev Bhattiprolu Cc: Dave Hansen Cc: Serge Hallyn Cc: Cedric Le Goater Cc: Acked-by: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/capability.c | 2 +- kernel/cpuset.c | 2 +- kernel/exit.c | 2 +- kernel/kexec.c | 2 +- kernel/ptrace.c | 1 + kernel/sysctl.c | 2 +- 6 files changed, 6 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/capability.c b/kernel/capability.c index c7685ad00a97..edb845a6e84a 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -133,7 +133,7 @@ static inline int cap_set_all(kernel_cap_t *effective, int found = 0; do_each_thread(g, target) { - if (target == current || target->pid == 1) + if (target == current || is_init(target)) continue; found = 1; if (security_capset_check(target, effective, inheritable, diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 1b32c2c04c15..584bb4e6c042 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -240,7 +240,7 @@ static struct super_block *cpuset_sb; * A cpuset can only be deleted if both its 'count' of using tasks * is zero, and its list of 'children' cpusets is empty. Since all * tasks in the system use _some_ cpuset, and since there is always at - * least one task in the system (init, pid == 1), therefore, top_cpuset + * least one task in the system (init), therefore, top_cpuset * always has either children cpusets and/or using tasks. So we don't * need a special hack to ensure that top_cpuset cannot be deleted. * diff --git a/kernel/exit.c b/kernel/exit.c index 4b6fb054b25d..9961192d6055 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -219,7 +219,7 @@ static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task) do_each_task_pid(pgrp, PIDTYPE_PGID, p) { if (p == ignored_task || p->exit_state - || p->real_parent->pid == 1) + || is_init(p->real_parent)) continue; if (process_group(p->real_parent) != pgrp && p->real_parent->signal->session == p->signal->session) { diff --git a/kernel/kexec.c b/kernel/kexec.c index 50087ecf337e..37cad75cf494 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -40,7 +40,7 @@ struct resource crashk_res = { int kexec_should_crash(struct task_struct *p) { - if (in_interrupt() || !p->pid || p->pid == 1 || panic_on_oops) + if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops) return 1; return 0; } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 8aad0331d82e..4d50e06fd745 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -440,6 +440,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid) child = find_task_by_pid(pid); if (child) get_task_struct(child); + read_unlock(&tasklist_lock); if (!child) return ERR_PTR(-ESRCH); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8bfa7d117c54..9535a3839930 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1915,7 +1915,7 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, return -EPERM; } - op = (current->pid == 1) ? OP_SET : OP_AND; + op = is_init(current) ? 
OP_SET : OP_AND; return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, do_proc_dointvec_bset_conv,&op); } -- cgit v1.2.3 From 99de055ac065e19ed69de961e97c6336a261b34e Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Fri, 29 Sep 2006 02:00:10 -0700 Subject: [PATCH] lockdep: print kernel version Lets do the same thing we do for oopses - print out the version in the report. It's an extra line of output though. We could tack it on the end of the INFO: lines, but that screws up Ingo's pretty output. Signed-off-by: Dave Jones Cc: Ingo Molnar Cc: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/lockdep.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index c088e5542e84..df1c3594de31 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -36,6 +36,7 @@ #include #include #include +#include #include @@ -515,6 +516,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth) return 0; } +static void print_kernel_version(void) +{ + printk("%s %.*s\n", system_utsname.release, + (int)strcspn(system_utsname.version, " "), + system_utsname.version); +} + /* * When a circular dependency is detected, print the * header first: @@ -531,6 +539,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth) printk("\n=======================================================\n"); printk( "[ INFO: possible circular locking dependency detected ]\n"); + print_kernel_version(); printk( "-------------------------------------------------------\n"); printk("%s/%d is trying to acquire lock:\n", curr->comm, curr->pid); @@ -712,6 +721,7 @@ print_bad_irq_dependency(struct task_struct *curr, printk("\n======================================================\n"); printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n", irqclass, irqclass); + print_kernel_version(); printk( "------------------------------------------------------\n"); printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", curr->comm, curr->pid, @@ -793,6 +803,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, printk("\n=============================================\n"); printk( "[ INFO: possible recursive locking detected ]\n"); + print_kernel_version(); printk( "---------------------------------------------\n"); printk("%s/%d is trying to acquire lock:\n", curr->comm, curr->pid); @@ -1375,6 +1386,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, printk("\n=========================================================\n"); printk( "[ INFO: possible irq lock inversion dependency detected ]\n"); + print_kernel_version(); printk( "---------------------------------------------------------\n"); printk("%s/%d just changed the state of lock:\n", curr->comm, curr->pid); @@ -1469,6 +1481,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, printk("\n=================================\n"); printk( "[ INFO: inconsistent lock state ]\n"); + print_kernel_version(); printk( "---------------------------------\n"); printk("inconsistent {%s} -> {%s} usage.\n", -- cgit v1.2.3 From a45bce49545739a940f8bd4ca85c3b7435564893 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 29 Sep 2006 02:00:11 -0700 Subject: [PATCH] memory ordering in __kfifo primitives Both __kfifo_put() and __kfifo_get() have header comments stating that if there is but one concurrent reader and one concurrent writer, locking is not necessary. 
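The lockless usage those header comments describe looks roughly like the sketch below (hypothetical producer/consumer helpers; the fifo is assumed to have been set up with kfifo_alloc() elsewhere). One thread only puts, the other only gets, and neither takes the fifo's spinlock:

static struct kfifo *example_fifo;	/* one producer, one consumer */

static unsigned int example_produce(unsigned char *buf, unsigned int len)
{
	/* returns how many bytes actually fit; fifo->lock is not taken */
	return __kfifo_put(example_fifo, buf, len);
}

static unsigned int example_consume(unsigned char *buf, unsigned int len)
{
	/* returns how many bytes were copied out; fifo->lock is not taken */
	return __kfifo_get(example_fifo, buf, len);
}

As the header comments promise, this is meant to be safe with a single producer and a single consumer.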
This is almost the case, but a couple of memory barriers are needed. Another option would be to change the header comments to remove the bit about locking not being needed, and to change those callers who currently don't use locking to add the required locking. The attachment analyzes this approach, but the patch below seems simpler. Signed-off-by: Paul E. McKenney Cc: Stelian Pop Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kfifo.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'kernel') diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 64ab045c3d9d..5d1d907378a2 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -122,6 +122,13 @@ unsigned int __kfifo_put(struct kfifo *fifo, len = min(len, fifo->size - fifo->in + fifo->out); + /* + * Ensure that we sample the fifo->out index -before- we + * start putting bytes into the kfifo. + */ + + smp_mb(); + /* first put the data starting from fifo->in to buffer end */ l = min(len, fifo->size - (fifo->in & (fifo->size - 1))); memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l); @@ -129,6 +136,13 @@ unsigned int __kfifo_put(struct kfifo *fifo, /* then put the rest (if any) at the beginning of the buffer */ memcpy(fifo->buffer, buffer + l, len - l); + /* + * Ensure that we add the bytes to the kfifo -before- + * we update the fifo->in index. + */ + + smp_wmb(); + fifo->in += len; return len; @@ -154,6 +168,13 @@ unsigned int __kfifo_get(struct kfifo *fifo, len = min(len, fifo->in - fifo->out); + /* + * Ensure that we sample the fifo->in index -before- we + * start removing bytes from the kfifo. + */ + + smp_rmb(); + /* first get the data from fifo->out until the end of the buffer */ l = min(len, fifo->size - (fifo->out & (fifo->size - 1))); memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l); @@ -161,6 +182,13 @@ unsigned int __kfifo_get(struct kfifo *fifo, /* then get the rest (if any) from the beginning of the buffer */ memcpy(buffer + l, fifo->buffer, len - l); + /* + * Ensure that we remove the bytes from the kfifo -before- + * we update the fifo->out index. + */ + + smp_mb(); + fifo->out += len; return len; -- cgit v1.2.3 From 07dccf3344010f9b9df7fe725da7e73bca2992df Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 29 Sep 2006 02:00:22 -0700 Subject: [PATCH] check return value of cpu_callback Spawning ksoftirqd, migration, or watchdog, and calling init_timers_cpu() may fail when memory is low. If it happens in initcalls, a kernel NULL pointer dereference happens later. This patch makes the crash happen immediately in such cases. It seems a bit better than getting a kernel NULL pointer dereference later.
Cc: Ingo Molnar Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 4 +++- kernel/softirq.c | 4 +++- kernel/softlockup.c | 3 ++- kernel/timer.c | 4 +++- 4 files changed, 11 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 5c848fd4e461..b946209d9c15 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5272,9 +5272,11 @@ static struct notifier_block __cpuinitdata migration_notifier = { int __init migration_init(void) { void *cpu = (void *)(long)smp_processor_id(); + int err; /* Start one for the boot CPU: */ - migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); + err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); + BUG_ON(err == NOTIFY_BAD); migration_call(&migration_notifier, CPU_ONLINE, cpu); register_cpu_notifier(&migration_notifier); diff --git a/kernel/softirq.c b/kernel/softirq.c index 3789ca98197c..bf25015dce16 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -612,7 +612,9 @@ static struct notifier_block __cpuinitdata cpu_nfb = { __init int spawn_ksoftirqd(void) { void *cpu = (void *)(long)smp_processor_id(); - cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + + BUG_ON(err == NOTIFY_BAD); cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); return 0; diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 03e6a2b0b787..50afeb813305 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -149,8 +149,9 @@ static struct notifier_block __cpuinitdata cpu_nfb = { __init void spawn_softlockup_task(void) { void *cpu = (void *)(long)smp_processor_id(); + int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); + BUG_ON(err == NOTIFY_BAD); cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); diff --git a/kernel/timer.c b/kernel/timer.c index d644f4e9ca0c..a2cb1ecb1b28 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1694,8 +1694,10 @@ static struct notifier_block __cpuinitdata timers_nb = { void __init init_timers(void) { - timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, + int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, (void *)(long)smp_processor_id()); + + BUG_ON(err == NOTIFY_BAD); register_cpu_notifier(&timers_nb); open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); } -- cgit v1.2.3 From 1711ef3866b0360e102327389fe4b76c849bbe83 Mon Sep 17 00:00:00 2001 From: Toyo Abe Date: Fri, 29 Sep 2006 02:00:28 -0700 Subject: [PATCH] posix-timers: Fix clock_nanosleep() doesn't return the remaining time in compatibility mode The clock_nanosleep() function does not return the time remaining when the sleep is interrupted by a signal. This patch creates a new call out, compat_clock_nanosleep_restart(), which handles returning the remaining time after a sleep is interrupted. This patch revives clock_nanosleep_restart(). It is now accessed via the new call out. The compat_clock_nanosleep_restart() is used for compatibility access. Since this is implemented in compatibility mode the normal path is virtually unaffected - no real performance impact. 
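The machinery being used here is the generic syscall-restart hook: the sleep stashes its state in the per-thread restart_block, returns -ERESTART_RESTARTBLOCK, and the signal code later re-enters through the stored function pointer. A reduced sketch of that arrangement (hypothetical wrapper function; the field usage matches the compat code in the patch below):

static long example_arrange_restart(struct compat_timespec __user *rmtp)
{
	struct restart_block *restart = &current_thread_info()->restart_block;

	restart->fn = compat_clock_nanosleep_restart;	/* called on restart */
	restart->arg1 = (unsigned long) rmtp;		/* where to report the time left */
	return -ERESTART_RESTARTBLOCK;
}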
Signed-off-by: Toyo Abe Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/compat.c | 33 +++++++++++++++++++++++++++++++++ kernel/hrtimer.c | 20 ++++++++++---------- kernel/posix-cpu-timers.c | 17 ++++++++++++----- kernel/posix-timers.c | 21 +++++++++++++++++++++ 4 files changed, 76 insertions(+), 15 deletions(-) (limited to 'kernel') diff --git a/kernel/compat.c b/kernel/compat.c index 126dee9530aa..75573e5d27b0 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -22,6 +22,7 @@ #include #include #include +#include #include @@ -601,6 +602,30 @@ long compat_sys_clock_getres(clockid_t which_clock, return err; } +static long compat_clock_nanosleep_restart(struct restart_block *restart) +{ + long err; + mm_segment_t oldfs; + struct timespec tu; + struct compat_timespec *rmtp = (struct compat_timespec *)(restart->arg1); + + restart->arg1 = (unsigned long) &tu; + oldfs = get_fs(); + set_fs(KERNEL_DS); + err = clock_nanosleep_restart(restart); + set_fs(oldfs); + + if ((err == -ERESTART_RESTARTBLOCK) && rmtp && + put_compat_timespec(&tu, rmtp)) + return -EFAULT; + + if (err == -ERESTART_RESTARTBLOCK) { + restart->fn = compat_clock_nanosleep_restart; + restart->arg1 = (unsigned long) rmtp; + } + return err; +} + long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, struct compat_timespec __user *rqtp, struct compat_timespec __user *rmtp) @@ -608,6 +633,7 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, long err; mm_segment_t oldfs; struct timespec in, out; + struct restart_block *restart; if (get_compat_timespec(&in, rqtp)) return -EFAULT; @@ -618,9 +644,16 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, (struct timespec __user *) &in, (struct timespec __user *) &out); set_fs(oldfs); + if ((err == -ERESTART_RESTARTBLOCK) && rmtp && put_compat_timespec(&out, rmtp)) return -EFAULT; + + if (err == -ERESTART_RESTARTBLOCK) { + restart = ¤t_thread_info()->restart_block; + restart->fn = compat_clock_nanosleep_restart; + restart->arg1 = (unsigned long) rmtp; + } return err; } diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 21c38a7e666b..d0ba190dfeb6 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -693,7 +693,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod return t->task == NULL; } -static long __sched nanosleep_restart(struct restart_block *restart) +long __sched hrtimer_nanosleep_restart(struct restart_block *restart) { struct hrtimer_sleeper t; struct timespec __user *rmtp; @@ -702,13 +702,13 @@ static long __sched nanosleep_restart(struct restart_block *restart) restart->fn = do_no_restart_syscall; - hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS); - t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0; + hrtimer_init(&t.timer, restart->arg0, HRTIMER_ABS); + t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; if (do_nanosleep(&t, HRTIMER_ABS)) return 0; - rmtp = (struct timespec __user *) restart->arg2; + rmtp = (struct timespec __user *) restart->arg1; if (rmtp) { time = ktime_sub(t.timer.expires, t.timer.base->get_time()); if (time.tv64 <= 0) @@ -718,7 +718,7 @@ static long __sched nanosleep_restart(struct restart_block *restart) return -EFAULT; } - restart->fn = nanosleep_restart; + restart->fn = hrtimer_nanosleep_restart; /* The other values in restart are already filled in */ return -ERESTART_RESTARTBLOCK; @@ -751,11 +751,11 @@ long hrtimer_nanosleep(struct timespec *rqtp, 
struct timespec __user *rmtp, } restart = ¤t_thread_info()->restart_block; - restart->fn = nanosleep_restart; - restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF; - restart->arg1 = t.timer.expires.tv64 >> 32; - restart->arg2 = (unsigned long) rmtp; - restart->arg3 = (unsigned long) t.timer.base->index; + restart->fn = hrtimer_nanosleep_restart; + restart->arg0 = (unsigned long) t.timer.base->index; + restart->arg1 = (unsigned long) rmtp; + restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF; + restart->arg3 = t.timer.expires.tv64 >> 32; return -ERESTART_RESTARTBLOCK; } diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index d38d9ec3276c..5667fef89dd1 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1393,8 +1393,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, } } -static long posix_cpu_clock_nanosleep_restart(struct restart_block *); - int posix_cpu_nsleep(const clockid_t which_clock, int flags, struct timespec *rqtp, struct timespec __user *rmtp) { @@ -1471,7 +1469,7 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags, copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) return -EFAULT; - restart_block->fn = posix_cpu_clock_nanosleep_restart; + restart_block->fn = posix_cpu_nsleep_restart; /* Caller already set restart_block->arg1 */ restart_block->arg0 = which_clock; restart_block->arg1 = (unsigned long) rmtp; @@ -1484,8 +1482,7 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags, return error; } -static long -posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block) +long posix_cpu_nsleep_restart(struct restart_block *restart_block) { clockid_t which_clock = restart_block->arg0; struct timespec __user *rmtp; @@ -1524,6 +1521,10 @@ static int process_cpu_nsleep(const clockid_t which_clock, int flags, { return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp); } +static long process_cpu_nsleep_restart(struct restart_block *restart_block) +{ + return -EINVAL; +} static int thread_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) { @@ -1544,6 +1545,10 @@ static int thread_cpu_nsleep(const clockid_t which_clock, int flags, { return -EINVAL; } +static long thread_cpu_nsleep_restart(struct restart_block *restart_block) +{ + return -EINVAL; +} static __init int init_posix_cpu_timers(void) { @@ -1553,6 +1558,7 @@ static __init int init_posix_cpu_timers(void) .clock_set = do_posix_clock_nosettime, .timer_create = process_cpu_timer_create, .nsleep = process_cpu_nsleep, + .nsleep_restart = process_cpu_nsleep_restart, }; struct k_clock thread = { .clock_getres = thread_cpu_clock_getres, @@ -1560,6 +1566,7 @@ static __init int init_posix_cpu_timers(void) .clock_set = do_posix_clock_nosettime, .timer_create = thread_cpu_timer_create, .nsleep = thread_cpu_nsleep, + .nsleep_restart = thread_cpu_nsleep_restart, }; register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index ac6dc8744429..e5ebcc1ec3a0 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -973,3 +973,24 @@ sys_clock_nanosleep(const clockid_t which_clock, int flags, return CLOCK_DISPATCH(which_clock, nsleep, (which_clock, flags, &t, rmtp)); } + +/* + * nanosleep_restart for monotonic and realtime clocks + */ +static int common_nsleep_restart(struct restart_block *restart_block) +{ + return hrtimer_nanosleep_restart(restart_block); +} + +/* + * This will restart clock_nanosleep. This is required only by + * compat_clock_nanosleep_restart for now. 
+ */ +long +clock_nanosleep_restart(struct restart_block *restart_block) +{ + clockid_t which_clock = restart_block->arg0; + + return CLOCK_DISPATCH(which_clock, nsleep_restart, + (restart_block)); +} -- cgit v1.2.3 From e4b765551aa6355eae60b644bed851a9477c4e2b Mon Sep 17 00:00:00 2001 From: Toyo Abe Date: Fri, 29 Sep 2006 02:00:29 -0700 Subject: [PATCH] posix-timers: Fix the flags handling in posix_cpu_nsleep() When a posix_cpu_nsleep() sleep is interrupted by a signal more than twice, it incorrectly reports the sleep time remaining to the user. Because posix_cpu_nsleep() doesn't report back to the user when it's called from restart function due to the wrong flags handling. This patch, which applies after previous one, moves the nanosleep() function from posix_cpu_nsleep() to do_cpu_nanosleep() and cleans up the flags handling appropriately. Signed-off-by: Toyo Abe Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Roland McGrath Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/posix-cpu-timers.c | 84 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 58 insertions(+), 26 deletions(-) (limited to 'kernel') diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 5667fef89dd1..479b16b44f79 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1393,22 +1393,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, } } -int posix_cpu_nsleep(const clockid_t which_clock, int flags, - struct timespec *rqtp, struct timespec __user *rmtp) +static int do_cpu_nanosleep(const clockid_t which_clock, int flags, + struct timespec *rqtp, struct itimerspec *it) { - struct restart_block *restart_block = - ¤t_thread_info()->restart_block; struct k_itimer timer; int error; - /* - * Diagnose required errors first. - */ - if (CPUCLOCK_PERTHREAD(which_clock) && - (CPUCLOCK_PID(which_clock) == 0 || - CPUCLOCK_PID(which_clock) == current->pid)) - return -EINVAL; - /* * Set up a temporary timer and then wait for it to go off. */ @@ -1420,11 +1410,12 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags, timer.it_process = current; if (!error) { static struct itimerspec zero_it; - struct itimerspec it = { .it_value = *rqtp, - .it_interval = {} }; + + memset(it, 0, sizeof *it); + it->it_value = *rqtp; spin_lock_irq(&timer.it_lock); - error = posix_cpu_timer_set(&timer, flags, &it, NULL); + error = posix_cpu_timer_set(&timer, flags, it, NULL); if (error) { spin_unlock_irq(&timer.it_lock); return error; @@ -1452,33 +1443,56 @@ int posix_cpu_nsleep(const clockid_t which_clock, int flags, * We were interrupted by a signal. */ sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); - posix_cpu_timer_set(&timer, 0, &zero_it, &it); + posix_cpu_timer_set(&timer, 0, &zero_it, it); spin_unlock_irq(&timer.it_lock); - if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) { + if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { /* * It actually did fire already. */ return 0; } + error = -ERESTART_RESTARTBLOCK; + } + + return error; +} + +int posix_cpu_nsleep(const clockid_t which_clock, int flags, + struct timespec *rqtp, struct timespec __user *rmtp) +{ + struct restart_block *restart_block = + ¤t_thread_info()->restart_block; + struct itimerspec it; + int error; + + /* + * Diagnose required errors first. 
+ */ + if (CPUCLOCK_PERTHREAD(which_clock) && + (CPUCLOCK_PID(which_clock) == 0 || + CPUCLOCK_PID(which_clock) == current->pid)) + return -EINVAL; + + error = do_cpu_nanosleep(which_clock, flags, rqtp, &it); + + if (error == -ERESTART_RESTARTBLOCK) { + + if (flags & TIMER_ABSTIME) + return -ERESTARTNOHAND; /* - * Report back to the user the time still remaining. - */ - if (rmtp != NULL && !(flags & TIMER_ABSTIME) && - copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) + * Report back to the user the time still remaining. + */ + if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) return -EFAULT; restart_block->fn = posix_cpu_nsleep_restart; - /* Caller already set restart_block->arg1 */ restart_block->arg0 = which_clock; restart_block->arg1 = (unsigned long) rmtp; restart_block->arg2 = rqtp->tv_sec; restart_block->arg3 = rqtp->tv_nsec; - - error = -ERESTART_RESTARTBLOCK; } - return error; } @@ -1487,13 +1501,31 @@ long posix_cpu_nsleep_restart(struct restart_block *restart_block) clockid_t which_clock = restart_block->arg0; struct timespec __user *rmtp; struct timespec t; + struct itimerspec it; + int error; rmtp = (struct timespec __user *) restart_block->arg1; t.tv_sec = restart_block->arg2; t.tv_nsec = restart_block->arg3; restart_block->fn = do_no_restart_syscall; - return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp); + error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it); + + if (error == -ERESTART_RESTARTBLOCK) { + /* + * Report back to the user the time still remaining. + */ + if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp)) + return -EFAULT; + + restart_block->fn = posix_cpu_nsleep_restart; + restart_block->arg0 = which_clock; + restart_block->arg1 = (unsigned long) rmtp; + restart_block->arg2 = t.tv_sec; + restart_block->arg3 = t.tv_nsec; + } + return error; + } -- cgit v1.2.3 From b9ecb2bd5d3ab8904752685696cb76aac1f3fef2 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Fri, 29 Sep 2006 02:00:31 -0700 Subject: [PATCH] has_stopped_jobs() cleanup This check has been obsolete since the introduction of TASK_TRACED. Now TASK_STOPPED always means job control stop. Signed-off-by: Roland McGrath Cc: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 9961192d6055..3ec7b10eae38 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -249,17 +249,6 @@ static int has_stopped_jobs(int pgrp) do_each_task_pid(pgrp, PIDTYPE_PGID, p) { if (p->state != TASK_STOPPED) continue; - - /* If p is stopped by a debugger on a signal that won't - stop it, then don't count p as stopped. This isn't - perfect but it's a good approximation. */ - if (unlikely (p->ptrace) - && p->exit_code != SIGSTOP - && p->exit_code != SIGTSTP - && p->exit_code != SIGTTOU - && p->exit_code != SIGTTIN) - continue; - retval = 1; break; } while_each_task_pid(pgrp, PIDTYPE_PGID, p); -- cgit v1.2.3 From 27d91e07f9b863fa94491ffafe250580f0c2ce78 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Fri, 29 Sep 2006 02:00:31 -0700 Subject: [PATCH] __dequeue_signal() cleanup This tightens up __dequeue_signal a little. It also avoids doing recalc_sigpending twice in a row, instead doing it once in dequeue_signal. 
Signed-off-by: Roland McGrath Cc: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/signal.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/signal.c b/kernel/signal.c index 05853a7337e3..fb5da6d19f14 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -417,9 +417,8 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, siginfo_t *info) { - int sig = 0; + int sig = next_signal(pending, mask); - sig = next_signal(pending, mask); if (sig) { if (current->notifier) { if (sigismember(current->notifier_mask, sig)) { @@ -432,9 +431,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, if (!collect_signal(sig, pending, info)) sig = 0; - } - recalc_sigpending(); return sig; } @@ -451,6 +448,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) if (!signr) signr = __dequeue_signal(&tsk->signal->shared_pending, mask, info); + recalc_sigpending_tsk(tsk); if (signr && unlikely(sig_kernel_stop(signr))) { /* * Set a marker that we have dequeued a stop signal. Our -- cgit v1.2.3 From 3171a0305d62e6627a24bff35af4f997e4988a80 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Fri, 29 Sep 2006 02:00:32 -0700 Subject: [PATCH] simplify update_times (avoid jiffies/jiffies_64 aliasing problem) Pass ticks to do_timer() and update_times(), and adjust x86_64 and s390 timer interrupt handler with this change. Currently update_times() calculates ticks by "jiffies - wall_jiffies", but callers of do_timer() should know how many ticks to update. Passing ticks get rid of this redundant calculation. Also there are another redundancy pointed out by Martin Schwidefsky. This cleanup make a barrier added by 5aee405c662ca644980c184774277fc6d0769a84 needless. So this patch removes it. As a bonus, this cleanup make wall_jiffies can be removed easily, since now wall_jiffies is always synced with jiffies. (This patch does not really remove wall_jiffies. It would be another cleanup patch) Signed-off-by: Atsushi Nemoto Cc: Martin Schwidefsky Cc: "Eric W. Biederman" Cc: Thomas Gleixner Cc: Ingo Molnar Cc: john stultz Cc: Andi Kleen Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Richard Henderson Cc: Ivan Kokshaysky Acked-by: Russell King Cc: Ian Molton Cc: Mikael Starvik Acked-by: David Howells Cc: Yoshinori Sato Cc: Hirokazu Takata Acked-by: Ralf Baechle Cc: Kyle McMartin Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Paul Mundt Cc: Kazumoto Kojima Cc: Richard Curnow Cc: William Lee Irwin III Cc: "David S. 
Miller" Cc: Jeff Dike Cc: Paolo 'Blaisorblade' Giarrusso Cc: Miles Bader Cc: Chris Zankel Acked-by: "Luck, Tony" Cc: Geert Uytterhoeven Cc: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index a2cb1ecb1b28..4f55622b0d38 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1222,10 +1222,8 @@ static inline void calc_load(unsigned long ticks) unsigned long active_tasks; /* fixed-point */ static int count = LOAD_FREQ; - count -= ticks; - if (count < 0) { - count += LOAD_FREQ; - active_tasks = count_active_tasks(); + active_tasks = count_active_tasks(); + for (count -= ticks; count < 0; count += LOAD_FREQ) { CALC_LOAD(avenrun[0], EXP_1, active_tasks); CALC_LOAD(avenrun[1], EXP_5, active_tasks); CALC_LOAD(avenrun[2], EXP_15, active_tasks); @@ -1270,11 +1268,8 @@ void run_local_timers(void) * Called by the timer interrupt. xtime_lock must already be taken * by the timer IRQ! */ -static inline void update_times(void) +static inline void update_times(unsigned long ticks) { - unsigned long ticks; - - ticks = jiffies - wall_jiffies; wall_jiffies += ticks; update_wall_time(); calc_load(ticks); @@ -1286,12 +1281,10 @@ static inline void update_times(void) * jiffies is defined in the linker script... */ -void do_timer(struct pt_regs *regs) +void do_timer(unsigned long ticks) { - jiffies_64++; - /* prevent loading jiffies before storing new jiffies_64 value. */ - barrier(); - update_times(); + jiffies_64 += ticks; + update_times(ticks); } #ifdef __ARCH_WANT_SYS_ALARM -- cgit v1.2.3 From 0b4a8a789a328af6aac613734c362cf6aad72201 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Fri, 29 Sep 2006 02:00:39 -0700 Subject: [PATCH] kexec warning fix This fixes a couple of compiler warnings, and adds paranoia checks as well. Signed-off-by: Roland McGrath Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kexec.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/kexec.c b/kernel/kexec.c index 37cad75cf494..fcdd5d2bc3f4 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -995,7 +995,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, image = xchg(dest_image, image); out: - xchg(&kexec_lock, 0); /* Release the mutex */ + locked = xchg(&kexec_lock, 0); /* Release the mutex */ + BUG_ON(!locked); kimage_free(image); return result; @@ -1061,7 +1062,8 @@ void crash_kexec(struct pt_regs *regs) machine_crash_shutdown(&fixed_regs); machine_kexec(kexec_crash_image); } - xchg(&kexec_lock, 0); + locked = xchg(&kexec_lock, 0); + BUG_ON(!locked); } } -- cgit v1.2.3 From 54306cf04c0ea0a8c432603dbc657ab62a438668 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 29 Sep 2006 02:00:42 -0700 Subject: [PATCH] exit: fix crash case If we are going to BUG() not panic() here then we should cover the case of the BUG being compiled out Signed-off-by: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 3ec7b10eae38..a51b347ae67b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -963,7 +963,8 @@ fastcall NORET_TYPE void do_exit(long code) schedule(); BUG(); /* Avoid "noreturn function does return". 
*/ - for (;;) ; + for (;;) + cpu_relax(); /* For when BUG is null */ } EXPORT_SYMBOL_GPL(do_exit); -- cgit v1.2.3 From 111dbe0c8a21dffa473239861be47ebc87f593b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Steinbrink?= Date: Fri, 29 Sep 2006 02:00:46 -0700 Subject: [PATCH] Fix ____call_usermodehelper errors being silently ignored MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If ____call_usermodehelper fails, we're not interested in the child process' exit value, but the real error, so let's stop wait_for_helper from overwriting it in that case. Issue discovered by Benedikt Böhm while working on a Linux-VServer usermode helper. Signed-off-by: Björn Steinbrink Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kmod.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kmod.c b/kernel/kmod.c index 5c470c57fb57..842f8015d7fd 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -176,6 +176,8 @@ static int wait_for_helper(void *data) if (pid < 0) { sub_info->retval = pid; } else { + int ret; + /* * Normally it is bogus to call wait4() from in-kernel because * wait4() wants to write the exit code to a userspace address. @@ -185,7 +187,15 @@ static int wait_for_helper(void *data) * * Thus the __user pointer cast is valid here. */ - sys_wait4(pid, (int __user *) &sub_info->retval, 0, NULL); + sys_wait4(pid, (int __user *)&ret, 0, NULL); + + /* + * If ret is 0, either ____call_usermodehelper failed and the + * real error code is already in sub_info->retval or + * sub_info->retval is 0 anyway, so don't mess with it then. + */ + if (ret) + sub_info->retval = ret; } complete(sub_info->complete); -- cgit v1.2.3 From c9472e0f28cd2f0695a0ac3a0b4bd33f21714a7e Mon Sep 17 00:00:00 2001 From: Cal Peake Date: Fri, 29 Sep 2006 02:00:47 -0700 Subject: [PATCH] kill extraneous printk in kernel_restart() Get rid of an extraneous printk in kernel_restart(). Signed-off-by: Cal Peake Acked-by: Eric W. Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 3f894775488d..8647061c084a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -612,7 +612,6 @@ void kernel_restart(char *cmd) } else { printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); } - printk(".\n"); machine_restart(cmd); } EXPORT_SYMBOL_GPL(kernel_restart); -- cgit v1.2.3 From 5fe1d75f34974046fffcca5e22fb8a7b42fded33 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:48 -0700 Subject: [PATCH] do_sched_setscheduler(): don't take tasklist_lock Use rcu locks instead. sched_setscheduler() now takes ->siglock before reading ->signal->rlim[]. Signed-off-by: Oleg Nesterov Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Steven Rostedt Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index b946209d9c15..f9b3c6a414f1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4080,6 +4080,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio) * @p: the task in question. * @policy: new policy. * @param: structure containing the new RT priority. 
+ * + * NOTE: the task may be already dead */ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param) @@ -4115,19 +4117,26 @@ recheck: * Allow unprivileged RT tasks to decrease priority: */ if (!capable(CAP_SYS_NICE)) { + unsigned long rlim_rtprio; + unsigned long flags; + + if (!lock_task_sighand(p, &flags)) + return -ESRCH; + rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; + unlock_task_sighand(p, &flags); + /* * can't change policy, except between SCHED_NORMAL * and SCHED_BATCH: */ if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) && (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) && - !p->signal->rlim[RLIMIT_RTPRIO].rlim_cur) + !rlim_rtprio) return -EPERM; /* can't increase priority */ if ((policy != SCHED_NORMAL && policy != SCHED_BATCH) && param->sched_priority > p->rt_priority && - param->sched_priority > - p->signal->rlim[RLIMIT_RTPRIO].rlim_cur) + param->sched_priority > rlim_rtprio) return -EPERM; /* can't change other user's priorities */ if ((current->euid != p->euid) && @@ -4193,14 +4202,13 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) return -EINVAL; if (copy_from_user(&lparam, param, sizeof(struct sched_param))) return -EFAULT; - read_lock_irq(&tasklist_lock); + + rcu_read_lock(); + retval = -ESRCH; p = find_process_by_pid(pid); - if (!p) { - read_unlock_irq(&tasklist_lock); - return -ESRCH; - } - retval = sched_setscheduler(p, policy, &lparam); - read_unlock_irq(&tasklist_lock); + if (p != NULL) + retval = sched_setscheduler(p, policy, &lparam); + rcu_read_unlock(); return retval; } -- cgit v1.2.3 From 57a6f51c4281aa3119975473c70dce0480d322bd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:49 -0700 Subject: [PATCH] introduce is_rt_policy() helper Imho, makes the code a bit easier to read. Signed-off-by: Oleg Nesterov Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Steven Rostedt Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index f9b3c6a414f1..c3c718aea618 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4109,8 +4109,7 @@ recheck: (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) || (!p->mm && param->sched_priority > MAX_RT_PRIO-1)) return -EINVAL; - if ((policy == SCHED_NORMAL || policy == SCHED_BATCH) - != (param->sched_priority == 0)) + if (is_rt_policy(policy) != (param->sched_priority != 0)) return -EINVAL; /* @@ -4134,7 +4133,7 @@ recheck: !rlim_rtprio) return -EPERM; /* can't increase priority */ - if ((policy != SCHED_NORMAL && policy != SCHED_BATCH) && + if (is_rt_policy(policy) && param->sched_priority > p->rt_priority && param->sched_priority > rlim_rtprio) return -EPERM; -- cgit v1.2.3 From 8dc3e9099e01dfdd53f27f329c268eced03dfef6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:50 -0700 Subject: [PATCH] sched_setscheduler: fix? policy checks I am not sure this patch is correct: I can't understand what the current code does, and I don't know what it was supposed to do. The comment says: * can't change policy, except between SCHED_NORMAL * and SCHED_BATCH: The code: if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) && (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) && But this is equivalent to: if ( (is_rt_policy(policy) && has_rt_policy(p)) && which means something different. 
We can't _decrease_ the current ->rt_priority with such a check (if rlim[RLIMIT_RTPRIO] == 0). Probably, it was supposed to be: if ( !(policy == SCHED_NORMAL && p->policy == SCHED_BATCH) && !(policy == SCHED_BATCH && p->policy == SCHED_NORMAL) this matches the comment, but strange: it doesn't allow to _drop_ the realtime priority when rlim[RLIMIT_RTPRIO] == 0. I think the right check would be: /* can't set/change rt policy */ if (is_rt_policy(policy) && policy != p->policy && !rlim_rtprio) return -EPERM; Signed-off-by: Oleg Nesterov Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Steven Rostedt Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sched.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index c3c718aea618..155a33da7aa7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4116,27 +4116,25 @@ recheck: * Allow unprivileged RT tasks to decrease priority: */ if (!capable(CAP_SYS_NICE)) { - unsigned long rlim_rtprio; - unsigned long flags; - - if (!lock_task_sighand(p, &flags)) - return -ESRCH; - rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; - unlock_task_sighand(p, &flags); + if (is_rt_policy(policy)) { + unsigned long rlim_rtprio; + unsigned long flags; + + if (!lock_task_sighand(p, &flags)) + return -ESRCH; + rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur; + unlock_task_sighand(p, &flags); + + /* can't set/change the rt policy */ + if (policy != p->policy && !rlim_rtprio) + return -EPERM; + + /* can't increase priority */ + if (param->sched_priority > p->rt_priority && + param->sched_priority > rlim_rtprio) + return -EPERM; + } - /* - * can't change policy, except between SCHED_NORMAL - * and SCHED_BATCH: - */ - if (((policy != SCHED_NORMAL && p->policy != SCHED_BATCH) && - (policy != SCHED_BATCH && p->policy != SCHED_NORMAL)) && - !rlim_rtprio) - return -EPERM; - /* can't increase priority */ - if (is_rt_policy(policy) && - param->sched_priority > p->rt_priority && - param->sched_priority > rlim_rtprio) - return -EPERM; /* can't change other user's priorities */ if ((current->euid != p->euid) && (current->euid != p->uid)) -- cgit v1.2.3 From 1c573afebc6213e4372e0d8352034c23d5262e1f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:51 -0700 Subject: [PATCH] reparent_to_init(): use has_rt_policy() Remove open-coded has_rt_policy(), no changes in kernel/exit.o Signed-off-by: Oleg Nesterov Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Steven Rostedt Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index a51b347ae67b..4a280856acd2 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -281,9 +281,7 @@ static void reparent_to_init(void) /* Set the exit signal to SIGCHLD so we signal init on exit */ current->exit_signal = SIGCHLD; - if ((current->policy == SCHED_NORMAL || - current->policy == SCHED_BATCH) - && (task_nice(current) < 0)) + if (!has_rt_policy(current) && (task_nice(current) < 0)) set_user_nice(current, 0); /* cpus_allowed? */ /* rt_priority? */ -- cgit v1.2.3 From 5b160f5ecd2f1b6df2e0015dc1f319c8ef803d62 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:52 -0700 Subject: [PATCH] copy_process: cosmetic ->ioprio tweak copy_process: // holds tasklist_lock + ->siglock /* * inherit ioprio */ p->ioprio = current->ioprio; Why? 
->ioprio was already copied in dup_task_struct(). I guess this is needed to ensure that the child can't escape sys_ioprio_set(IOPRIO_WHO_{PGRP,USER}), yes? In that case we don't need ->siglock held, and the comment should be updated. Signed-off-by: Oleg Nesterov Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index bca6ce6d3ded..1c999f3e0b47 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1150,7 +1150,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, /* Our parent execution domain becomes current domain These must match for thread signalling to apply */ - p->parent_exec_id = p->self_exec_id; /* ok, now we should be set up.. */ @@ -1173,6 +1172,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); + /* for sys_ioprio_set(IOPRIO_WHO_PGRP) */ + p->ioprio = current->ioprio; + /* * The task hasn't been attached yet, so its cpus_allowed mask will * not be changed, nor will its assigned CPU. @@ -1232,11 +1234,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, } } - /* - * inherit ioprio - */ - p->ioprio = current->ioprio; - if (likely(p->pid)) { add_parent(p); if (unlikely(p->ptrace & PT_PTRACED)) -- cgit v1.2.3 From d359b549bf3d7f42f0084918a4816ea4572e507c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:55 -0700 Subject: [PATCH] futex_find_get_task(): don't take tasklist_lock It is ok to do find_task_by_pid() + get_task_struct() under rcu_read_lock(), we cand drop tasklist_lock. Note that testing of ->exit_state is racy with or without tasklist anyway. Signed-off-by: Oleg Nesterov Acked-by: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/futex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 9d260e838cff..ca8ef11feb65 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -389,7 +389,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; - read_lock(&tasklist_lock); + rcu_read_lock(); p = find_task_by_pid(pid); if (!p) goto out_unlock; @@ -403,7 +403,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) } get_task_struct(p); out_unlock: - read_unlock(&tasklist_lock); + rcu_read_unlock(); return p; } -- cgit v1.2.3 From aaa2a97eb9c0e91d7abc66bf76811a9599fdb3ee Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:00:55 -0700 Subject: [PATCH] sys_get_robust_list(): don't take tasklist_lock use rcu locks for find_task_by_pid(). 
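For context, both of these conversions use the same lookup idiom; a minimal editorial sketch of it, not part of either patch (error handling and the ->exit_state / ptrace checks of the real callers are omitted):

	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_pid(pid);	/* safe under rcu_read_lock() */
	if (p)
		get_task_struct(p);	/* pin the task before leaving RCU */
	rcu_read_unlock();
	/* ... use p, then put_task_struct(p) when done ... */

The point of the change is that find_task_by_pid() only needs the RCU read side to keep the task_struct from disappearing underneath us; taking the global tasklist_lock for a read is unnecessarily heavy.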
Signed-off-by: Oleg Nesterov Cc: Ingo Molnar Cc: Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/futex.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index ca8ef11feb65..4b6770e9806d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1624,7 +1624,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, struct task_struct *p; ret = -ESRCH; - read_lock(&tasklist_lock); + rcu_read_lock(); p = find_task_by_pid(pid); if (!p) goto err_unlock; @@ -1633,7 +1633,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, !capable(CAP_SYS_PTRACE)) goto err_unlock; head = p->robust_list; - read_unlock(&tasklist_lock); + rcu_read_unlock(); } if (put_user(sizeof(*head), len_ptr)) @@ -1641,7 +1641,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr, return put_user(head, head_ptr); err_unlock: - read_unlock(&tasklist_lock); + rcu_read_unlock(); return ret; } -- cgit v1.2.3 From 29b884921634e1e01cbd276e1c9b8fc07a7e4a90 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:01:09 -0700 Subject: [PATCH] set EXIT_DEAD state in do_exit(), not in schedule() schedule() checks PF_DEAD on every context switch and sets ->state = EXIT_DEAD to ensure that the exiting task will be deactivated. Note that this EXIT_DEAD is in fact a "random" value, we can use any bit except normal TASK_XXX values. It is better to set this state in do_exit() along with PF_DEAD flag and remove that check in schedule(). We are safe wrt concurrent try_to_wake_up() (for example ptrace, tkill), it can not change task's ->state: the 'state' argument of try_to_wake_up() can't have EXIT_DEAD bit. And in case when try_to_wake_up() sees a stale value of ->state == TASK_RUNNING it will do nothing. Signed-off-by: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 1 + kernel/sched.c | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 4a280856acd2..3d759c98fb11 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -957,6 +957,7 @@ fastcall NORET_TYPE void do_exit(long code) preempt_disable(); BUG_ON(tsk->flags & PF_DEAD); tsk->flags |= PF_DEAD; + tsk->state = EXIT_DEAD; schedule(); BUG(); diff --git a/kernel/sched.c b/kernel/sched.c index 155a33da7aa7..e1646b044b69 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3348,9 +3348,6 @@ need_resched_nonpreemptible: spin_lock_irq(&rq->lock); - if (unlikely(prev->flags & PF_DEAD)) - prev->state = EXIT_DEAD; - switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { switch_count = &prev->nvcsw; -- cgit v1.2.3 From 55a101f8f71a3d3dbda7b5c77083ffe47552f831 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:01:10 -0700 Subject: [PATCH] kill PF_DEAD flag After the previous change (->flags & PF_DEAD) <=> (->state == EXIT_DEAD), we don't need PF_DEAD any longer. 
Signed-off-by: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 6 ++---- kernel/sched.c | 16 ++++++++-------- 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 3d759c98fb11..9dd5f1336da2 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -953,10 +953,8 @@ fastcall NORET_TYPE void do_exit(long code) if (tsk->splice_pipe) __free_pipe_info(tsk->splice_pipe); - /* PF_DEAD causes final put_task_struct after we schedule. */ preempt_disable(); - BUG_ON(tsk->flags & PF_DEAD); - tsk->flags |= PF_DEAD; + /* causes final put_task_struct in finish_task_switch(). */ tsk->state = EXIT_DEAD; schedule(); @@ -972,7 +970,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code) { if (comp) complete(comp); - + do_exit(code); } diff --git a/kernel/sched.c b/kernel/sched.c index e1646b044b69..a9405d7cc6ab 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1755,27 +1755,27 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) __releases(rq->lock) { struct mm_struct *mm = rq->prev_mm; - unsigned long prev_task_flags; + long prev_state; rq->prev_mm = NULL; /* * A task struct has one reference for the use as "current". - * If a task dies, then it sets EXIT_ZOMBIE in tsk->exit_state and - * calls schedule one last time. The schedule call will never return, - * and the scheduled task must drop that reference. - * The test for EXIT_ZOMBIE must occur while the runqueue locks are + * If a task dies, then it sets EXIT_DEAD in tsk->state and calls + * schedule one last time. The schedule call will never return, and + * the scheduled task must drop that reference. + * The test for EXIT_DEAD must occur while the runqueue locks are * still held, otherwise prev could be scheduled on another cpu, die * there before we look at prev->state, and then the reference would * be dropped twice. * Manfred Spraul */ - prev_task_flags = prev->flags; + prev_state = prev->state; finish_arch_switch(prev); finish_lock_switch(rq, prev); if (mm) mmdrop(mm); - if (unlikely(prev_task_flags & PF_DEAD)) { + if (unlikely(prev_state == EXIT_DEAD)) { /* * Remove function-return probe instances associated with this * task and put them back on the free list. @@ -5153,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); /* Cannot have done final schedule yet: would have vanished. */ - BUG_ON(p->flags & PF_DEAD); + BUG_ON(p->state == EXIT_DEAD); get_task_struct(p); -- cgit v1.2.3 From c394cc9fbb367f87faa2228ec2eabacd2d4701c6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 29 Sep 2006 02:01:11 -0700 Subject: [PATCH] introduce TASK_DEAD state I am not sure about this patch, I am asking Ingo to take a decision. task_struct->state == EXIT_DEAD is a very special case, to avoid a confusion it makes sense to introduce a new state, TASK_DEAD, while EXIT_DEAD should live only in ->exit_state as documented in sched.h. Note that this state is not visible to user-space, get_task_state() masks off unsuitable states. 
Signed-off-by: Oleg Nesterov Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 2 +- kernel/sched.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 9dd5f1336da2..2e4c13cba95a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -955,7 +955,7 @@ fastcall NORET_TYPE void do_exit(long code) preempt_disable(); /* causes final put_task_struct in finish_task_switch(). */ - tsk->state = EXIT_DEAD; + tsk->state = TASK_DEAD; schedule(); BUG(); diff --git a/kernel/sched.c b/kernel/sched.c index a9405d7cc6ab..74f169ac0773 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1761,10 +1761,10 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) /* * A task struct has one reference for the use as "current". - * If a task dies, then it sets EXIT_DEAD in tsk->state and calls + * If a task dies, then it sets TASK_DEAD in tsk->state and calls * schedule one last time. The schedule call will never return, and * the scheduled task must drop that reference. - * The test for EXIT_DEAD must occur while the runqueue locks are + * The test for TASK_DEAD must occur while the runqueue locks are * still held, otherwise prev could be scheduled on another cpu, die * there before we look at prev->state, and then the reference would * be dropped twice. @@ -1775,7 +1775,7 @@ static inline void finish_task_switch(struct rq *rq, struct task_struct *prev) finish_lock_switch(rq, prev); if (mm) mmdrop(mm); - if (unlikely(prev_state == EXIT_DEAD)) { + if (unlikely(prev_state == TASK_DEAD)) { /* * Remove function-return probe instances associated with this * task and put them back on the free list. @@ -5153,7 +5153,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); /* Cannot have done final schedule yet: would have vanished. */ - BUG_ON(p->state == EXIT_DEAD); + BUG_ON(p->state == TASK_DEAD); get_task_struct(p); -- cgit v1.2.3 From 38837fc75acb7fa9b0e111b0241fe4fe76c5d4b3 Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Fri, 29 Sep 2006 02:01:16 -0700 Subject: [PATCH] cpuset: top_cpuset tracks hotplug changes to node_online_map Change the list of memory nodes allowed to tasks in the top (root) nodeset to dynamically track what cpus are online, using a call to a cpuset hook from the memory hotplug code. Make this top cpus file read-only. On systems that have cpusets configured in their kernel, but that aren't actively using cpusets (for some distros, this covers the majority of systems) all tasks end up in the top cpuset. If that system does support memory hotplug, then these tasks cannot make use of memory nodes that are added after system boot, because the memory nodes are not allowed in the top cpuset. This is a surprising regression over earlier kernels that didn't have cpusets enabled. One key motivation for this change is to remain consistent with the behaviour for the top_cpuset's 'cpus', which is also read-only, and which automatically tracks the cpu_online_map. This change also has the minor benefit that it fixes a long standing, little noticed, minor bug in cpusets. The cpuset performance tweak to short circuit the cpuset_zone_allowed() check on systems with just a single cpuset (see 'number_of_cpusets', in linux/cpuset.h) meant that simply changing the 'mems' of the top_cpuset had no affect, even though the change (the write system call) appeared to succeed. 
With the following change, that write to the 'mems' file fails -EACCES, and the 'mems' file stubbornly refuses to be changed via user space writes. Thus no one should be mislead into thinking they've changed the top_cpusets's 'mems' when in affect they haven't. In order to keep the behaviour of cpusets consistent between systems actively making use of them and systems not using them, this patch changes the behaviour of the 'mems' file in the top (root) cpuset, making it read only, and making it automatically track the value of node_online_map. Thus tasks in the top cpuset will have automatic use of hot plugged memory nodes allowed by their cpuset. [akpm@osdl.org: build fix] [bunk@stusta.de: build fix] Signed-off-by: Paul Jackson Signed-off-by: Adrian Bunk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 584bb4e6c042..794af5024c2f 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -912,6 +912,10 @@ static int update_nodemask(struct cpuset *cs, char *buf) int fudge; int retval; + /* top_cpuset.mems_allowed tracks node_online_map; it's read-only */ + if (cs == &top_cpuset) + return -EACCES; + trialcs = *cs; retval = nodelist_parse(buf, trialcs.mems_allowed); if (retval < 0) @@ -2042,9 +2046,8 @@ out: * (of no affect) on systems that are actively using CPU hotplug * but making no active use of cpusets. * - * This handles CPU hotplug (cpuhp) events. If someday Memory - * Nodes can be hotplugged (dynamically changing node_online_map) - * then we should handle that too, perhaps in a similar way. + * This routine ensures that top_cpuset.cpus_allowed tracks + * cpu_online_map on each CPU hotplug (cpuhp) event. */ #ifdef CONFIG_HOTPLUG_CPU @@ -2063,6 +2066,25 @@ static int cpuset_handle_cpuhp(struct notifier_block *nb, } #endif +/* + * Keep top_cpuset.mems_allowed tracking node_online_map. + * Call this routine anytime after you change node_online_map. + * See also the previous routine cpuset_handle_cpuhp(). + */ + +#ifdef CONFIG_MEMORY_HOTPLUG +void cpuset_track_online_nodes() +{ + mutex_lock(&manage_mutex); + mutex_lock(&callback_mutex); + + top_cpuset.mems_allowed = node_online_map; + + mutex_unlock(&callback_mutex); + mutex_unlock(&manage_mutex); +} +#endif + /** * cpuset_init_smp - initialize cpus_allowed * -- cgit v1.2.3 From b1aac8bb824c658ddebd296b088a8bff5029c288 Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Fri, 29 Sep 2006 02:01:17 -0700 Subject: [PATCH] cpuset: hotunplug cpus and mems in all cpusets The cpuset code handling hot unplug of CPUs or Memory Nodes was incorrect - it could remove a CPU or Node from the top cpuset, while leaving it still in some child cpusets. One basic rule of cpusets is that each cpusets cpus and mems are subsets of its parents. The cpuset hot unplug code violated this rule. So the cpuset hotunplug handler must walk down the tree, removing any removed CPU or Node from all cpusets. However, it is not allowed to make a cpusets cpus or mems become empty. They can only transition from empty to non-empty, not back. So if the last CPU or Node would be removed from a cpuset by the above walk, we scan back up the cpuset hierarchy, finding the nearest ancestor that still has something online, and copy its CPU or Memory placement. 
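Roughly, the "scan back up" step reuses the existing guarantee_online_cpus()/guarantee_online_mems() helpers; the following is an editorial sketch of the idea only (a hypothetical, simplified helper shown for the CPU case, not the code in the patch):

	/* find the nearest ancestor that still has an online CPU and
	 * hand its placement to the cpuset that would otherwise go empty */
	static void fallback_to_online_ancestor(struct cpuset *cs)
	{
		struct cpuset *parent = cs->parent;

		while (parent &&
		       !cpus_intersects(parent->cpus_allowed, cpu_online_map))
			parent = parent->parent;
		if (parent)
			cpus_and(cs->cpus_allowed, parent->cpus_allowed,
				 cpu_online_map);
	}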
Signed-off-by: Paul Jackson Cc: Nathan Lynch Cc: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 70 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 794af5024c2f..cc0395d7eba1 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2040,6 +2040,73 @@ out: return err; } +#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG) +/* + * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs + * or memory nodes, we need to walk over the cpuset hierarchy, + * removing that CPU or node from all cpusets. If this removes the + * last CPU or node from a cpuset, then the guarantee_online_cpus() + * or guarantee_online_mems() code will use that emptied cpusets + * parent online CPUs or nodes. Cpusets that were already empty of + * CPUs or nodes are left empty. + * + * This routine is intentionally inefficient in a couple of regards. + * It will check all cpusets in a subtree even if the top cpuset of + * the subtree has no offline CPUs or nodes. It checks both CPUs and + * nodes, even though the caller could have been coded to know that + * only one of CPUs or nodes needed to be checked on a given call. + * This was done to minimize text size rather than cpu cycles. + * + * Call with both manage_mutex and callback_mutex held. + * + * Recursive, on depth of cpuset subtree. + */ + +static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) +{ + struct cpuset *c; + + /* Each of our child cpusets mems must be online */ + list_for_each_entry(c, &cur->children, sibling) { + guarantee_online_cpus_mems_in_subtree(c); + if (!cpus_empty(c->cpus_allowed)) + guarantee_online_cpus(c, &c->cpus_allowed); + if (!nodes_empty(c->mems_allowed)) + guarantee_online_mems(c, &c->mems_allowed); + } +} + +/* + * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track + * cpu_online_map and node_online_map. Force the top cpuset to track + * whats online after any CPU or memory node hotplug or unplug event. + * + * To ensure that we don't remove a CPU or node from the top cpuset + * that is currently in use by a child cpuset (which would violate + * the rule that cpusets must be subsets of their parent), we first + * call the recursive routine guarantee_online_cpus_mems_in_subtree(). + * + * Since there are two callers of this routine, one for CPU hotplug + * events and one for memory node hotplug events, we could have coded + * two separate routines here. We code it as a single common routine + * in order to minimize text size. + */ + +static void common_cpu_mem_hotplug_unplug(void) +{ + mutex_lock(&manage_mutex); + mutex_lock(&callback_mutex); + + guarantee_online_cpus_mems_in_subtree(&top_cpuset); + top_cpuset.cpus_allowed = cpu_online_map; + top_cpuset.mems_allowed = node_online_map; + + mutex_unlock(&callback_mutex); + mutex_unlock(&manage_mutex); +} +#endif + +#ifdef CONFIG_HOTPLUG_CPU /* * The top_cpuset tracks what CPUs and Memory Nodes are online, * period. This is necessary in order to make cpusets transparent @@ -2050,38 +2117,24 @@ out: * cpu_online_map on each CPU hotplug (cpuhp) event. 
*/ -#ifdef CONFIG_HOTPLUG_CPU static int cpuset_handle_cpuhp(struct notifier_block *nb, unsigned long phase, void *cpu) { - mutex_lock(&manage_mutex); - mutex_lock(&callback_mutex); - - top_cpuset.cpus_allowed = cpu_online_map; - - mutex_unlock(&callback_mutex); - mutex_unlock(&manage_mutex); - + common_cpu_mem_hotplug_unplug(); return 0; } #endif +#ifdef CONFIG_MEMORY_HOTPLUG /* * Keep top_cpuset.mems_allowed tracking node_online_map. * Call this routine anytime after you change node_online_map. * See also the previous routine cpuset_handle_cpuhp(). */ -#ifdef CONFIG_MEMORY_HOTPLUG void cpuset_track_online_nodes() { - mutex_lock(&manage_mutex); - mutex_lock(&callback_mutex); - - top_cpuset.mems_allowed = node_online_map; - - mutex_unlock(&callback_mutex); - mutex_unlock(&manage_mutex); + common_cpu_mem_hotplug_unplug(); } #endif -- cgit v1.2.3 From 04b1db9fd7eea63c9663072feece616ea41b0a79 Mon Sep 17 00:00:00 2001 From: "Ian S. Nelson" Date: Fri, 29 Sep 2006 02:01:31 -0700 Subject: [PATCH] /sys/modules: allow full length section names I've been using systemtap for some debugging and I noticed that it can't probe a lot of modules. Turns out it's kind of silly, the sections section of /sys/module is limited to 32byte filenames and many of the actual sections are a a bit longer than that. [akpm@osdl.org: rewrite to use dymanic allocation] Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/module.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index b7fe6e840963..05625d5dc758 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -933,6 +933,15 @@ static ssize_t module_sect_show(struct module_attribute *mattr, return sprintf(buf, "0x%lx\n", sattr->address); } +static void free_sect_attrs(struct module_sect_attrs *sect_attrs) +{ + int section; + + for (section = 0; section < sect_attrs->nsections; section++) + kfree(sect_attrs->attrs[section].name); + kfree(sect_attrs); +} + static void add_sect_attrs(struct module *mod, unsigned int nsect, char *secstrings, Elf_Shdr *sechdrs) { @@ -949,21 +958,26 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, + nloaded * sizeof(sect_attrs->attrs[0]), sizeof(sect_attrs->grp.attrs[0])); size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); - if (! (sect_attrs = kmalloc(size[0] + size[1], GFP_KERNEL))) + sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); + if (sect_attrs == NULL) return; /* Setup section attributes. */ sect_attrs->grp.name = "sections"; sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; + sect_attrs->nsections = 0; sattr = §_attrs->attrs[0]; gattr = §_attrs->grp.attrs[0]; for (i = 0; i < nsect; i++) { if (! 
(sechdrs[i].sh_flags & SHF_ALLOC)) continue; sattr->address = sechdrs[i].sh_addr; - strlcpy(sattr->name, secstrings + sechdrs[i].sh_name, - MODULE_SECT_NAME_LEN); + sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, + GFP_KERNEL); + if (sattr->name == NULL) + goto out; + sect_attrs->nsections++; sattr->mattr.show = module_sect_show; sattr->mattr.store = NULL; sattr->mattr.attr.name = sattr->name; @@ -979,7 +993,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, mod->sect_attrs = sect_attrs; return; out: - kfree(sect_attrs); + free_sect_attrs(sect_attrs); } static void remove_sect_attrs(struct module *mod) @@ -989,13 +1003,13 @@ static void remove_sect_attrs(struct module *mod) &mod->sect_attrs->grp); /* We are positive that no one is using any sect attrs * at this point. Deallocate immediately. */ - kfree(mod->sect_attrs); + free_sect_attrs(mod->sect_attrs); mod->sect_attrs = NULL; } } - #else + static inline void add_sect_attrs(struct module *mod, unsigned int nsect, char *sectstrings, Elf_Shdr *sechdrs) { -- cgit v1.2.3 From e5582ca21af82929d5cd3613321ac9233c492ebc Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 29 Sep 2006 02:01:35 -0700 Subject: [PATCH] stop_machine.c copyright I had to look back: this code was extracted from the module.c code in 2005. Signed-off-by: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/stop_machine.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 51cacd111dbd..12458040e665 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -1,3 +1,6 @@ +/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation. + * GPL v2 and any later version. + */ #include #include #include -- cgit v1.2.3 From eb84a20e9e6b98dcb33023ad22241d79107a08a7 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 29 Sep 2006 02:01:41 -0700 Subject: [PATCH] audit/accounting: tty locking Add tty locking around the audit and accounting code. The whole current->signal-> locking is all deeply strange but it's for someone else to sort out. Add rather than replace the lock for acct.c Signed-off-by: Alan Cox Acked-by: Arjan van de Ven Cc: Al Viro Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/acct.c | 6 +++++- kernel/auditsc.c | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/acct.c b/kernel/acct.c index 2a7c933651c7..f4330acead46 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -483,10 +483,14 @@ static void do_acct_process(struct file *file) ac.ac_ppid = current->parent->tgid; #endif - read_lock(&tasklist_lock); /* pin current->signal */ + mutex_lock(&tty_mutex); + /* FIXME: Whoever is responsible for current->signal locking needs + to use the same locking all over the kernel and document it */ + read_lock(&tasklist_lock); ac.ac_tty = current->signal->tty ? 
old_encode_dev(tty_devnum(current->signal->tty)) : 0; read_unlock(&tasklist_lock); + mutex_unlock(&tty_mutex); spin_lock_irq(¤t->sighand->siglock); ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime))); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index fb83c5cb8c32..105147631753 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -817,6 +817,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts audit_log_format(ab, " success=%s exit=%ld", (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", context->return_code); + + mutex_lock(&tty_mutex); if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name) tty = tsk->signal->tty->name; else @@ -838,6 +840,9 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts context->gid, context->euid, context->suid, context->fsuid, context->egid, context->sgid, context->fsgid, tty); + + mutex_unlock(&tty_mutex); + audit_log_task_info(ab, tsk); if (context->filterkey) { audit_log_format(ab, " key="); -- cgit v1.2.3 From 03cbc358aab3faf34bfeaa02206fa660127e9b3f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 29 Sep 2006 02:01:46 -0700 Subject: [PATCH] lockdep core: improve the lock-chain-hash With CONFIG_DEBUG_LOCK_ALLOC turned off i was getting sporadic failures in the locking self-test: ------------> | Locking API testsuite: ---------------------------------------------------------------------------- | spin |wlock |rlock |mutex | wsem | rsem | -------------------------------------------------------------------------- A-A deadlock: ok | ok | ok | ok | ok | ok | A-B-B-A deadlock: ok | ok | ok | ok | ok | ok | A-B-B-C-C-A deadlock: ok | ok | ok | ok | ok | ok | A-B-C-A-B-C deadlock: ok | ok | ok | ok | ok | ok | A-B-B-C-C-D-D-A deadlock: ok |FAILED| ok | ok | ok | ok | A-B-C-D-B-D-D-A deadlock: ok | ok | ok | ok | ok | ok | A-B-C-D-B-C-D-A deadlock: ok | ok | ok | ok | ok |FAILED| after much debugging it turned out to be caused by accidental chain-hash key collisions. The current hash is: #define iterate_chain_key(key1, key2) \ (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \ ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \ (key2)) where MAX_LOCKDEP_KEYS_BITS is 11. This hash is pretty good as it will shift by 5 bits in every iteration, where every new ID 'mixed' into the hash would have up to 11 bits. But because there was a 6 bits overlap between subsequent IDs and their high bits tended to be similar, there was a chance for accidental chain-hash collision for a low number of locks held. the solution is to shift by 11 bits: #define iterate_chain_key(key1, key2) \ (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \ ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \ (key2)) This keeps the hash perfect up to 5 locks held, but even above that the hash is still good because 11 bits is a relative prime to the total 64 bits, so a complete match will only occur after 64 held locks (which doesnt happen in Linux). Even after 5 locks held, entropy of the 5 IDs mixed into the hash is already good enough so that overlap doesnt generate a colliding hash ID. with this change the false positives went away. 
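A quick sanity check of the new constant (editorial note, not part of the commit): with MAX_LOCKDEP_KEYS_BITS = 11, each iteration rotates the chain key left by 11 bits before XOR-ing in the next class ID, so the first five IDs land in disjoint 11-bit fields (5 * 11 = 55 <= 64) and cannot cancel each other by construction. Beyond five held locks collisions stay unlikely because gcd(11, 64) = 1, so the rotation only realigns with itself after 64 iterations. The old shift of MAX_LOCKDEP_KEYS_BITS/2 = 5 bits left consecutive 11-bit IDs overlapping in 6 bits, which is what made the accidental chain-hash collisions in the self-test possible.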
Signed-off-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/lockdep.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/lockdep.c b/kernel/lockdep.c index df1c3594de31..e596525669ed 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -122,8 +122,8 @@ static struct list_head chainhash_table[CHAINHASH_SIZE]; * unique. */ #define iterate_chain_key(key1, key2) \ - (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \ - ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \ + (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \ + ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \ (key2)) void lockdep_off(void) -- cgit v1.2.3 From 181b64803661209cda64e5e874ad75f373a69de8 Mon Sep 17 00:00:00 2001 From: Paul Jackson Date: Fri, 29 Sep 2006 02:01:48 -0700 Subject: [PATCH] cpuset: fix obscure attach_task vs exiting race Fix obscure race condition in kernel/cpuset.c attach_task() code. There is basically zero chance of anyone accidentally being harmed by this race. It requires a special 'micro-stress' load and a special timing loop hacks in the kernel to hit in less than an hour, and even then you'd have to hit it hundreds or thousands of times, followed by some unusual and senseless cpuset configuration requests, including removing the top cpuset, to cause any visibly harm affects. One could, with perhaps a few days or weeks of such effort, get the reference count on the top cpuset below zero, and manage to crash the kernel by asking to remove the top cpuset. I found it by code inspection. The race was introduced when 'the_top_cpuset_hack' was introduced, and one piece of code was not updated. An old check for a possibly null task cpuset pointer needed to be changed to a check for a task marked PF_EXITING. The pointer can't be null anymore, thanks to the_top_cpuset_hack (documented in kernel/cpuset.c). But the task could have gone into PF_EXITING state after it was found in the task_list scan. If a task is PF_EXITING in this code, it is possible that its task->cpuset pointer is pointing to the top cpuset due to the_top_cpuset_hack, rather than because the top_cpuset was that tasks last valid cpuset. In that case, the wrong cpuset reference counter would be decremented. The fix is trivial. Instead of failing the system call if the tasks cpuset pointer is null here, fail it if the task is in PF_EXITING state. The code for 'the_top_cpuset_hack' that changes an exiting tasks cpuset to the top_cpuset is done without locking, so could happen at anytime. But it is done during the exit handling, after the PF_EXITING flag is set. So if we verify that a task is still not PF_EXITING after we copy out its cpuset pointer (into 'oldcs', below), we know that 'oldcs' is not one of these hack references to the top_cpuset. Signed-off-by: Paul Jackson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index cc0395d7eba1..8c3c400cce91 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1225,7 +1225,12 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) task_lock(tsk); oldcs = tsk->cpuset; - if (!oldcs) { + /* + * After getting 'oldcs' cpuset ptr, be sure still not exiting. + * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack + * then fail this attach_task(), to avoid breaking top_cpuset.count. 
+ */ + if (tsk->flags & PF_EXITING) { task_unlock(tsk); mutex_unlock(&callback_mutex); put_task_struct(tsk); -- cgit v1.2.3 From 29cbc78b90a73ad80f2f58ba2927956cf663abed Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 30 Sep 2006 01:47:55 +0200 Subject: [PATCH] x86: Clean up x86 NMI sysctls Use prototypes in headers Don't define panic_on_unrecovered_nmi for all architectures Cc: dzickus@redhat.com Signed-off-by: Andi Kleen --- kernel/panic.c | 1 - kernel/sysctl.c | 11 ++++------- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 6ceb664fb52a..525e365f7239 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -21,7 +21,6 @@ #include int panic_on_oops; -int panic_on_unrecovered_nmi; int tainted; static int pause_on_oops; static int pause_on_oops_flag; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 9535a3839930..c57c4532e296 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -52,6 +52,10 @@ extern int proc_nr_files(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); +#ifdef CONFIG_X86 +#include +#endif + #if defined(CONFIG_SYSCTL) /* External variables not in a header file. */ @@ -74,13 +78,6 @@ extern int sysctl_drop_caches; extern int percpu_pagelist_fraction; extern int compat_log; -#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) -int unknown_nmi_panic; -int nmi_watchdog_enabled; -extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, - void __user *, size_t *, loff_t *); -#endif - /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; static int minolduid; -- cgit v1.2.3 From 34596dc9e59d7bece16fe5aba08116b49465da26 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 30 Sep 2006 01:47:55 +0200 Subject: [PATCH] Define vsyscall cache as blob to make clearer that user space shouldn't use it Signed-off-by: Andi Kleen --- kernel/sys.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index 8647061c084a..b88806c66244 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2083,12 +2083,12 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, * padding */ unsigned long t0, t1; - get_user(t0, &cache->t0); - get_user(t1, &cache->t1); + get_user(t0, &cache->blob[0]); + get_user(t1, &cache->blob[1]); t0++; t1++; - put_user(t0, &cache->t0); - put_user(t1, &cache->t1); + put_user(t0, &cache->blob[0]); + put_user(t1, &cache->blob[1]); } return err ? -EFAULT : 0; } -- cgit v1.2.3 From 0d67a46df0125e20d14f12dbd3646f1f1bf23e8c Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 29 Aug 2006 19:05:56 +0100 Subject: [PATCH] BLOCK: Remove duplicate declaration of exit_io_context() [try #6] Remove the duplicate declaration of exit_io_context() from linux/sched.h. 
Signed-Off-By: David Howells Signed-off-by: Jens Axboe --- kernel/exit.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 2e4c13cba95a..c189de2927ab 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -38,6 +38,7 @@ #include #include /* for audit_free() */ #include +#include #include #include -- cgit v1.2.3 From 07f3f05c1e3052b8656129b2a5aca9f888241a34 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sat, 30 Sep 2006 20:52:18 +0200 Subject: [PATCH] BLOCK: Move extern declarations out of fs/*.c into header files [try #6] Create a new header file, fs/internal.h, for common definitions local to the sources in the fs/ directory. Move extern definitions that should be in header files from fs/*.c to fs/internal.h or other main header files where they span directories. Signed-Off-By: David Howells Signed-off-by: Jens Axboe --- kernel/compat.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/compat.c b/kernel/compat.c index 75573e5d27b0..b4fbd838cd77 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -26,6 +26,8 @@ #include +extern void sigset_from_compat(sigset_t *set, compat_sigset_t *compat); + int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) { return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || -- cgit v1.2.3 From 9361401eb7619c033e2394e4f9f6d410d6719ac7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sat, 30 Sep 2006 20:45:40 +0200 Subject: [PATCH] BLOCK: Make it possible to disable the block layer [try #6] Make it possible to disable the block layer. Not all embedded devices require it, some can make do with just JFFS2, NFS, ramfs, etc - none of which require the block layer to be present. This patch does the following: (*) Introduces CONFIG_BLOCK to disable the block layer, buffering and blockdev support. (*) Adds dependencies on CONFIG_BLOCK to any configuration item that controls an item that uses the block layer. This includes: (*) Block I/O tracing. (*) Disk partition code. (*) All filesystems that are block based, eg: Ext3, ReiserFS, ISOFS. (*) The SCSI layer. As far as I can tell, even SCSI chardevs use the block layer to do scheduling. Some drivers that use SCSI facilities - such as USB storage - end up disabled indirectly from this. (*) Various block-based device drivers, such as IDE and the old CDROM drivers. (*) MTD blockdev handling and FTL. (*) JFFS - which uses set_bdev_super(), something it could avoid doing by taking a leaf out of JFFS2's book. (*) Makes most of the contents of linux/blkdev.h, linux/buffer_head.h and linux/elevator.h contingent on CONFIG_BLOCK being set. sector_div() is, however, still used in places, and so is still available. (*) Also made contingent are the contents of linux/mpage.h, linux/genhd.h and parts of linux/fs.h. (*) Makes a number of files in fs/ contingent on CONFIG_BLOCK. (*) Makes mm/bounce.c (bounce buffering) contingent on CONFIG_BLOCK. (*) set_page_dirty() doesn't call __set_page_dirty_buffers() if CONFIG_BLOCK is not enabled. (*) fs/no-block.c is created to hold out-of-line stubs and things that are required when CONFIG_BLOCK is not set: (*) Default blockdev file operations (to give error ENODEV on opening). (*) Makes some /proc changes: (*) /proc/devices does not list any blockdevs. (*) /proc/diskstats and /proc/partitions are contingent on CONFIG_BLOCK. (*) Makes some compat ioctl handling contingent on CONFIG_BLOCK. 
(*) If CONFIG_BLOCK is not defined, makes sys_quotactl() return -ENODEV if given command other than Q_SYNC or if a special device is specified. (*) In init/do_mounts.c, no reference is made to the blockdev routines if CONFIG_BLOCK is not defined. This does not prohibit NFS roots or JFFS2. (*) The bdflush, ioprio_set and ioprio_get syscalls can now be absent (return error ENOSYS by way of cond_syscall if so). (*) The seclvl_bd_claim() and seclvl_bd_release() security calls do nothing if CONFIG_BLOCK is not set, since they can't then happen. Signed-Off-By: David Howells Signed-off-by: Jens Axboe --- kernel/sys_ni.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 6991bece67e8..7a3b2e75f040 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -134,3 +134,8 @@ cond_syscall(sys_madvise); cond_syscall(sys_mremap); cond_syscall(sys_remap_file_pages); cond_syscall(compat_sys_move_pages); + +/* block-layer dependent */ +cond_syscall(sys_bdflush); +cond_syscall(sys_ioprio_set); +cond_syscall(sys_ioprio_get); -- cgit v1.2.3 From 5c87579e65ee4f419b2369407f82326d38b5d2d8 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 30 Sep 2006 23:27:17 -0700 Subject: [PATCH] maximum latency tracking infrastructure Add infrastructure to track "maximum allowable latency" for power saving policies. The reason for adding this infrastructure is that power management in the idle loop needs to make a tradeoff between latency and power savings (deeper power save modes have a longer latency to running code again). The code that today makes this tradeoff just does a rather simple algorithm; however this is not good enough: There are devices and use cases where a lower latency is required than that the higher power saving states provide. An example would be audio playback, but another example is the ipw2100 wireless driver that right now has a very direct and ugly acpi hook to disable some higher power states randomly when it gets certain types of error. The proposed solution is to have an interface where drivers can * announce the maximum latency (in microseconds) that they can deal with * modify this latency * give up their constraint and a function where the code that decides on power saving strategy can query the current global desired maximum. This patch has a user of each side: on the consumer side, ACPI is patched to use this, on the producer side the ipw2100 driver is patched. A generic maximum latency is also registered of 2 timer ticks (more and you lose accurate time tracking after all). While the existing users of the patch are x86 specific, the infrastructure is not. I'd like to ask the arch maintainers of other architectures if the infrastructure is generic enough for their use (assuming the architecture has such a tradeoff as concept at all), and the sound/multimedia driver owners to look at the driver facing API to see if this is something they can use. 
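To make the driver-facing side concrete, here is a hedged usage sketch built from the functions this patch adds in kernel/latency.c; the driver name and the 150 usec figure are invented for illustration, mirroring the audio example in the changelog, and the header location is assumed since this excerpt is limited to kernel/:

	#include <linux/latency.h>	/* assumed home of the declarations */

	static int example_audio_start(void)
	{
		/* the hardware interrupts us with ~200 usec of samples left,
		 * so ask the power-saving code to stay under 150 usec of
		 * wakeup latency while playback is running */
		set_acceptable_latency("example_audio", 150);
		return 0;
	}

	static void example_audio_stop(void)
	{
		/* give the constraint back so deeper power states are
		 * allowed again */
		remove_acceptable_latency("example_audio");
	}

On the consumer side, the code choosing a power-saving state compares its exit latencies against system_latency_constraint() before entering a deeper state.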
[akpm@osdl.org: cleanups] Signed-off-by: Arjan van de Ven Signed-off-by: Ingo Molnar Acked-by: Jesse Barnes Cc: "Brown, Len" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Makefile | 2 +- kernel/latency.c | 279 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 280 insertions(+), 1 deletion(-) create mode 100644 kernel/latency.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index d62ec66c1af2..e210e8cf7237 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ signal.o sys.o kmod.o workqueue.o pid.o \ rcupdate.o extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ - hrtimer.o rwsem.o + hrtimer.o rwsem.o latency.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ diff --git a/kernel/latency.c b/kernel/latency.c new file mode 100644 index 000000000000..258f2555abbc --- /dev/null +++ b/kernel/latency.c @@ -0,0 +1,279 @@ +/* + * latency.c: Explicit system-wide latency-expectation infrastructure + * + * The purpose of this infrastructure is to allow device drivers to set + * latency constraint they have and to collect and summarize these + * expectations globally. The cummulated result can then be used by + * power management and similar users to make decisions that have + * tradoffs with a latency component. + * + * An example user of this are the x86 C-states; each higher C state saves + * more power, but has a higher exit latency. For the idle loop power + * code to make a good decision which C-state to use, information about + * acceptable latencies is required. + * + * An example announcer of latency is an audio driver that knowns it + * will get an interrupt when the hardware has 200 usec of samples + * left in the DMA buffer; in that case the driver can set a latency + * constraint of, say, 150 usec. + * + * Multiple drivers can each announce their maximum accepted latency, + * to keep these appart, a string based identifier is used. + * + * + * (C) Copyright 2006 Intel Corporation + * Author: Arjan van de Ven + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include +#include +#include +#include +#include +#include +#include + +struct latency_info { + struct list_head list; + int usecs; + char *identifier; +}; + +/* + * locking rule: all modifications to current_max_latency and + * latency_list need to be done while holding the latency_lock. + * latency_lock needs to be taken _irqsave. + */ +static atomic_t current_max_latency; +static DEFINE_SPINLOCK(latency_lock); + +static LIST_HEAD(latency_list); +static BLOCKING_NOTIFIER_HEAD(latency_notifier); + +/* + * This function returns the maximum latency allowed, which + * happens to be the minimum of all maximum latencies on the + * list. + */ +static int __find_max_latency(void) +{ + int min = INFINITE_LATENCY; + struct latency_info *info; + + list_for_each_entry(info, &latency_list, list) { + if (info->usecs < min) + min = info->usecs; + } + return min; +} + +/** + * set_acceptable_latency - sets the maximum latency acceptable + * @identifier: string that identifies this driver + * @usecs: maximum acceptable latency for this driver + * + * This function informs the kernel that this device(driver) + * can accept at most usecs latency. 
This setting is used for + * power management and similar tradeoffs. + * + * This function sleeps and can only be called from process + * context. + * Calling this function with an existing identifier is valid + * and will cause the existing latency setting to be changed. + */ +void set_acceptable_latency(char *identifier, int usecs) +{ + struct latency_info *info, *iter; + unsigned long flags; + int found_old = 0; + + info = kzalloc(sizeof(struct latency_info), GFP_KERNEL); + if (!info) + return; + info->usecs = usecs; + info->identifier = kstrdup(identifier, GFP_KERNEL); + if (!info->identifier) + goto free_info; + + spin_lock_irqsave(&latency_lock, flags); + list_for_each_entry(iter, &latency_list, list) { + if (strcmp(iter->identifier, identifier)==0) { + found_old = 1; + iter->usecs = usecs; + break; + } + } + if (!found_old) + list_add(&info->list, &latency_list); + + if (usecs < atomic_read(¤t_max_latency)) + atomic_set(¤t_max_latency, usecs); + + spin_unlock_irqrestore(&latency_lock, flags); + + blocking_notifier_call_chain(&latency_notifier, + atomic_read(¤t_max_latency), NULL); + + /* + * if we inserted the new one, we're done; otherwise there was + * an existing one so we need to free the redundant data + */ + if (!found_old) + return; + + kfree(info->identifier); +free_info: + kfree(info); +} +EXPORT_SYMBOL_GPL(set_acceptable_latency); + +/** + * modify_acceptable_latency - changes the maximum latency acceptable + * @identifier: string that identifies this driver + * @usecs: maximum acceptable latency for this driver + * + * This function informs the kernel that this device(driver) + * can accept at most usecs latency. This setting is used for + * power management and similar tradeoffs. + * + * This function does not sleep and can be called in any context. + * Trying to use a non-existing identifier silently gets ignored. + * + * Due to the atomic nature of this function, the modified latency + * value will only be used for future decisions; past decisions + * can still lead to longer latencies in the near future. + */ +void modify_acceptable_latency(char *identifier, int usecs) +{ + struct latency_info *iter; + unsigned long flags; + + spin_lock_irqsave(&latency_lock, flags); + list_for_each_entry(iter, &latency_list, list) { + if (strcmp(iter->identifier, identifier) == 0) { + iter->usecs = usecs; + break; + } + } + if (usecs < atomic_read(¤t_max_latency)) + atomic_set(¤t_max_latency, usecs); + spin_unlock_irqrestore(&latency_lock, flags); +} +EXPORT_SYMBOL_GPL(modify_acceptable_latency); + +/** + * remove_acceptable_latency - removes the maximum latency acceptable + * @identifier: string that identifies this driver + * + * This function removes a previously set maximum latency setting + * for the driver and frees up any resources associated with the + * bookkeeping needed for this. + * + * This function does not sleep and can be called in any context. + * Trying to use a non-existing identifier silently gets ignored. 
+ */ +void remove_acceptable_latency(char *identifier) +{ + unsigned long flags; + int newmax = 0; + struct latency_info *iter, *temp; + + spin_lock_irqsave(&latency_lock, flags); + + list_for_each_entry_safe(iter, temp, &latency_list, list) { + if (strcmp(iter->identifier, identifier) == 0) { + list_del(&iter->list); + newmax = iter->usecs; + kfree(iter->identifier); + kfree(iter); + break; + } + } + + /* If we just deleted the system wide value, we need to + * recalculate with a full search + */ + if (newmax == atomic_read(¤t_max_latency)) { + newmax = __find_max_latency(); + atomic_set(¤t_max_latency, newmax); + } + spin_unlock_irqrestore(&latency_lock, flags); +} +EXPORT_SYMBOL_GPL(remove_acceptable_latency); + +/** + * system_latency_constraint - queries the system wide latency maximum + * + * This function returns the system wide maximum latency in + * microseconds. + * + * This function does not sleep and can be called in any context. + */ +int system_latency_constraint(void) +{ + return atomic_read(¤t_max_latency); +} +EXPORT_SYMBOL_GPL(system_latency_constraint); + +/** + * synchronize_acceptable_latency - recalculates all latency decisions + * + * This function will cause a callback to various kernel pieces that + * will make those pieces rethink their latency decisions. This implies + * that if there are overlong latencies in hardware state already, those + * latencies get taken right now. When this call completes no overlong + * latency decisions should be active anymore. + * + * Typical usecase of this is after a modify_acceptable_latency() call, + * which in itself is non-blocking and non-synchronizing. + * + * This function blocks and should not be called with locks held. + */ + +void synchronize_acceptable_latency(void) +{ + blocking_notifier_call_chain(&latency_notifier, + atomic_read(¤t_max_latency), NULL); +} +EXPORT_SYMBOL_GPL(synchronize_acceptable_latency); + +/* + * Latency notifier: this notifier gets called when a non-atomic new + * latency value gets set. The expectation nof the caller of the + * non-atomic set is that when the call returns, future latencies + * are within bounds, so the functions on the notifier list are + * expected to take the overlong latencies immediately, inside the + * callback, and not make a overlong latency decision anymore. + * + * The callback gets called when the new latency value is made + * active so system_latency_constraint() returns the new latency. + */ +int register_latency_notifier(struct notifier_block * nb) +{ + return blocking_notifier_chain_register(&latency_notifier, nb); +} +EXPORT_SYMBOL_GPL(register_latency_notifier); + +int unregister_latency_notifier(struct notifier_block * nb) +{ + return blocking_notifier_chain_unregister(&latency_notifier, nb); +} +EXPORT_SYMBOL_GPL(unregister_latency_notifier); + +static __init int latency_init(void) +{ + atomic_set(¤t_max_latency, INFINITE_LATENCY); + /* + * we don't want by default to have longer latencies than 2 ticks, + * since that would cause lost ticks + */ + set_acceptable_latency("kernel", 2*1000000/HZ); + return 0; +} + +module_init(latency_init); -- cgit v1.2.3 From 756184b7d771992f4fb1998d62aebcaf3e028076 Mon Sep 17 00:00:00 2001 From: Cal Peake Date: Sat, 30 Sep 2006 23:27:24 -0700 Subject: [PATCH] CodingStyle cleanup for kernel/sys.c Fix up kernel/sys.c to be consistent with CodingStyle and the rest of the file. 
Signed-off-by: Cal Peake Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys.c | 80 +++++++++++++++++++++++------------------------------------- 1 file changed, 30 insertions(+), 50 deletions(-) (limited to 'kernel') diff --git a/kernel/sys.c b/kernel/sys.c index b88806c66244..2460581c928c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -607,11 +607,10 @@ static void kernel_restart_prepare(char *cmd) void kernel_restart(char *cmd) { kernel_restart_prepare(cmd); - if (!cmd) { + if (!cmd) printk(KERN_EMERG "Restarting system.\n"); - } else { + else printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd); - } machine_restart(cmd); } EXPORT_SYMBOL_GPL(kernel_restart); @@ -627,9 +626,8 @@ static void kernel_kexec(void) #ifdef CONFIG_KEXEC struct kimage *image; image = xchg(&kexec_image, NULL); - if (!image) { + if (!image) return; - } kernel_restart_prepare(NULL); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); @@ -823,12 +821,10 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) (current->sgid == egid) || capable(CAP_SETGID)) new_egid = egid; - else { + else return -EPERM; - } } - if (new_egid != old_egid) - { + if (new_egid != old_egid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -857,19 +853,14 @@ asmlinkage long sys_setgid(gid_t gid) if (retval) return retval; - if (capable(CAP_SETGID)) - { - if(old_egid != gid) - { + if (capable(CAP_SETGID)) { + if (old_egid != gid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } current->gid = current->egid = current->sgid = current->fsgid = gid; - } - else if ((gid == current->gid) || (gid == current->sgid)) - { - if(old_egid != gid) - { + } else if ((gid == current->gid) || (gid == current->sgid)) { + if (old_egid != gid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -900,8 +891,7 @@ static int set_user(uid_t new_ruid, int dumpclear) switch_uid(new_user); - if(dumpclear) - { + if (dumpclear) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -957,8 +947,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) return -EAGAIN; - if (new_euid != old_euid) - { + if (new_euid != old_euid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -1008,8 +997,7 @@ asmlinkage long sys_setuid(uid_t uid) } else if ((uid != current->uid) && (uid != new_suid)) return -EPERM; - if (old_euid != uid) - { + if (old_euid != uid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -1054,8 +1042,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) return -EAGAIN; } if (euid != (uid_t) -1) { - if (euid != current->euid) - { + if (euid != current->euid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -1105,8 +1092,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) return -EPERM; } if (egid != (gid_t) -1) { - if (egid != current->egid) - { + if (egid != current->egid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -1151,10 +1137,8 @@ asmlinkage long sys_setfsuid(uid_t uid) if (uid == current->uid || uid == current->euid || uid == current->suid || uid == current->fsuid || - capable(CAP_SETUID)) - { - if (uid != old_fsuid) - { + capable(CAP_SETUID)) { + if (uid != old_fsuid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -1182,10 +1166,8 @@ asmlinkage long sys_setfsgid(gid_t gid) if (gid == current->gid || gid == current->egid || gid == current->sgid || gid == current->fsgid || - capable(CAP_SETGID)) - { - if (gid != old_fsgid) - { + 
capable(CAP_SETGID)) { + if (gid != old_fsgid) { current->mm->dumpable = suid_dumpable; smp_wmb(); } @@ -1321,9 +1303,9 @@ out: asmlinkage long sys_getpgid(pid_t pid) { - if (!pid) { + if (!pid) return process_group(current); - } else { + else { int retval; struct task_struct *p; @@ -1353,9 +1335,9 @@ asmlinkage long sys_getpgrp(void) asmlinkage long sys_getsid(pid_t pid) { - if (!pid) { + if (!pid) return current->signal->session; - } else { + else { int retval; struct task_struct *p; @@ -1363,7 +1345,7 @@ asmlinkage long sys_getsid(pid_t pid) p = find_task_by_pid(pid); retval = -ESRCH; - if(p) { + if (p) { retval = security_task_getsid(p); if (!retval) retval = p->signal->session; @@ -1431,9 +1413,9 @@ struct group_info *groups_alloc(int gidsetsize) group_info->nblocks = nblocks; atomic_set(&group_info->usage, 1); - if (gidsetsize <= NGROUPS_SMALL) { + if (gidsetsize <= NGROUPS_SMALL) group_info->blocks[0] = group_info->small_block; - } else { + else { for (i = 0; i < nblocks; i++) { gid_t *b; b = (void *)__get_free_page(GFP_USER); @@ -1489,7 +1471,7 @@ static int groups_to_user(gid_t __user *grouplist, /* fill a group_info from a user-space array - it must be allocated already */ static int groups_from_user(struct group_info *group_info, gid_t __user *grouplist) - { +{ int i; int count = group_info->ngroups; @@ -1647,9 +1629,8 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) int in_group_p(gid_t grp) { int retval = 1; - if (grp != current->fsgid) { + if (grp != current->fsgid) retval = groups_search(current->group_info, grp); - } return retval; } @@ -1658,9 +1639,8 @@ EXPORT_SYMBOL(in_group_p); int in_egroup_p(gid_t grp) { int retval = 1; - if (grp != current->egid) { + if (grp != current->egid) retval = groups_search(current->group_info, grp); - } return retval; } @@ -1775,9 +1755,9 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r task_lock(current->group_leader); x = current->signal->rlim[resource]; task_unlock(current->group_leader); - if(x.rlim_cur > 0x7FFFFFFF) + if (x.rlim_cur > 0x7FFFFFFF) x.rlim_cur = 0x7FFFFFFF; - if(x.rlim_max > 0x7FFFFFFF) + if (x.rlim_max > 0x7FFFFFFF) x.rlim_max = 0x7FFFFFFF; return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0; } -- cgit v1.2.3 From ef6edc9746dc2bfdacf44eefd5f881179971c478 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Sat, 30 Sep 2006 23:27:43 -0700 Subject: [PATCH] Directed yield: cpu_relax variants for spinlocks and rw-locks On systems running with virtual cpus there is optimization potential in regard to spinlocks and rw-locks. If the virtual cpu that has taken a lock is known to a cpu that wants to acquire the same lock it is beneficial to yield the timeslice of the virtual cpu in favour of the cpu that has the lock (directed yield). With CONFIG_PREEMPT="n" this can be implemented by the architecture without common code changes. Powerpc already does this. With CONFIG_PREEMPT="y" the lock loops are coded with _raw_spin_trylock, _raw_read_trylock and _raw_write_trylock in kernel/spinlock.c. If the lock could not be taken cpu_relax is called. A directed yield is not possible because cpu_relax doesn't know anything about the lock. To be able to yield the lock in favour of the current lock holder variants of cpu_relax for spinlocks and rw-locks are needed. The new _raw_spin_relax, _raw_read_relax and _raw_write_relax primitives differ from cpu_relax insofar that they have an argument: a pointer to the lock structure. 
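Architectures that cannot make use of the lock pointer are expected to fall back to plain cpu_relax(), so the preemptible lock loops behave exactly as before. A minimal sketch of that assumed generic fallback (the real definitions live outside kernel/, so they are not part of the diff below):

	/*
	 * Sketch of the assumed generic fallback: ignore the lock
	 * argument and spin politely, as the old loops did.
	 */
	#define _raw_spin_relax(lock)	cpu_relax()
	#define _raw_read_relax(lock)	cpu_relax()
	#define _raw_write_relax(lock)	cpu_relax()

A virtualized architecture can instead decode the owning virtual cpu from the lock word and ask the hypervisor to run that cpu, which is what makes the directed yield possible.
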
Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Cc: Paul Mackerras Cc: Haavard Skinnemoen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/spinlock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/spinlock.c b/kernel/spinlock.c index d48143eafbfd..476c3741511b 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c @@ -215,7 +215,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ - cpu_relax(); \ + _raw_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ } \ @@ -237,7 +237,7 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \ if (!(lock)->break_lock) \ (lock)->break_lock = 1; \ while (!op##_can_lock(lock) && (lock)->break_lock) \ - cpu_relax(); \ + _raw_##op##_relax(&lock->raw_lock); \ } \ (lock)->break_lock = 0; \ return flags; \ -- cgit v1.2.3 From 4c7ee8de956fc250fe31e2fa91f6da980fabe317 Mon Sep 17 00:00:00 2001 From: john stultz Date: Sat, 30 Sep 2006 23:28:22 -0700 Subject: [PATCH] NTP: Move all the NTP related code to ntp.c Move all the NTP related code to ntp.c [akpm@osdl.org: cleanups, build fix] Signed-off-by: John Stultz Cc: Ingo Molnar Cc: Roman Zippel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time.c | 173 ----------------------- kernel/time/Makefile | 2 +- kernel/time/ntp.c | 389 +++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/timer.c | 211 +--------------------------- 4 files changed, 391 insertions(+), 384 deletions(-) create mode 100644 kernel/time/ntp.c (limited to 'kernel') diff --git a/kernel/time.c b/kernel/time.c index 5bd489747643..0e017bff4c19 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -202,179 +202,6 @@ asmlinkage long sys_settimeofday(struct timeval __user *tv, return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); } -/* we call this to notify the arch when the clock is being - * controlled. If no such arch routine, do nothing. - */ -void __attribute__ ((weak)) notify_arch_cmos_timer(void) -{ - return; -} - -/* adjtimex mainly allows reading (and writing, if superuser) of - * kernel time-keeping variables. used by xntpd. - */ -int do_adjtimex(struct timex *txc) -{ - long ltemp, mtemp, save_adjust; - int result; - - /* In order to modify anything, you gotta be super-user! */ - if (txc->modes && !capable(CAP_SYS_TIME)) - return -EPERM; - - /* Now we validate the data before disabling interrupts */ - - if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) - /* singleshot must not be used with any other mode bits */ - if (txc->modes != ADJ_OFFSET_SINGLESHOT) - return -EINVAL; - - if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET)) - /* adjustment Offset limited to +- .512 seconds */ - if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE ) - return -EINVAL; - - /* if the quartz is off by more than 10% something is VERY wrong ! */ - if (txc->modes & ADJ_TICK) - if (txc->tick < 900000/USER_HZ || - txc->tick > 1100000/USER_HZ) - return -EINVAL; - - write_seqlock_irq(&xtime_lock); - result = time_state; /* mostly `TIME_OK' */ - - /* Save for later - semantics of adjtime is to return old value */ - save_adjust = time_next_adjust ? 
time_next_adjust : time_adjust; - -#if 0 /* STA_CLOCKERR is never set yet */ - time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */ -#endif - /* If there are input parameters, then process them */ - if (txc->modes) - { - if (txc->modes & ADJ_STATUS) /* only set allowed bits */ - time_status = (txc->status & ~STA_RONLY) | - (time_status & STA_RONLY); - - if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */ - if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) { - result = -EINVAL; - goto leave; - } - time_freq = txc->freq; - } - - if (txc->modes & ADJ_MAXERROR) { - if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) { - result = -EINVAL; - goto leave; - } - time_maxerror = txc->maxerror; - } - - if (txc->modes & ADJ_ESTERROR) { - if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) { - result = -EINVAL; - goto leave; - } - time_esterror = txc->esterror; - } - - if (txc->modes & ADJ_TIMECONST) { /* p. 24 */ - if (txc->constant < 0) { /* NTP v4 uses values > 6 */ - result = -EINVAL; - goto leave; - } - time_constant = txc->constant; - } - - if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ - if (txc->modes == ADJ_OFFSET_SINGLESHOT) { - /* adjtime() is independent from ntp_adjtime() */ - if ((time_next_adjust = txc->offset) == 0) - time_adjust = 0; - } - else if (time_status & STA_PLL) { - ltemp = txc->offset; - - /* - * Scale the phase adjustment and - * clamp to the operating range. - */ - if (ltemp > MAXPHASE) - time_offset = MAXPHASE << SHIFT_UPDATE; - else if (ltemp < -MAXPHASE) - time_offset = -(MAXPHASE << SHIFT_UPDATE); - else - time_offset = ltemp << SHIFT_UPDATE; - - /* - * Select whether the frequency is to be controlled - * and in which mode (PLL or FLL). Clamp to the operating - * range. Ugly multiply/divide should be replaced someday. - */ - - if (time_status & STA_FREQHOLD || time_reftime == 0) - time_reftime = xtime.tv_sec; - mtemp = xtime.tv_sec - time_reftime; - time_reftime = xtime.tv_sec; - if (time_status & STA_FLL) { - if (mtemp >= MINSEC) { - ltemp = (time_offset / mtemp) << (SHIFT_USEC - - SHIFT_UPDATE); - time_freq += shift_right(ltemp, SHIFT_KH); - } else /* calibration interval too short (p. 12) */ - result = TIME_ERROR; - } else { /* PLL mode */ - if (mtemp < MAXSEC) { - ltemp *= mtemp; - time_freq += shift_right(ltemp,(time_constant + - time_constant + - SHIFT_KF - SHIFT_USEC)); - } else /* calibration interval too long (p. 
12) */ - result = TIME_ERROR; - } - time_freq = min(time_freq, time_tolerance); - time_freq = max(time_freq, -time_tolerance); - } /* STA_PLL */ - } /* txc->modes & ADJ_OFFSET */ - if (txc->modes & ADJ_TICK) { - tick_usec = txc->tick; - tick_nsec = TICK_USEC_TO_NSEC(tick_usec); - } - } /* txc->modes */ -leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) - result = TIME_ERROR; - - if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) - txc->offset = save_adjust; - else { - txc->offset = shift_right(time_offset, SHIFT_UPDATE); - } - txc->freq = time_freq; - txc->maxerror = time_maxerror; - txc->esterror = time_esterror; - txc->status = time_status; - txc->constant = time_constant; - txc->precision = time_precision; - txc->tolerance = time_tolerance; - txc->tick = tick_usec; - - /* PPS is not implemented, so these are zero */ - txc->ppsfreq = 0; - txc->jitter = 0; - txc->shift = 0; - txc->stabil = 0; - txc->jitcnt = 0; - txc->calcnt = 0; - txc->errcnt = 0; - txc->stbcnt = 0; - write_sequnlock_irq(&xtime_lock); - do_gettimeofday(&txc->time); - notify_arch_cmos_timer(); - return(result); -} - asmlinkage long sys_adjtimex(struct timex __user *txc_p) { struct timex txc; /* Local copy of parameter */ diff --git a/kernel/time/Makefile b/kernel/time/Makefile index e1dfd8e86cce..61a3907d16fb 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -1 +1 @@ -obj-y += clocksource.o jiffies.o +obj-y += ntp.o clocksource.o jiffies.o diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c new file mode 100644 index 000000000000..8ccce15b4b23 --- /dev/null +++ b/kernel/time/ntp.c @@ -0,0 +1,389 @@ +/* + * linux/kernel/time/ntp.c + * + * NTP state machine interfaces and logic. + * + * This code was mainly moved from kernel/timer.c and kernel/time.c + * Please see those files for relevant copyright info and historical + * changelogs. + */ + +#include +#include +#include + +#include +#include + +/* Don't completely fail for HZ > 500. */ +int tickadj = 500/HZ ? : 1; /* microsecs */ + +/* + * phase-lock loop variables + */ +/* TIME_ERROR prevents overwriting the CMOS clock */ +int time_state = TIME_OK; /* clock synchronization status */ +int time_status = STA_UNSYNC; /* clock status bits */ +long time_offset; /* time adjustment (us) */ +long time_constant = 2; /* pll time constant */ +long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ +long time_precision = 1; /* clock precision (us) */ +long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ +long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ +long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; + /* frequency offset (scaled ppm)*/ +static long time_adj; /* tick adjust (scaled 1 / HZ) */ +long time_reftime; /* time at last adjustment (s) */ +long time_adjust; +long time_next_adjust; + +/* + * this routine handles the overflow of the microsecond field + * + * The tricky bits of code to handle the accurate clock support + * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. + * They were originally developed for SUN and DEC kernels. + * All the kudos should go to Dave for this stuff. + */ +void second_overflow(void) +{ + long ltemp; + + /* Bump the maxerror field */ + time_maxerror += time_tolerance >> SHIFT_USEC; + if (time_maxerror > NTP_PHASE_LIMIT) { + time_maxerror = NTP_PHASE_LIMIT; + time_status |= STA_UNSYNC; + } + + /* + * Leap second processing. 
If in leap-insert state at the end of the + * day, the system clock is set back one second; if in leap-delete + * state, the system clock is set ahead one second. The microtime() + * routine or external clock driver will insure that reported time is + * always monotonic. The ugly divides should be replaced. + */ + switch (time_state) { + case TIME_OK: + if (time_status & STA_INS) + time_state = TIME_INS; + else if (time_status & STA_DEL) + time_state = TIME_DEL; + break; + case TIME_INS: + if (xtime.tv_sec % 86400 == 0) { + xtime.tv_sec--; + wall_to_monotonic.tv_sec++; + /* + * The timer interpolator will make time change + * gradually instead of an immediate jump by one second + */ + time_interpolator_update(-NSEC_PER_SEC); + time_state = TIME_OOP; + clock_was_set(); + printk(KERN_NOTICE "Clock: inserting leap second " + "23:59:60 UTC\n"); + } + break; + case TIME_DEL: + if ((xtime.tv_sec + 1) % 86400 == 0) { + xtime.tv_sec++; + wall_to_monotonic.tv_sec--; + /* + * Use of time interpolator for a gradual change of + * time + */ + time_interpolator_update(NSEC_PER_SEC); + time_state = TIME_WAIT; + clock_was_set(); + printk(KERN_NOTICE "Clock: deleting leap second " + "23:59:59 UTC\n"); + } + break; + case TIME_OOP: + time_state = TIME_WAIT; + break; + case TIME_WAIT: + if (!(time_status & (STA_INS | STA_DEL))) + time_state = TIME_OK; + } + + /* + * Compute the phase adjustment for the next second. In PLL mode, the + * offset is reduced by a fixed factor times the time constant. In FLL + * mode the offset is used directly. In either mode, the maximum phase + * adjustment for each second is clamped so as to spread the adjustment + * over not more than the number of seconds between updates. + */ + ltemp = time_offset; + if (!(time_status & STA_FLL)) + ltemp = shift_right(ltemp, SHIFT_KG + time_constant); + ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE); + ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE); + time_offset -= ltemp; + time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); + + /* + * Compute the frequency estimate and additional phase adjustment due + * to frequency error for the next second. + */ + ltemp = time_freq; + time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE)); + +#if HZ == 100 + /* + * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to + * get 128.125; => only 0.125% error (p. 14) + */ + time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5); +#endif +#if HZ == 250 + /* + * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and + * 0.78125% to get 255.85938; => only 0.05% error (p. 14) + */ + time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); +#endif +#if HZ == 1000 + /* + * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and + * 0.78125% to get 1023.4375; => only 0.05% error (p. 14) + */ + time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); +#endif +} + +/* + * Returns how many microseconds we need to add to xtime this tick + * in doing an adjustment requested with adjtime. + */ +static long adjtime_adjustment(void) +{ + long time_adjust_step; + + time_adjust_step = time_adjust; + if (time_adjust_step) { + /* + * We are doing an adjtime thing. Prepare time_adjust_step to + * be within bounds. Note that a positive time_adjust means we + * want the clock to run faster. + * + * Limit the amount of the step to be in the range + * -tickadj .. 
+tickadj + */ + time_adjust_step = min(time_adjust_step, (long)tickadj); + time_adjust_step = max(time_adjust_step, (long)-tickadj); + } + return time_adjust_step; +} + +/* in the NTP reference this is called "hardclock()" */ +void update_ntp_one_tick(void) +{ + long time_adjust_step; + + time_adjust_step = adjtime_adjustment(); + if (time_adjust_step) + /* Reduce by this step the amount of time left */ + time_adjust -= time_adjust_step; + + /* Changes by adjtime() do not take effect till next tick. */ + if (time_next_adjust != 0) { + time_adjust = time_next_adjust; + time_next_adjust = 0; + } +} + +/* + * Return how long ticks are at the moment, that is, how much time + * update_wall_time_one_tick will add to xtime next time we call it + * (assuming no calls to do_adjtimex in the meantime). + * The return value is in fixed-point nanoseconds shifted by the + * specified number of bits to the right of the binary point. + * This function has no side-effects. + */ +u64 current_tick_length(void) +{ + long delta_nsec; + u64 ret; + + /* calculate the finest interval NTP will allow. + * ie: nanosecond value shifted by (SHIFT_SCALE - 10) + */ + delta_nsec = tick_nsec + adjtime_adjustment() * 1000; + ret = (u64)delta_nsec << TICK_LENGTH_SHIFT; + ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10)); + + return ret; +} + + +void __attribute__ ((weak)) notify_arch_cmos_timer(void) +{ + return; +} + +/* adjtimex mainly allows reading (and writing, if superuser) of + * kernel time-keeping variables. used by xntpd. + */ +int do_adjtimex(struct timex *txc) +{ + long ltemp, mtemp, save_adjust; + int result; + + /* In order to modify anything, you gotta be super-user! */ + if (txc->modes && !capable(CAP_SYS_TIME)) + return -EPERM; + + /* Now we validate the data before disabling interrupts */ + + if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) + /* singleshot must not be used with any other mode bits */ + if (txc->modes != ADJ_OFFSET_SINGLESHOT) + return -EINVAL; + + if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET)) + /* adjustment Offset limited to +- .512 seconds */ + if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE ) + return -EINVAL; + + /* if the quartz is off by more than 10% something is VERY wrong ! */ + if (txc->modes & ADJ_TICK) + if (txc->tick < 900000/USER_HZ || + txc->tick > 1100000/USER_HZ) + return -EINVAL; + + write_seqlock_irq(&xtime_lock); + result = time_state; /* mostly `TIME_OK' */ + + /* Save for later - semantics of adjtime is to return old value */ + save_adjust = time_next_adjust ? time_next_adjust : time_adjust; + +#if 0 /* STA_CLOCKERR is never set yet */ + time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */ +#endif + /* If there are input parameters, then process them */ + if (txc->modes) + { + if (txc->modes & ADJ_STATUS) /* only set allowed bits */ + time_status = (txc->status & ~STA_RONLY) | + (time_status & STA_RONLY); + + if (txc->modes & ADJ_FREQUENCY) { /* p. 22 */ + if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) { + result = -EINVAL; + goto leave; + } + time_freq = txc->freq; + } + + if (txc->modes & ADJ_MAXERROR) { + if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) { + result = -EINVAL; + goto leave; + } + time_maxerror = txc->maxerror; + } + + if (txc->modes & ADJ_ESTERROR) { + if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) { + result = -EINVAL; + goto leave; + } + time_esterror = txc->esterror; + } + + if (txc->modes & ADJ_TIMECONST) { /* p. 
24 */ + if (txc->constant < 0) { /* NTP v4 uses values > 6 */ + result = -EINVAL; + goto leave; + } + time_constant = txc->constant; + } + + if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ + if (txc->modes == ADJ_OFFSET_SINGLESHOT) { + /* adjtime() is independent from ntp_adjtime() */ + if ((time_next_adjust = txc->offset) == 0) + time_adjust = 0; + } + else if (time_status & STA_PLL) { + ltemp = txc->offset; + + /* + * Scale the phase adjustment and + * clamp to the operating range. + */ + if (ltemp > MAXPHASE) + time_offset = MAXPHASE << SHIFT_UPDATE; + else if (ltemp < -MAXPHASE) + time_offset = -(MAXPHASE << SHIFT_UPDATE); + else + time_offset = ltemp << SHIFT_UPDATE; + + /* + * Select whether the frequency is to be controlled + * and in which mode (PLL or FLL). Clamp to the operating + * range. Ugly multiply/divide should be replaced someday. + */ + + if (time_status & STA_FREQHOLD || time_reftime == 0) + time_reftime = xtime.tv_sec; + mtemp = xtime.tv_sec - time_reftime; + time_reftime = xtime.tv_sec; + if (time_status & STA_FLL) { + if (mtemp >= MINSEC) { + ltemp = (time_offset / mtemp) << (SHIFT_USEC - + SHIFT_UPDATE); + time_freq += shift_right(ltemp, SHIFT_KH); + } else /* calibration interval too short (p. 12) */ + result = TIME_ERROR; + } else { /* PLL mode */ + if (mtemp < MAXSEC) { + ltemp *= mtemp; + time_freq += shift_right(ltemp,(time_constant + + time_constant + + SHIFT_KF - SHIFT_USEC)); + } else /* calibration interval too long (p. 12) */ + result = TIME_ERROR; + } + time_freq = min(time_freq, time_tolerance); + time_freq = max(time_freq, -time_tolerance); + } /* STA_PLL */ + } /* txc->modes & ADJ_OFFSET */ + if (txc->modes & ADJ_TICK) { + tick_usec = txc->tick; + tick_nsec = TICK_USEC_TO_NSEC(tick_usec); + } + } /* txc->modes */ +leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) + result = TIME_ERROR; + + if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) + txc->offset = save_adjust; + else { + txc->offset = shift_right(time_offset, SHIFT_UPDATE); + } + txc->freq = time_freq; + txc->maxerror = time_maxerror; + txc->esterror = time_esterror; + txc->status = time_status; + txc->constant = time_constant; + txc->precision = time_precision; + txc->tolerance = time_tolerance; + txc->tick = tick_usec; + + /* PPS is not implemented, so these are zero */ + txc->ppsfreq = 0; + txc->jitter = 0; + txc->shift = 0; + txc->stabil = 0; + txc->jitcnt = 0; + txc->calcnt = 0; + txc->errcnt = 0; + txc->stbcnt = 0; + write_sequnlock_irq(&xtime_lock); + do_gettimeofday(&txc->time); + notify_arch_cmos_timer(); + return(result); +} diff --git a/kernel/timer.c b/kernel/timer.c index 4f55622b0d38..5fccc7cbf3b4 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -41,12 +41,6 @@ #include #include -#ifdef CONFIG_TIME_INTERPOLATION -static void time_interpolator_update(long delta_nsec); -#else -#define time_interpolator_update(x) -#endif - u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; EXPORT_SYMBOL(jiffies_64); @@ -587,209 +581,6 @@ struct timespec wall_to_monotonic __attribute__ ((aligned (16))); EXPORT_SYMBOL(xtime); -/* Don't completely fail for HZ > 500. */ -int tickadj = 500/HZ ? 
: 1; /* microsecs */ - - -/* - * phase-lock loop variables - */ -/* TIME_ERROR prevents overwriting the CMOS clock */ -int time_state = TIME_OK; /* clock synchronization status */ -int time_status = STA_UNSYNC; /* clock status bits */ -long time_offset; /* time adjustment (us) */ -long time_constant = 2; /* pll time constant */ -long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ -long time_precision = 1; /* clock precision (us) */ -long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ -long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ -long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; - /* frequency offset (scaled ppm)*/ -static long time_adj; /* tick adjust (scaled 1 / HZ) */ -long time_reftime; /* time at last adjustment (s) */ -long time_adjust; -long time_next_adjust; - -/* - * this routine handles the overflow of the microsecond field - * - * The tricky bits of code to handle the accurate clock support - * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame. - * They were originally developed for SUN and DEC kernels. - * All the kudos should go to Dave for this stuff. - * - */ -static void second_overflow(void) -{ - long ltemp; - - /* Bump the maxerror field */ - time_maxerror += time_tolerance >> SHIFT_USEC; - if (time_maxerror > NTP_PHASE_LIMIT) { - time_maxerror = NTP_PHASE_LIMIT; - time_status |= STA_UNSYNC; - } - - /* - * Leap second processing. If in leap-insert state at the end of the - * day, the system clock is set back one second; if in leap-delete - * state, the system clock is set ahead one second. The microtime() - * routine or external clock driver will insure that reported time is - * always monotonic. The ugly divides should be replaced. - */ - switch (time_state) { - case TIME_OK: - if (time_status & STA_INS) - time_state = TIME_INS; - else if (time_status & STA_DEL) - time_state = TIME_DEL; - break; - case TIME_INS: - if (xtime.tv_sec % 86400 == 0) { - xtime.tv_sec--; - wall_to_monotonic.tv_sec++; - /* - * The timer interpolator will make time change - * gradually instead of an immediate jump by one second - */ - time_interpolator_update(-NSEC_PER_SEC); - time_state = TIME_OOP; - clock_was_set(); - printk(KERN_NOTICE "Clock: inserting leap second " - "23:59:60 UTC\n"); - } - break; - case TIME_DEL: - if ((xtime.tv_sec + 1) % 86400 == 0) { - xtime.tv_sec++; - wall_to_monotonic.tv_sec--; - /* - * Use of time interpolator for a gradual change of - * time - */ - time_interpolator_update(NSEC_PER_SEC); - time_state = TIME_WAIT; - clock_was_set(); - printk(KERN_NOTICE "Clock: deleting leap second " - "23:59:59 UTC\n"); - } - break; - case TIME_OOP: - time_state = TIME_WAIT; - break; - case TIME_WAIT: - if (!(time_status & (STA_INS | STA_DEL))) - time_state = TIME_OK; - } - - /* - * Compute the phase adjustment for the next second. In PLL mode, the - * offset is reduced by a fixed factor times the time constant. In FLL - * mode the offset is used directly. In either mode, the maximum phase - * adjustment for each second is clamped so as to spread the adjustment - * over not more than the number of seconds between updates. 
- */ - ltemp = time_offset; - if (!(time_status & STA_FLL)) - ltemp = shift_right(ltemp, SHIFT_KG + time_constant); - ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE); - ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE); - time_offset -= ltemp; - time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); - - /* - * Compute the frequency estimate and additional phase adjustment due - * to frequency error for the next second. - */ - ltemp = time_freq; - time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE)); - -#if HZ == 100 - /* - * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to - * get 128.125; => only 0.125% error (p. 14) - */ - time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5); -#endif -#if HZ == 250 - /* - * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and - * 0.78125% to get 255.85938; => only 0.05% error (p. 14) - */ - time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); -#endif -#if HZ == 1000 - /* - * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and - * 0.78125% to get 1023.4375; => only 0.05% error (p. 14) - */ - time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); -#endif -} - -/* - * Returns how many microseconds we need to add to xtime this tick - * in doing an adjustment requested with adjtime. - */ -static long adjtime_adjustment(void) -{ - long time_adjust_step; - - time_adjust_step = time_adjust; - if (time_adjust_step) { - /* - * We are doing an adjtime thing. Prepare time_adjust_step to - * be within bounds. Note that a positive time_adjust means we - * want the clock to run faster. - * - * Limit the amount of the step to be in the range - * -tickadj .. +tickadj - */ - time_adjust_step = min(time_adjust_step, (long)tickadj); - time_adjust_step = max(time_adjust_step, (long)-tickadj); - } - return time_adjust_step; -} - -/* in the NTP reference this is called "hardclock()" */ -static void update_ntp_one_tick(void) -{ - long time_adjust_step; - - time_adjust_step = adjtime_adjustment(); - if (time_adjust_step) - /* Reduce by this step the amount of time left */ - time_adjust -= time_adjust_step; - - /* Changes by adjtime() do not take effect till next tick. */ - if (time_next_adjust != 0) { - time_adjust = time_next_adjust; - time_next_adjust = 0; - } -} - -/* - * Return how long ticks are at the moment, that is, how much time - * update_wall_time_one_tick will add to xtime next time we call it - * (assuming no calls to do_adjtimex in the meantime). - * The return value is in fixed-point nanoseconds shifted by the - * specified number of bits to the right of the binary point. - * This function has no side-effects. - */ -u64 current_tick_length(void) -{ - long delta_nsec; - u64 ret; - - /* calculate the finest interval NTP will allow. 
- * ie: nanosecond value shifted by (SHIFT_SCALE - 10) - */ - delta_nsec = tick_nsec + adjtime_adjustment() * 1000; - ret = (u64)delta_nsec << TICK_LENGTH_SHIFT; - ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10)); - - return ret; -} /* XXX - all of this timekeeping code should be later moved to time.c */ #include @@ -1775,7 +1566,7 @@ unsigned long time_interpolator_get_offset(void) #define INTERPOLATOR_ADJUST 65536 #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST -static void time_interpolator_update(long delta_nsec) +void time_interpolator_update(long delta_nsec) { u64 counter; unsigned long offset; -- cgit v1.2.3 From b0ee75561beadc4db4d9a899c8ef4a7db50aa0ab Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:22 -0700 Subject: [PATCH] ntp: add ntp_update_frequency This introduces ntp_update_frequency() and deinlines ntp_clear() (as it's not performance critical). ntp_update_frequency() calculates the base tick length using tick_usec and adds a base adjustment, in case the frequency doesn't divide evenly by HZ. Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 50 ++++++++++++++++++++++++++++++++++++++++++++------ kernel/timer.c | 11 ++++------- 2 files changed, 48 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 8ccce15b4b23..77137bec2aea 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -15,6 +15,13 @@ #include #include +/* + * Timekeeping variables + */ +unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */ +unsigned long tick_nsec; /* ACTHZ period (nsec) */ +static u64 tick_length, tick_length_base; + /* Don't completely fail for HZ > 500. */ int tickadj = 500/HZ ? : 1; /* microsecs */ @@ -37,6 +44,36 @@ long time_reftime; /* time at last adjustment (s) */ long time_adjust; long time_next_adjust; +/** + * ntp_clear - Clears the NTP state variables + * + * Must be called while holding a write on the xtime_lock + */ +void ntp_clear(void) +{ + time_adjust = 0; /* stop active adjtime() */ + time_status |= STA_UNSYNC; + time_maxerror = NTP_PHASE_LIMIT; + time_esterror = NTP_PHASE_LIMIT; + + ntp_update_frequency(); + + tick_length = tick_length_base; +} + +#define CLOCK_TICK_OVERFLOW (LATCH * HZ - CLOCK_TICK_RATE) +#define CLOCK_TICK_ADJUST (((s64)CLOCK_TICK_OVERFLOW * NSEC_PER_SEC) / (s64)CLOCK_TICK_RATE) + +void ntp_update_frequency(void) +{ + tick_length_base = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; + tick_length_base += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT; + + do_div(tick_length_base, HZ); + + tick_nsec = tick_length_base >> TICK_LENGTH_SHIFT; +} + /* * this routine handles the overflow of the microsecond field * @@ -151,6 +188,7 @@ void second_overflow(void) */ time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); #endif + tick_length = tick_length_base; } /* @@ -204,14 +242,13 @@ void update_ntp_one_tick(void) */ u64 current_tick_length(void) { - long delta_nsec; u64 ret; /* calculate the finest interval NTP will allow. 
* ie: nanosecond value shifted by (SHIFT_SCALE - 10) */ - delta_nsec = tick_nsec + adjtime_adjustment() * 1000; - ret = (u64)delta_nsec << TICK_LENGTH_SHIFT; + ret = tick_length; + ret += (u64)(adjtime_adjustment() * 1000) << TICK_LENGTH_SHIFT; ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10)); return ret; @@ -351,10 +388,11 @@ int do_adjtimex(struct timex *txc) time_freq = max(time_freq, -time_tolerance); } /* STA_PLL */ } /* txc->modes & ADJ_OFFSET */ - if (txc->modes & ADJ_TICK) { + if (txc->modes & ADJ_TICK) tick_usec = txc->tick; - tick_nsec = TICK_USEC_TO_NSEC(tick_usec); - } + + if (txc->modes & ADJ_TICK) + ntp_update_frequency(); } /* txc->modes */ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) result = TIME_ERROR; diff --git a/kernel/timer.c b/kernel/timer.c index 5fccc7cbf3b4..78d3fa10fcd6 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -562,12 +562,6 @@ found: /******************************************************************/ -/* - * Timekeeping variables - */ -unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */ -unsigned long tick_nsec = TICK_NSEC; /* ACTHZ period (nsec) */ - /* * The current time * wall_to_monotonic is what we need to add to xtime (or xtime corrected @@ -757,10 +751,13 @@ void __init timekeeping_init(void) unsigned long flags; write_seqlock_irqsave(&xtime_lock, flags); + + ntp_clear(); + clock = clocksource_get_next(); clocksource_calculate_interval(clock, tick_nsec); clock->cycle_last = clocksource_read(clock); - ntp_clear(); + write_sequnlock_irqrestore(&xtime_lock, flags); } -- cgit v1.2.3 From ab8783b688f33c40ed7b37b814a4a1e7d341ce11 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:23 -0700 Subject: [PATCH] ntp: add time_adj to tick length This makes time_adj local to second_overflow() and integrates it into the tick length instead of adding it everytime. Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 77137bec2aea..c09628d6b848 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -39,7 +39,6 @@ long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; /* frequency offset (scaled ppm)*/ -static long time_adj; /* tick adjust (scaled 1 / HZ) */ long time_reftime; /* time at last adjustment (s) */ long time_adjust; long time_next_adjust; @@ -84,7 +83,7 @@ void ntp_update_frequency(void) */ void second_overflow(void) { - long ltemp; + long ltemp, time_adj; /* Bump the maxerror field */ time_maxerror += time_tolerance >> SHIFT_USEC; @@ -189,6 +188,7 @@ void second_overflow(void) time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); #endif tick_length = tick_length_base; + tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10)); } /* @@ -245,11 +245,9 @@ u64 current_tick_length(void) u64 ret; /* calculate the finest interval NTP will allow. 
- * ie: nanosecond value shifted by (SHIFT_SCALE - 10) */ ret = tick_length; ret += (u64)(adjtime_adjustment() * 1000) << TICK_LENGTH_SHIFT; - ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10)); return ret; } -- cgit v1.2.3 From dc6a43e46f1b6de22701f97bec022e97088cfa90 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:24 -0700 Subject: [PATCH] ntp: add time_freq to tick length This adds the frequency part to ntp_update_frequency(). Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index c09628d6b848..ab21eb06e09b 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -37,8 +37,7 @@ long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ long time_precision = 1; /* clock precision (us) */ long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ -long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC; - /* frequency offset (scaled ppm)*/ +long time_freq; /* frequency offset (scaled ppm)*/ long time_reftime; /* time at last adjustment (s) */ long time_adjust; long time_next_adjust; @@ -67,6 +66,7 @@ void ntp_update_frequency(void) { tick_length_base = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; tick_length_base += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT; + tick_length_base += ((s64)time_freq * NSEC_PER_USEC) << (TICK_LENGTH_SHIFT - SHIFT_USEC); do_div(tick_length_base, HZ); @@ -163,8 +163,6 @@ void second_overflow(void) * Compute the frequency estimate and additional phase adjustment due * to frequency error for the next second. */ - ltemp = time_freq; - time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE)); #if HZ == 100 /* @@ -389,7 +387,7 @@ int do_adjtimex(struct timex *txc) if (txc->modes & ADJ_TICK) tick_usec = txc->tick; - if (txc->modes & ADJ_TICK) + if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) ntp_update_frequency(); } /* txc->modes */ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) -- cgit v1.2.3 From 3d3675cc3d04d7fd4bb11e8c1ea79e5ade4f5e44 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:25 -0700 Subject: [PATCH] ntp: prescale time_offset This converts time_offset into a scaled per tick value. This avoids now completely the crude compensation in second_overflow(). Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 64 ++++++++++++++----------------------------------------- 1 file changed, 16 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index ab21eb06e09b..238ce47ef09d 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -31,7 +31,7 @@ int tickadj = 500/HZ ? 
: 1; /* microsecs */ /* TIME_ERROR prevents overwriting the CMOS clock */ int time_state = TIME_OK; /* clock synchronization status */ int time_status = STA_UNSYNC; /* clock status bits */ -long time_offset; /* time adjustment (us) */ +long time_offset; /* time adjustment (ns) */ long time_constant = 2; /* pll time constant */ long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ long time_precision = 1; /* clock precision (us) */ @@ -57,6 +57,7 @@ void ntp_clear(void) ntp_update_frequency(); tick_length = tick_length_base; + time_offset = 0; } #define CLOCK_TICK_OVERFLOW (LATCH * HZ - CLOCK_TICK_RATE) @@ -83,7 +84,7 @@ void ntp_update_frequency(void) */ void second_overflow(void) { - long ltemp, time_adj; + long time_adj; /* Bump the maxerror field */ time_maxerror += time_tolerance >> SHIFT_USEC; @@ -151,42 +152,14 @@ void second_overflow(void) * adjustment for each second is clamped so as to spread the adjustment * over not more than the number of seconds between updates. */ - ltemp = time_offset; - if (!(time_status & STA_FLL)) - ltemp = shift_right(ltemp, SHIFT_KG + time_constant); - ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE); - ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE); - time_offset -= ltemp; - time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE); - - /* - * Compute the frequency estimate and additional phase adjustment due - * to frequency error for the next second. - */ - -#if HZ == 100 - /* - * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to - * get 128.125; => only 0.125% error (p. 14) - */ - time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5); -#endif -#if HZ == 250 - /* - * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and - * 0.78125% to get 255.85938; => only 0.05% error (p. 14) - */ - time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); -#endif -#if HZ == 1000 - /* - * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and - * 0.78125% to get 1023.4375; => only 0.05% error (p. 14) - */ - time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7); -#endif tick_length = tick_length_base; - tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10)); + time_adj = time_offset; + if (!(time_status & STA_FLL)) + time_adj = shift_right(time_adj, SHIFT_KG + time_constant); + time_adj = min(time_adj, -((MAXPHASE / HZ) << SHIFT_UPDATE) / MINSEC); + time_adj = max(time_adj, ((MAXPHASE / HZ) << SHIFT_UPDATE) / MINSEC); + time_offset -= time_adj; + tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE); } /* @@ -347,12 +320,8 @@ int do_adjtimex(struct timex *txc) * Scale the phase adjustment and * clamp to the operating range. */ - if (ltemp > MAXPHASE) - time_offset = MAXPHASE << SHIFT_UPDATE; - else if (ltemp < -MAXPHASE) - time_offset = -(MAXPHASE << SHIFT_UPDATE); - else - time_offset = ltemp << SHIFT_UPDATE; + time_offset = min(ltemp, MAXPHASE); + time_offset = max(time_offset, -MAXPHASE); /* * Select whether the frequency is to be controlled @@ -366,8 +335,7 @@ int do_adjtimex(struct timex *txc) time_reftime = xtime.tv_sec; if (time_status & STA_FLL) { if (mtemp >= MINSEC) { - ltemp = (time_offset / mtemp) << (SHIFT_USEC - - SHIFT_UPDATE); + ltemp = ((time_offset << 12) / mtemp) << (SHIFT_USEC - 12); time_freq += shift_right(ltemp, SHIFT_KH); } else /* calibration interval too short (p. 
12) */ result = TIME_ERROR; @@ -382,6 +350,7 @@ int do_adjtimex(struct timex *txc) } time_freq = min(time_freq, time_tolerance); time_freq = max(time_freq, -time_tolerance); + time_offset = (time_offset * NSEC_PER_USEC / HZ) << SHIFT_UPDATE; } /* STA_PLL */ } /* txc->modes & ADJ_OFFSET */ if (txc->modes & ADJ_TICK) @@ -395,9 +364,8 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) txc->offset = save_adjust; - else { - txc->offset = shift_right(time_offset, SHIFT_UPDATE); - } + else + txc->offset = shift_right(time_offset, SHIFT_UPDATE) * HZ / 1000; txc->freq = time_freq; txc->maxerror = time_maxerror; txc->esterror = time_esterror; -- cgit v1.2.3 From 8f807f8d2137ba728d22820103131038639b68a9 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:25 -0700 Subject: [PATCH] ntp: add time_adjust to tick length This folds update_ntp_one_tick() into second_overflow() and adds time_adjust to the tick length, this makes time_next_adjust unnecessary. This slightly changes the adjtime() behaviour, instead of applying it to the next tick, it's applied to the next second. Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 71 ++++++++++++++----------------------------------------- kernel/timer.c | 2 -- 2 files changed, 18 insertions(+), 55 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 238ce47ef09d..65223b7ed810 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -22,8 +22,9 @@ unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */ unsigned long tick_nsec; /* ACTHZ period (nsec) */ static u64 tick_length, tick_length_base; -/* Don't completely fail for HZ > 500. */ -int tickadj = 500/HZ ? : 1; /* microsecs */ +#define MAX_TICKADJ 500 /* microsecs */ +#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ + TICK_LENGTH_SHIFT) / HZ) /* * phase-lock loop variables @@ -40,7 +41,6 @@ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ long time_freq; /* frequency offset (scaled ppm)*/ long time_reftime; /* time at last adjustment (s) */ long time_adjust; -long time_next_adjust; /** * ntp_clear - Clears the NTP state variables @@ -160,46 +160,19 @@ void second_overflow(void) time_adj = max(time_adj, ((MAXPHASE / HZ) << SHIFT_UPDATE) / MINSEC); time_offset -= time_adj; tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE); -} - -/* - * Returns how many microseconds we need to add to xtime this tick - * in doing an adjustment requested with adjtime. - */ -static long adjtime_adjustment(void) -{ - long time_adjust_step; - - time_adjust_step = time_adjust; - if (time_adjust_step) { - /* - * We are doing an adjtime thing. Prepare time_adjust_step to - * be within bounds. Note that a positive time_adjust means we - * want the clock to run faster. - * - * Limit the amount of the step to be in the range - * -tickadj .. +tickadj - */ - time_adjust_step = min(time_adjust_step, (long)tickadj); - time_adjust_step = max(time_adjust_step, (long)-tickadj); - } - return time_adjust_step; -} -/* in the NTP reference this is called "hardclock()" */ -void update_ntp_one_tick(void) -{ - long time_adjust_step; - - time_adjust_step = adjtime_adjustment(); - if (time_adjust_step) - /* Reduce by this step the amount of time left */ - time_adjust -= time_adjust_step; - - /* Changes by adjtime() do not take effect till next tick. 
*/ - if (time_next_adjust != 0) { - time_adjust = time_next_adjust; - time_next_adjust = 0; + if (unlikely(time_adjust)) { + if (time_adjust > MAX_TICKADJ) { + time_adjust -= MAX_TICKADJ; + tick_length += MAX_TICKADJ_SCALED; + } else if (time_adjust < -MAX_TICKADJ) { + time_adjust += MAX_TICKADJ; + tick_length -= MAX_TICKADJ_SCALED; + } else { + time_adjust = 0; + tick_length += (s64)(time_adjust * NSEC_PER_USEC / + HZ) << TICK_LENGTH_SHIFT; + } } } @@ -213,14 +186,7 @@ void update_ntp_one_tick(void) */ u64 current_tick_length(void) { - u64 ret; - - /* calculate the finest interval NTP will allow. - */ - ret = tick_length; - ret += (u64)(adjtime_adjustment() * 1000) << TICK_LENGTH_SHIFT; - - return ret; + return tick_length; } @@ -263,7 +229,7 @@ int do_adjtimex(struct timex *txc) result = time_state; /* mostly `TIME_OK' */ /* Save for later - semantics of adjtime is to return old value */ - save_adjust = time_next_adjust ? time_next_adjust : time_adjust; + save_adjust = time_adjust; #if 0 /* STA_CLOCKERR is never set yet */ time_status &= ~STA_CLOCKERR; /* reset STA_CLOCKERR */ @@ -310,8 +276,7 @@ int do_adjtimex(struct timex *txc) if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ if (txc->modes == ADJ_OFFSET_SINGLESHOT) { /* adjtime() is independent from ntp_adjtime() */ - if ((time_next_adjust = txc->offset) == 0) - time_adjust = 0; + time_adjust = txc->offset; } else if (time_status & STA_PLL) { ltemp = txc->offset; diff --git a/kernel/timer.c b/kernel/timer.c index 78d3fa10fcd6..654dd5df2417 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -937,8 +937,6 @@ static void update_wall_time(void) /* interpolator bits */ time_interpolator_update(clock->xtime_interval >> clock->shift); - /* increment the NTP state machine */ - update_ntp_one_tick(); /* accumulate error between NTP and clock interval */ clock->error += current_tick_length(); -- cgit v1.2.3 From 97eebe138caaf78354b1fad233e63bafdcc4fd54 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:26 -0700 Subject: [PATCH] ntp: remove time_tolerance time_tolerance isn't changed at all in the kernel, so simply remove it, this simplifies the next patch, as it avoids a number of conversions. Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 65223b7ed810..af7563f5d4e2 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -34,7 +34,6 @@ int time_state = TIME_OK; /* clock synchronization status */ int time_status = STA_UNSYNC; /* clock status bits */ long time_offset; /* time adjustment (ns) */ long time_constant = 2; /* pll time constant */ -long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */ long time_precision = 1; /* clock precision (us) */ long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ @@ -87,7 +86,7 @@ void second_overflow(void) long time_adj; /* Bump the maxerror field */ - time_maxerror += time_tolerance >> SHIFT_USEC; + time_maxerror += MAXFREQ >> SHIFT_USEC; if (time_maxerror > NTP_PHASE_LIMIT) { time_maxerror = NTP_PHASE_LIMIT; time_status |= STA_UNSYNC; @@ -313,8 +312,8 @@ int do_adjtimex(struct timex *txc) } else /* calibration interval too long (p. 
12) */ result = TIME_ERROR; } - time_freq = min(time_freq, time_tolerance); - time_freq = max(time_freq, -time_tolerance); + time_freq = min(time_freq, MAXFREQ); + time_freq = max(time_freq, -MAXFREQ); time_offset = (time_offset * NSEC_PER_USEC / HZ) << SHIFT_UPDATE; } /* STA_PLL */ } /* txc->modes & ADJ_OFFSET */ @@ -337,7 +336,7 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) txc->status = time_status; txc->constant = time_constant; txc->precision = time_precision; - txc->tolerance = time_tolerance; + txc->tolerance = MAXFREQ; txc->tick = tick_usec; /* PPS is not implemented, so these are zero */ -- cgit v1.2.3 From 04b617e71e363e640e88be1e43f53fa6a3afef9f Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:27 -0700 Subject: [PATCH] ntp: convert time_freq to nsec value This converts time_freq to a scaled nsec value and adds around 6bit of extra resolution. This pushes the time_freq to its 32bit limits so the calculatons have to be done with 64bit. Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index af7563f5d4e2..9137b54613e0 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -66,7 +66,7 @@ void ntp_update_frequency(void) { tick_length_base = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; tick_length_base += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT; - tick_length_base += ((s64)time_freq * NSEC_PER_USEC) << (TICK_LENGTH_SHIFT - SHIFT_USEC); + tick_length_base += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC); do_div(tick_length_base, HZ); @@ -200,6 +200,7 @@ void __attribute__ ((weak)) notify_arch_cmos_timer(void) int do_adjtimex(struct timex *txc) { long ltemp, mtemp, save_adjust; + s64 freq_adj; int result; /* In order to modify anything, you gotta be super-user! */ @@ -245,7 +246,7 @@ int do_adjtimex(struct timex *txc) result = -EINVAL; goto leave; } - time_freq = txc->freq; + time_freq = ((s64)txc->freq * NSEC_PER_USEC) >> (SHIFT_USEC - SHIFT_NSEC); } if (txc->modes & ADJ_MAXERROR) { @@ -278,14 +279,14 @@ int do_adjtimex(struct timex *txc) time_adjust = txc->offset; } else if (time_status & STA_PLL) { - ltemp = txc->offset; + ltemp = txc->offset * NSEC_PER_USEC; /* * Scale the phase adjustment and * clamp to the operating range. */ - time_offset = min(ltemp, MAXPHASE); - time_offset = max(time_offset, -MAXPHASE); + time_offset = min(ltemp, MAXPHASE * NSEC_PER_USEC); + time_offset = max(time_offset, -MAXPHASE * NSEC_PER_USEC); /* * Select whether the frequency is to be controlled @@ -297,24 +298,31 @@ int do_adjtimex(struct timex *txc) time_reftime = xtime.tv_sec; mtemp = xtime.tv_sec - time_reftime; time_reftime = xtime.tv_sec; + freq_adj = 0; if (time_status & STA_FLL) { if (mtemp >= MINSEC) { - ltemp = ((time_offset << 12) / mtemp) << (SHIFT_USEC - 12); - time_freq += shift_right(ltemp, SHIFT_KH); + freq_adj = (s64)time_offset << (SHIFT_NSEC - SHIFT_KH); + if (time_offset < 0) { + freq_adj = -freq_adj; + do_div(freq_adj, mtemp); + freq_adj = -freq_adj; + } else + do_div(freq_adj, mtemp); } else /* calibration interval too short (p. 
12) */ result = TIME_ERROR; } else { /* PLL mode */ if (mtemp < MAXSEC) { - ltemp *= mtemp; - time_freq += shift_right(ltemp,(time_constant + + freq_adj = (s64)ltemp * mtemp; + freq_adj = shift_right(freq_adj,(time_constant + time_constant + - SHIFT_KF - SHIFT_USEC)); + SHIFT_KF - SHIFT_NSEC)); } else /* calibration interval too long (p. 12) */ result = TIME_ERROR; } - time_freq = min(time_freq, MAXFREQ); - time_freq = max(time_freq, -MAXFREQ); - time_offset = (time_offset * NSEC_PER_USEC / HZ) << SHIFT_UPDATE; + freq_adj += time_freq; + freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); + time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC); + time_offset = (time_offset / HZ) << SHIFT_UPDATE; } /* STA_PLL */ } /* txc->modes & ADJ_OFFSET */ if (txc->modes & ADJ_TICK) @@ -330,7 +338,7 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) txc->offset = save_adjust; else txc->offset = shift_right(time_offset, SHIFT_UPDATE) * HZ / 1000; - txc->freq = time_freq; + txc->freq = (time_freq / NSEC_PER_USEC) << (SHIFT_USEC - SHIFT_NSEC); txc->maxerror = time_maxerror; txc->esterror = time_esterror; txc->status = time_status; -- cgit v1.2.3 From f19923937321244e7dc334767eb4b67e0e3d5c74 Mon Sep 17 00:00:00 2001 From: Roman Zippel Date: Sat, 30 Sep 2006 23:28:28 -0700 Subject: [PATCH] ntp: convert to the NTP4 reference model This converts the kernel ntp model into a model which matches the nanokernel reference implementations. The previous patches already increased the resolution and precision of the computations, so that this conversion becomes quite simple. explains: The original NTP kernel interface was defined in units of microseconds. That's what Linux implements. As computers have gotten faster and can now split microseconds easily, a new kernel interface using nanosecond units was defined ("the nanokernel", confusing as that name is to OS hackers), and there's an STA_NANO bit in the adjtimex() status field to tell the application which units it's using. The current ntpd supports both, but Linux loses some possible timing resolution because of quantization effects, and the ntpd hackers would really like to be able to drop the backwards compatibility code. Ulrich Windl has been maintaining a patch set to do the conversion for years, but it's hard to keep in sync. Signed-off-by: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 51 +++++++++++++++++++-------------------------------- 1 file changed, 19 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 9137b54613e0..1ab5e9d7fa50 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -145,18 +145,11 @@ void second_overflow(void) } /* - * Compute the phase adjustment for the next second. In PLL mode, the - * offset is reduced by a fixed factor times the time constant. In FLL - * mode the offset is used directly. In either mode, the maximum phase - * adjustment for each second is clamped so as to spread the adjustment - * over not more than the number of seconds between updates. + * Compute the phase adjustment for the next second. The offset is + * reduced by a fixed factor times the time constant. 
*/ tick_length = tick_length_base; - time_adj = time_offset; - if (!(time_status & STA_FLL)) - time_adj = shift_right(time_adj, SHIFT_KG + time_constant); - time_adj = min(time_adj, -((MAXPHASE / HZ) << SHIFT_UPDATE) / MINSEC); - time_adj = max(time_adj, ((MAXPHASE / HZ) << SHIFT_UPDATE) / MINSEC); + time_adj = shift_right(time_offset, SHIFT_PLL + time_constant); time_offset -= time_adj; tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE); @@ -200,7 +193,7 @@ void __attribute__ ((weak)) notify_arch_cmos_timer(void) int do_adjtimex(struct timex *txc) { long ltemp, mtemp, save_adjust; - s64 freq_adj; + s64 freq_adj, temp64; int result; /* In order to modify anything, you gotta be super-user! */ @@ -270,7 +263,7 @@ int do_adjtimex(struct timex *txc) result = -EINVAL; goto leave; } - time_constant = txc->constant; + time_constant = min(txc->constant + 4, (long)MAXTC); } if (txc->modes & ADJ_OFFSET) { /* values checked earlier */ @@ -298,26 +291,20 @@ int do_adjtimex(struct timex *txc) time_reftime = xtime.tv_sec; mtemp = xtime.tv_sec - time_reftime; time_reftime = xtime.tv_sec; - freq_adj = 0; - if (time_status & STA_FLL) { - if (mtemp >= MINSEC) { - freq_adj = (s64)time_offset << (SHIFT_NSEC - SHIFT_KH); - if (time_offset < 0) { - freq_adj = -freq_adj; - do_div(freq_adj, mtemp); - freq_adj = -freq_adj; - } else - do_div(freq_adj, mtemp); - } else /* calibration interval too short (p. 12) */ - result = TIME_ERROR; - } else { /* PLL mode */ - if (mtemp < MAXSEC) { - freq_adj = (s64)ltemp * mtemp; - freq_adj = shift_right(freq_adj,(time_constant + - time_constant + - SHIFT_KF - SHIFT_NSEC)); - } else /* calibration interval too long (p. 12) */ - result = TIME_ERROR; + + freq_adj = (s64)time_offset * mtemp; + freq_adj = shift_right(freq_adj, time_constant * 2 + + (SHIFT_PLL + 2) * 2 - SHIFT_NSEC); + if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) { + temp64 = (s64)time_offset << (SHIFT_NSEC - SHIFT_FLL); + if (time_offset < 0) { + temp64 = -temp64; + do_div(temp64, mtemp); + freq_adj -= temp64; + } else { + do_div(temp64, mtemp); + freq_adj += temp64; + } } freq_adj += time_freq; freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC); -- cgit v1.2.3 From 70bc42f90a3f4721c89dbe865e6c95da8565b41c Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sat, 30 Sep 2006 23:28:29 -0700 Subject: [PATCH] kernel/time/ntp.c: possible cleanups This patch contains the following possible cleanups: - make the following needlessly global function static: - ntp_update_frequency() - make the following needlessly global variables static: - time_state - time_offset - time_constant - time_reftime - remove the following read-only global variable: - time_precision Signed-off-by: Adrian Bunk Cc: Roman Zippel Cc: john stultz Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/time/ntp.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'kernel') diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 1ab5e9d7fa50..47195fa0ec4f 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -30,17 +30,31 @@ static u64 tick_length, tick_length_base; * phase-lock loop variables */ /* TIME_ERROR prevents overwriting the CMOS clock */ -int time_state = TIME_OK; /* clock synchronization status */ +static int time_state = TIME_OK; /* clock synchronization status */ int time_status = STA_UNSYNC; /* clock status bits */ -long time_offset; /* time adjustment (ns) */ -long time_constant = 2; /* pll time constant */ -long time_precision = 1; /* 
clock precision (us) */ +static long time_offset; /* time adjustment (ns) */ +static long time_constant = 2; /* pll time constant */ long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */ long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */ long time_freq; /* frequency offset (scaled ppm)*/ -long time_reftime; /* time at last adjustment (s) */ +static long time_reftime; /* time at last adjustment (s) */ long time_adjust; +#define CLOCK_TICK_OVERFLOW (LATCH * HZ - CLOCK_TICK_RATE) +#define CLOCK_TICK_ADJUST (((s64)CLOCK_TICK_OVERFLOW * NSEC_PER_SEC) / \ + (s64)CLOCK_TICK_RATE) + +static void ntp_update_frequency(void) +{ + tick_length_base = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; + tick_length_base += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT; + tick_length_base += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC); + + do_div(tick_length_base, HZ); + + tick_nsec = tick_length_base >> TICK_LENGTH_SHIFT; +} + /** * ntp_clear - Clears the NTP state variables * @@ -59,20 +73,6 @@ void ntp_clear(void) time_offset = 0; } -#define CLOCK_TICK_OVERFLOW (LATCH * HZ - CLOCK_TICK_RATE) -#define CLOCK_TICK_ADJUST (((s64)CLOCK_TICK_OVERFLOW * NSEC_PER_SEC) / (s64)CLOCK_TICK_RATE) - -void ntp_update_frequency(void) -{ - tick_length_base = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << TICK_LENGTH_SHIFT; - tick_length_base += (s64)CLOCK_TICK_ADJUST << TICK_LENGTH_SHIFT; - tick_length_base += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC); - - do_div(tick_length_base, HZ); - - tick_nsec = tick_length_base >> TICK_LENGTH_SHIFT; -} - /* * this routine handles the overflow of the microsecond field * @@ -330,7 +330,7 @@ leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0) txc->esterror = time_esterror; txc->status = time_status; txc->constant = time_constant; - txc->precision = time_precision; + txc->precision = 1; txc->tolerance = MAXFREQ; txc->tick = tick_usec; -- cgit v1.2.3 From 8ef386092d7c2891bd7acefb2a87f878f7e9a0d6 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Sat, 30 Sep 2006 23:28:31 -0700 Subject: [PATCH] kill wall_jiffies With 2.6.18-rc4-mm2, now wall_jiffies will always be the same as jiffies. So we can kill wall_jiffies completely. This is just a cleanup and logically should not change any real behavior except for one thing: RTC updating code in (old) ppc and xtensa use a condition "jiffies - wall_jiffies == 1". This condition is never met so I suppose it is just a bug. I just remove that condition only instead of kill the whole "if" block. [heiko.carstens@de.ibm.com: s390 build fix and cleanup] Signed-off-by: Atsushi Nemoto Cc: Andi Kleen Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Russell King Cc: Ian Molton Cc: Mikael Starvik Cc: David Howells Cc: Yoshinori Sato Cc: Hirokazu Takata Cc: Ralf Baechle Cc: Kyle McMartin Cc: Heiko Carstens Cc: Martin Schwidefsky Cc: Paul Mundt Cc: Kazumoto Kojima Cc: Richard Curnow Cc: William Lee Irwin III Cc: "David S. 
Miller" Cc: Jeff Dike Cc: Paolo 'Blaisorblade' Giarrusso Cc: Miles Bader Cc: Chris Zankel Cc: "Luck, Tony" Cc: Geert Uytterhoeven Cc: Roman Zippel Signed-off-by: Heiko Carstens Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/timer.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/timer.c b/kernel/timer.c index 654dd5df2417..c1c7fbcffec1 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -768,7 +768,7 @@ static int timekeeping_suspended; * @dev: unused * * This is for the generic clocksource timekeeping. - * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are + * xtime/wall_to_monotonic/jiffies/etc are * still managed by arch specific suspend/resume code. */ static int timekeeping_resume(struct sys_device *dev) @@ -1016,9 +1016,6 @@ static inline void calc_load(unsigned long ticks) } } -/* jiffies at the most recent update of wall time */ -unsigned long wall_jiffies = INITIAL_JIFFIES; - /* * This read-write spinlock protects us from races in SMP while * playing with xtime and avenrun. @@ -1056,7 +1053,6 @@ void run_local_timers(void) */ static inline void update_times(unsigned long ticks) { - wall_jiffies += ticks; update_wall_time(); calc_load(ticks); } -- cgit v1.2.3 From 0ae646845b603e9df5711084436d389f8371ffb3 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Sat, 30 Sep 2006 23:28:53 -0700 Subject: [PATCH] Fix taskstats size calculation (use the new genetlink utility functions) The addition of the CSA patch pushed the size of struct taskstats to 256 bytes. This exposed a problem with prepare_reply(), we were not allocating space for the netlink and genetlink header. It worked earlier because alloc_skb() would align the skb to SMP_CACHE_BYTES, which added some additonal bytes. Signed-off-by: Balbir Singh Cc: Jamal Hadi Cc: Shailabh Nagar Cc: Thomas Graf Cc: "David S. Miller" Cc: Jay Lan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/taskstats.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 2ed4040d0dc5..c451af2ddb50 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -75,7 +75,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, /* * If new attributes are added, please revisit this allocation */ - skb = nlmsg_new(size, GFP_KERNEL); + skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL); if (!skb) return -ENOMEM; -- cgit v1.2.3 From f3cef7a99469afc159fec3a61b42dc7ca5b6824f Mon Sep 17 00:00:00 2001 From: Jay Lan Date: Sat, 30 Sep 2006 23:28:55 -0700 Subject: [PATCH] csa: basic accounting over taskstats Add some basic accounting fields to the taskstats struct, add a new kernel/tsacct.c to handle basic accounting data handling upon exit. A handle is added to taskstats.c to invoke the basic accounting data handling. 
Signed-off-by: Jay Lan Cc: Shailabh Nagar Cc: Balbir Singh Cc: Jes Sorensen Cc: Chris Sturtivant Cc: Tony Ernst Cc: Guillaume Thouvenin Cc: "Michal Piotrowski" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/Makefile | 2 +- kernel/taskstats.c | 4 +++ kernel/tsacct.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 kernel/tsacct.c (limited to 'kernel') diff --git a/kernel/Makefile b/kernel/Makefile index e210e8cf7237..aacaafb28b9d 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -49,7 +49,7 @@ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o -obj-$(CONFIG_TASKSTATS) += taskstats.o +obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is diff --git a/kernel/taskstats.c b/kernel/taskstats.c index c451af2ddb50..6c38dce88e8c 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -198,7 +199,10 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk, */ delayacct_add_tsk(stats, tsk); + + /* fill in basic acct fields */ stats->version = TASKSTATS_VERSION; + bacct_add_tsk(stats, tsk); /* Define err: label here if needed */ put_task_struct(tsk); diff --git a/kernel/tsacct.c b/kernel/tsacct.c new file mode 100644 index 000000000000..899067950a88 --- /dev/null +++ b/kernel/tsacct.c @@ -0,0 +1,72 @@ +/* + * tsacct.c - System accounting over taskstats interface + * + * Copyright (C) Jay Lan, + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include + + +#define USEC_PER_TICK (USEC_PER_SEC/HZ) +/* + * fill in basic accounting fields + */ +void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) +{ + struct timespec uptime, ts; + s64 ac_etime; + + BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); + + /* calculate task elapsed time in timespec */ + do_posix_clock_monotonic_gettime(&uptime); + ts = timespec_sub(uptime, current->group_leader->start_time); + /* rebase elapsed time to usec */ + ac_etime = timespec_to_ns(&ts); + do_div(ac_etime, NSEC_PER_USEC); + stats->ac_etime = ac_etime; + stats->ac_btime = xtime.tv_sec - ts.tv_sec; + if (thread_group_leader(tsk)) { + stats->ac_exitcode = tsk->exit_code; + if (tsk->flags & PF_FORKNOEXEC) + stats->ac_flag |= AFORK; + } + if (tsk->flags & PF_SUPERPRIV) + stats->ac_flag |= ASU; + if (tsk->flags & PF_DUMPCORE) + stats->ac_flag |= ACORE; + if (tsk->flags & PF_SIGNALED) + stats->ac_flag |= AXSIG; + stats->ac_nice = task_nice(tsk); + stats->ac_sched = tsk->policy; + stats->ac_uid = tsk->uid; + stats->ac_gid = tsk->gid; + stats->ac_pid = tsk->pid; + stats->ac_ppid = (tsk->parent) ? 
tsk->parent->pid : 0; + stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; + stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; + stats->ac_minflt = tsk->min_flt; + stats->ac_majflt = tsk->maj_flt; + /* Each process gets a minimum of one usec cpu time */ + if ((stats->ac_utime == 0) && (stats->ac_stime == 0)) { + stats->ac_stime = 1; + } + + strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm)); +} + -- cgit v1.2.3 From 9acc1853519a0473620d424105f9d49ea5b4e62e Mon Sep 17 00:00:00 2001 From: Jay Lan Date: Sat, 30 Sep 2006 23:28:58 -0700 Subject: [PATCH] csa: Extended system accounting over taskstats Add extended system accounting handling over taskstats interface. A CONFIG_TASK_XACCT flag is created to enable the extended accounting code. Signed-off-by: Jay Lan Cc: Shailabh Nagar Cc: Balbir Singh Cc: Jes Sorensen Cc: Chris Sturtivant Cc: Tony Ernst Cc: Guillaume Thouvenin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/taskstats.c | 4 ++++ kernel/tsacct.c | 19 +++++++++++++++++++ 2 files changed, 23 insertions(+) (limited to 'kernel') diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 6c38dce88e8c..5d6a8c54ee85 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -204,6 +205,9 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk, stats->version = TASKSTATS_VERSION; bacct_add_tsk(stats, tsk); + /* fill in extended acct fields */ + xacct_add_tsk(stats, tsk); + /* Define err: label here if needed */ put_task_struct(tsk); return rc; diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 899067950a88..410483490cf6 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -70,3 +70,22 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm)); } + +#ifdef CONFIG_TASK_XACCT +/* + * fill in extended accounting fields + */ +void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) +{ + stats->acct_rss_mem1 = p->acct_rss_mem1; + stats->acct_vm_mem1 = p->acct_vm_mem1; + if (p->mm) { + stats->hiwater_rss = p->mm->hiwater_rss; + stats->hiwater_vm = p->mm->hiwater_vm; + } + stats->read_char = p->rchar; + stats->write_char = p->wchar; + stats->read_syscalls = p->syscr; + stats->write_syscalls = p->syscw; +} +#endif -- cgit v1.2.3 From 8f0ab5147951267134612570604cf8341901a80c Mon Sep 17 00:00:00 2001 From: Jay Lan Date: Sat, 30 Sep 2006 23:28:59 -0700 Subject: [PATCH] csa: convert CONFIG tag for extended accounting routines There were a few accounting data/macros that are used in CSA but are #ifdef'ed inside CONFIG_BSD_PROCESS_ACCT. This patch is to change those ifdef's from CONFIG_BSD_PROCESS_ACCT to CONFIG_TASK_XACCT. A few defines are moved from kernel/acct.c and include/linux/acct.h to kernel/tsacct.c and include/linux/tsacct_kern.h. 
Signed-off-by: Jay Lan Cc: Shailabh Nagar Cc: Balbir Singh Cc: Jes Sorensen Cc: Chris Sturtivant Cc: Tony Ernst Cc: Guillaume Thouvenin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/acct.c | 30 ------------------------------ kernel/exit.c | 1 + kernel/fork.c | 1 + kernel/sched.c | 2 +- kernel/tsacct.c | 30 ++++++++++++++++++++++++++++++ 5 files changed, 33 insertions(+), 31 deletions(-) (limited to 'kernel') diff --git a/kernel/acct.c b/kernel/acct.c index f4330acead46..0aad5ca36a81 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -602,33 +602,3 @@ void acct_process(void) do_acct_process(file); fput(file); } - - -/** - * acct_update_integrals - update mm integral fields in task_struct - * @tsk: task_struct for accounting - */ -void acct_update_integrals(struct task_struct *tsk) -{ - if (likely(tsk->mm)) { - long delta = - cputime_to_jiffies(tsk->stime) - tsk->acct_stimexpd; - - if (delta == 0) - return; - tsk->acct_stimexpd = tsk->stime; - tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); - tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; - } -} - -/** - * acct_clear_integrals - clear the mm integral fields in task_struct - * @tsk: task_struct whose accounting fields are cleared - */ -void acct_clear_integrals(struct task_struct *tsk) -{ - tsk->acct_stimexpd = 0; - tsk->acct_rss_mem1 = 0; - tsk->acct_vm_mem1 = 0; -} diff --git a/kernel/exit.c b/kernel/exit.c index c189de2927ab..3b47f26985f2 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/fork.c b/kernel/fork.c index 1c999f3e0b47..89f666491d1f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include diff --git a/kernel/sched.c b/kernel/sched.c index 74f169ac0773..2bbd948f0169 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -49,7 +49,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 410483490cf6..47c71daa416f 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -88,4 +88,34 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) stats->read_syscalls = p->syscr; stats->write_syscalls = p->syscw; } + + +/** + * acct_update_integrals - update mm integral fields in task_struct + * @tsk: task_struct for accounting + */ +void acct_update_integrals(struct task_struct *tsk) +{ + if (likely(tsk->mm)) { + long delta = + cputime_to_jiffies(tsk->stime) - tsk->acct_stimexpd; + + if (delta == 0) + return; + tsk->acct_stimexpd = tsk->stime; + tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); + tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; + } +} + +/** + * acct_clear_integrals - clear the mm integral fields in task_struct + * @tsk: task_struct whose accounting fields are cleared + */ +void acct_clear_integrals(struct task_struct *tsk) +{ + tsk->acct_stimexpd = 0; + tsk->acct_rss_mem1 = 0; + tsk->acct_vm_mem1 = 0; +} #endif -- cgit v1.2.3 From db5fed26b2e0beed939b773dd5896077a1794d65 Mon Sep 17 00:00:00 2001 From: Jay Lan Date: Sat, 30 Sep 2006 23:29:00 -0700 Subject: [PATCH] csa accounting taskstats update ChangeLog: Feedbacks from Andrew Morton: - define TS_COMM_LEN to 32 - change acct_stimexpd field of task_struct to be of cputime_t, which is to be used to save the tsk->stime of last timer interrupt update. - a new Documentation/accounting/taskstats-struct.txt to describe fields of taskstats struct. 
Feedback from Balbir Singh: - keep the stime of a task to be zero when both stime and utime are zero as recoreded in task_struct. Misc: - convert accumulated RSS/VM from platform dependent pages-ticks to MBytes-usecs in the kernel Cc: Shailabh Nagar Cc: Balbir Singh Cc: Jes Sorensen Cc: Chris Sturtivant Cc: Tony Ernst Cc: Guillaume Thouvenin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/tsacct.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 47c71daa416f..db443221ba5b 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -20,6 +20,7 @@ #include #include #include +#include #define USEC_PER_TICK (USEC_PER_SEC/HZ) @@ -62,33 +63,35 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; stats->ac_minflt = tsk->min_flt; stats->ac_majflt = tsk->maj_flt; - /* Each process gets a minimum of one usec cpu time */ - if ((stats->ac_utime == 0) && (stats->ac_stime == 0)) { - stats->ac_stime = 1; - } strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm)); } #ifdef CONFIG_TASK_XACCT + +#define KB 1024 +#define MB (1024*KB) /* * fill in extended accounting fields */ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) { - stats->acct_rss_mem1 = p->acct_rss_mem1; - stats->acct_vm_mem1 = p->acct_vm_mem1; + /* convert pages-jiffies to Mbyte-usec */ + stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; + stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; if (p->mm) { - stats->hiwater_rss = p->mm->hiwater_rss; - stats->hiwater_vm = p->mm->hiwater_vm; + /* adjust to KB unit */ + stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB; + stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB; } stats->read_char = p->rchar; stats->write_char = p->wchar; stats->read_syscalls = p->syscr; stats->write_syscalls = p->syscw; } - +#undef KB +#undef MB /** * acct_update_integrals - update mm integral fields in task_struct @@ -97,8 +100,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) void acct_update_integrals(struct task_struct *tsk) { if (likely(tsk->mm)) { - long delta = - cputime_to_jiffies(tsk->stime) - tsk->acct_stimexpd; + long delta = cputime_to_jiffies( + cputime_sub(tsk->stime, tsk->acct_stimexpd)); if (delta == 0) return; -- cgit v1.2.3 From d8c76e6f45c111c32a4b3e50a2adc9210737b0d8 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Sat, 30 Sep 2006 23:29:04 -0700 Subject: [PATCH] r/o bind mount prepwork: inc_nlink() helper This is mostly included for parity with dec_nlink(), where we will have some more hooks. This one should stay pretty darn straightforward for now. Signed-off-by: Dave Hansen Acked-by: Christoph Hellwig Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 8c3c400cce91..9d850ae13b1b 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -377,7 +377,7 @@ static int cpuset_fill_super(struct super_block *sb, void *unused_data, inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directories start off with i_nlink == 2 (for "." 
entry) */ - inode->i_nlink++; + inc_nlink(inode); } else { return -ENOMEM; } @@ -1565,7 +1565,7 @@ static int cpuset_create_file(struct dentry *dentry, int mode) inode->i_fop = &simple_dir_operations; /* start off with i_nlink == 2 (for "." entry) */ - inode->i_nlink++; + inc_nlink(inode); } else if (S_ISREG(mode)) { inode->i_size = 0; inode->i_fop = &cpuset_file_operations; @@ -1598,7 +1598,7 @@ static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) error = cpuset_create_file(dentry, S_IFDIR | mode); if (!error) { dentry->d_fsdata = cs; - parent->d_inode->i_nlink++; + inc_nlink(parent->d_inode); cs->dentry = dentry; } dput(dentry); @@ -2033,7 +2033,7 @@ int __init cpuset_init(void) } root = cpuset_mount->mnt_sb->s_root; root->d_fsdata = &top_cpuset; - root->d_inode->i_nlink++; + inc_nlink(root->d_inode); top_cpuset.dentry = root; root->d_inode->i_op = &cpuset_dir_inode_operations; number_of_cpusets = 1; -- cgit v1.2.3 From e239ca540594cff00adcce163dc332b27015d8e5 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 30 Sep 2006 23:29:27 -0700 Subject: [PATCH] Create call_usermodehelper_pipe() A new member in the ever-growing family of call_usermode* functions is born. The new call_usermodehelper_pipe() function allows piping data to the stdin of the called user mode program and otherwise behaves like the normal call_usermodehelper() (except that it always waits for the child to finish) Signed-off-by: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kmod.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kmod.c b/kernel/kmod.c index 842f8015d7fd..5c63c53014a9 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -122,6 +122,7 @@ struct subprocess_info { struct key *ring; int wait; int retval; + struct file *stdin; }; /* @@ -145,12 +146,26 @@ static int ____call_usermodehelper(void *data) key_put(old_session); + /* Install input pipe when needed */ + if (sub_info->stdin) { + struct files_struct *f = current->files; + struct fdtable *fdt; + /* no races because files should be private here */ + sys_close(0); + fd_install(0, sub_info->stdin); + spin_lock(&f->file_lock); + fdt = files_fdtable(f); + FD_SET(0, fdt->open_fds); + FD_CLR(0, fdt->close_on_exec); + spin_unlock(&f->file_lock); + } + /* We can run anywhere, unlike our parent keventd(). */ set_cpus_allowed(current, CPU_MASK_ALL); retval = -EPERM; if (current->fs->root) - retval = execve(sub_info->path, sub_info->argv,sub_info->envp); + retval = execve(sub_info->path, sub_info->argv, sub_info->envp); /* Exec failed? 
*/ sub_info->retval = retval; @@ -268,6 +283,44 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, } EXPORT_SYMBOL(call_usermodehelper_keys); +int call_usermodehelper_pipe(char *path, char **argv, char **envp, + struct file **filp) +{ + DECLARE_COMPLETION(done); + struct subprocess_info sub_info = { + .complete = &done, + .path = path, + .argv = argv, + .envp = envp, + .retval = 0, + }; + struct file *f; + DECLARE_WORK(work, __call_usermodehelper, &sub_info); + + if (!khelper_wq) + return -EBUSY; + + if (path[0] == '\0') + return 0; + + f = create_write_pipe(); + if (!f) + return -ENOMEM; + *filp = f; + + f = create_read_pipe(f); + if (!f) { + free_write_pipe(*filp); + return -ENOMEM; + } + sub_info.stdin = f; + + queue_work(khelper_wq, &work); + wait_for_completion(&done); + return sub_info.retval; +} +EXPORT_SYMBOL(call_usermodehelper_pipe); + void __init usermodehelper_init(void) { khelper_wq = create_singlethread_workqueue("khelper"); -- cgit v1.2.3 From d025c9db7f31fc0554ce7fb2dfc78d35a77f3487 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Sat, 30 Sep 2006 23:29:28 -0700 Subject: [PATCH] Support piping into commands in /proc/sys/kernel/core_pattern Using the infrastructure created in previous patches, implement support for piping core dumps into programs. This is done by overloading the existing core_pattern sysctl with a new syntax: |program When the first character of the pattern is a '|' the kernel will instead treat the rest of the pattern as a command to run. The core dump will be written to the standard input of that program instead of to a file. This is useful for having automatic core dump analysis without filling up disks. The program can do some simple analysis and save only a summary of the core dump. The core dump process will run with the privileges and in the name space of the process that caused the core dump. I also increased the core pattern size to 128 bytes so that longer command lines fit. Most of the changes come from allowing core dumps without seeks. They are fairly straightforward though. One small incompatibility is that if someone had a core pattern previously that started with '|' they will suddenly get new behaviour. I think that's unlikely to be a real problem though. Additional background: > Very nice, do you happen to have a program that can accept this kind of > input for crash dumps? I'm guessing that the embedded people will > really want this functionality. I had a cheesy demo/prototype. Basically it wrote the dump to a file again, ran gdb on it to get a backtrace and wrote the summary to a shared directory. Then there was a simple CGI script to generate a "top 10" crashes HTML listing. Unfortunately this still had the disadvantage of needing full disk space for a dump, except for deleting it afterwards (in fact it was worse because holes didn't work over the pipe, so if you have a holey address map it would require more space). Fortunately gdb seems to be happy to handle /proc/pid/fd/xxx input pipes as cores (at least it worked with zsh's =(cat core) syntax), so it would likely be possible to do it without temporary space with a simple wrapper that calls it in the right way. I ran out of time before doing that though. The demo prototype scripts weren't very good. If there is really interest I can dig them out (they are currently on a laptop disk on the desk, with the laptop itself being in service), but I would recommend rewriting them for any serious application of this and fixing the disk space problem. 
Also, to be really useful it should probably find a way to automatically fetch the debuginfos (I cheated and just installed them in advance). If nobody else does it I can probably do the rewrite myself again at some point. My hope at some point was that desktops would support it in their builtin crash reporters, but at least the KDE people I talked to seemed to be happy with their user space only solution. Alan sayeth: I don't believe that piping as such is necessarily the right model, but the ability to intercept and process core dumps from user space is asked for by many enterprise users as well. They want to know about, capture, analyse and process core dumps, often centrally and in automated form. [akpm@osdl.org: loff_t != unsigned long] Signed-off-by: Andi Kleen Cc: Alan Cox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kmod.c | 4 ++++ kernel/sysctl.c | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/kmod.c b/kernel/kmod.c index 5c63c53014a9..f8121b95183f 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -35,6 +35,7 @@ #include #include #include +#include #include extern int max_threads; @@ -158,6 +159,9 @@ static int ____call_usermodehelper(void *data) FD_SET(0, fdt->open_fds); FD_CLR(0, fdt->close_on_exec); spin_unlock(&f->file_lock); + + /* and disallow core files too */ + current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0}; } /* We can run anywhere, unlike our parent keventd(). */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c57c4532e296..ba42694f0453 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -294,7 +294,7 @@ static ctl_table kern_table[] = { .ctl_name = KERN_CORE_PATTERN, .procname = "core_pattern", .data = core_pattern, - .maxlen = 64, + .maxlen = 128, .mode = 0644, .proc_handler = &proc_dostring, .strategy = &sysctl_string, -- cgit v1.2.3
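As a rough illustration of the interface the last two patches above add (this sketch is not part of the patch series; the handler name, log path, and buffer size are invented for the example), the only behaviour assumed is the one the commit message describes: once a pattern such as |/usr/local/bin/core-summary is written to /proc/sys/kernel/core_pattern, the kernel runs that program on a crash and writes the core dump to its standard input. A minimal handler that keeps only a one-line summary instead of the full dump might look like this:

/* core-summary.c - hypothetical userspace handler for a piped core_pattern.
 * The kernel feeds the core dump to stdin; we drain it, count the bytes,
 * and append a single summary line to a log file instead of keeping the dump.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static char buf[65536];
	unsigned long long total = 0;
	ssize_t n;
	FILE *log;

	/* Read the whole dump from stdin, as written by the kernel. */
	while ((n = read(STDIN_FILENO, buf, sizeof(buf))) > 0)
		total += (unsigned long long)n;

	/* Record only a summary; the log path is just an example. */
	log = fopen("/var/log/core-summary.log", "a");
	if (!log)
		return 1;
	fprintf(log, "core dump received on stdin: %llu bytes\n", total);
	fclose(log);
	return 0;
}

A real handler would presumably hand the stream to a debugger or dump analyser, much like the gdb-based prototype described in the commit message; the 128-byte core_pattern limit from the sysctl change leaves room for a longer command line.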