From 3ed80a62bf959d34ebd4d553b026fbe7e6fbcc54 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: drop module support With module supported dropped from net_prio, no controller is using cgroup module support. None of actual resource controllers can be built as a module and we aren't gonna add new controllers which don't control resources. This patch drops module support from cgroup. * cgroup_[un]load_subsys() and cgroup_subsys->module removed. * As there's no point in distinguishing IS_BUILTIN() and IS_MODULE(), cgroup_subsys.h now uses IS_ENABLED() directly. * enum cgroup_subsys_id now exactly matches the list of enabled controllers as ordered in cgroup_subsys.h. * cgroup_subsys[] is now a contiguously occupied array. Size specification is no longer necessary and dropped. * for_each_builtin_subsys() is removed and for_each_subsys() is updated to not require any locking. * module ref handling is removed from rebind_subsystems(). * Module related comments dropped. v2: Rebased on top of fe1217c4f3f7 ("net: net_cls: move cgroupfs classid handling into core"). v3: Added {} around the if (need_forkexit_callback) block in cgroup_post_fork() for readability as suggested by Li. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 284 ++++---------------------------------------------------- 1 file changed, 16 insertions(+), 268 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e2f46ba37f72..ccb16b47e293 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include @@ -120,15 +119,9 @@ static struct workqueue_struct *cgroup_destroy_wq; */ static struct workqueue_struct *cgroup_pidlist_destroy_wq; -/* - * Generate an array of cgroup subsystem pointers. At boot time, this is - * populated with the built in subsystems, and modular subsystems are - * registered after that. The mutable section of this array is protected by - * cgroup_mutex. - */ +/* generate an array of cgroup subsystem pointers */ #define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, -#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) -static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = { +static struct cgroup_subsys *cgroup_subsys[] = { #include }; @@ -258,30 +251,13 @@ static int notify_on_release(const struct cgroup *cgrp) else /** - * for_each_subsys - iterate all loaded cgroup subsystems + * for_each_subsys - iterate all enabled cgroup subsystems * @ss: the iteration cursor * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end - * - * Iterates through all loaded subsystems. Should be called under - * cgroup_mutex or cgroup_root_mutex. */ #define for_each_subsys(ss, ssid) \ - for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; }); \ - (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ - if (!((ss) = cgroup_subsys[(ssid)])) { } \ - else - -/** - * for_each_builtin_subsys - iterate all built-in cgroup subsystems - * @ss: the iteration cursor - * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end - * - * Bulit-in subsystems are always present and iteration itself doesn't - * require any synchronization. 
- */ -#define for_each_builtin_subsys(ss, i) \ - for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT && \ - (((ss) = cgroup_subsys[i]) || true); (i)++) + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ + (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) /* iterate across the active hierarchies */ #define for_each_active_root(root) \ @@ -975,50 +951,24 @@ static void cgroup_d_remove_dir(struct dentry *dentry) remove_dir(dentry); } -/* - * Call with cgroup_mutex held. Drops reference counts on modules, including - * any duplicate ones that parse_cgroupfs_options took. If this function - * returns an error, no reference counts are touched. - */ static int rebind_subsystems(struct cgroupfs_root *root, unsigned long added_mask, unsigned removed_mask) { struct cgroup *cgrp = &root->top_cgroup; struct cgroup_subsys *ss; - unsigned long pinned = 0; int i, ret; BUG_ON(!mutex_is_locked(&cgroup_mutex)); BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); /* Check that any added subsystems are currently free */ - for_each_subsys(ss, i) { - if (!(added_mask & (1 << i))) - continue; - - /* is the subsystem mounted elsewhere? */ - if (ss->root != &cgroup_dummy_root) { - ret = -EBUSY; - goto out_put; - } - - /* pin the module */ - if (!try_module_get(ss->module)) { - ret = -ENOENT; - goto out_put; - } - pinned |= 1 << i; - } - - /* subsys could be missing if unloaded between parsing and here */ - if (added_mask != pinned) { - ret = -ENOENT; - goto out_put; - } + for_each_subsys(ss, i) + if ((added_mask & (1 << i)) && ss->root != &cgroup_dummy_root) + return -EBUSY; ret = cgroup_populate_dir(cgrp, added_mask); if (ret) - goto out_put; + return ret; /* * Nothing can fail from this point on. Remove files for the @@ -1057,9 +1007,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, RCU_INIT_POINTER(cgrp->subsys[i], NULL); cgroup_subsys[i]->root = &cgroup_dummy_root; - - /* subsystem is now free - drop reference on module */ - module_put(ss->module); root->subsys_mask &= ~bit; } } @@ -1071,12 +1018,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, root->flags |= CGRP_ROOT_SUBSYS_BOUND; return 0; - -out_put: - for_each_subsys(ss, i) - if (pinned & (1 << i)) - module_put(ss->module); - return ret; } static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) @@ -4506,7 +4447,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) return ret; } -static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss) +static void __init cgroup_init_cftsets(struct cgroup_subsys *ss) { INIT_LIST_HEAD(&ss->cftsets); @@ -4559,185 +4500,8 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(online_css(css)); mutex_unlock(&cgroup_mutex); - - /* this function shouldn't be used with modular subsystems, since they - * need to register a subsys_id, among other things */ - BUG_ON(ss->module); } -/** - * cgroup_load_subsys: load and register a modular subsystem at runtime - * @ss: the subsystem to load - * - * This function should be called in a modular subsystem's initcall. If the - * subsystem is built as a module, it will be assigned a new subsys_id and set - * up for use. If the subsystem is built-in anyway, work is delegated to the - * simpler cgroup_init_subsys. 
- */ -int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss) -{ - struct cgroup_subsys_state *css; - int i, ret; - struct hlist_node *tmp; - struct css_set *cset; - unsigned long key; - - /* check name and function validity */ - if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN || - ss->css_alloc == NULL || ss->css_free == NULL) - return -EINVAL; - - /* - * we don't support callbacks in modular subsystems. this check is - * before the ss->module check for consistency; a subsystem that could - * be a module should still have no callbacks even if the user isn't - * compiling it as one. - */ - if (ss->fork || ss->exit) - return -EINVAL; - - /* - * an optionally modular subsystem is built-in: we want to do nothing, - * since cgroup_init_subsys will have already taken care of it. - */ - if (ss->module == NULL) { - /* a sanity check */ - BUG_ON(cgroup_subsys[ss->subsys_id] != ss); - return 0; - } - - /* init base cftset */ - cgroup_init_cftsets(ss); - - mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); - cgroup_subsys[ss->subsys_id] = ss; - - /* - * no ss->css_alloc seems to need anything important in the ss - * struct, so this can happen first (i.e. before the dummy root - * attachment). - */ - css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); - if (IS_ERR(css)) { - /* failure case - need to deassign the cgroup_subsys[] slot. */ - cgroup_subsys[ss->subsys_id] = NULL; - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); - return PTR_ERR(css); - } - - ss->root = &cgroup_dummy_root; - - /* our new subsystem will be attached to the dummy hierarchy. */ - init_css(css, ss, cgroup_dummy_top); - - /* - * Now we need to entangle the css into the existing css_sets. unlike - * in cgroup_init_subsys, there are now multiple css_sets, so each one - * will need a new pointer to it; done by iterating the css_set_table. - * furthermore, modifying the existing css_sets will corrupt the hash - * table state, so each changed css_set will need its hash recomputed. - * this is all done under the css_set_lock. - */ - write_lock(&css_set_lock); - hash_for_each_safe(css_set_table, i, tmp, cset, hlist) { - /* skip entries that we already rehashed */ - if (cset->subsys[ss->subsys_id]) - continue; - /* remove existing entry */ - hash_del(&cset->hlist); - /* set new value */ - cset->subsys[ss->subsys_id] = css; - /* recompute hash and restore entry */ - key = css_set_hash(cset->subsys); - hash_add(css_set_table, &cset->hlist, key); - } - write_unlock(&css_set_lock); - - ret = online_css(css); - if (ret) { - ss->css_free(css); - goto err_unload; - } - - /* success! */ - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); - return 0; - -err_unload: - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); - /* @ss can't be mounted here as try_module_get() would fail */ - cgroup_unload_subsys(ss); - return ret; -} -EXPORT_SYMBOL_GPL(cgroup_load_subsys); - -/** - * cgroup_unload_subsys: unload a modular subsystem - * @ss: the subsystem to unload - * - * This function should be called in a modular subsystem's exitcall. When this - * function is invoked, the refcount on the subsystem's module will be 0, so - * the subsystem will not be attached to any hierarchy. 
- */ -void cgroup_unload_subsys(struct cgroup_subsys *ss) -{ - struct cgrp_cset_link *link; - struct cgroup_subsys_state *css; - - BUG_ON(ss->module == NULL); - - /* - * we shouldn't be called if the subsystem is in use, and the use of - * try_module_get() in rebind_subsystems() should ensure that it - * doesn't start being used while we're killing it off. - */ - BUG_ON(ss->root != &cgroup_dummy_root); - - mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); - - css = cgroup_css(cgroup_dummy_top, ss); - if (css) - offline_css(css); - - /* deassign the subsys_id */ - cgroup_subsys[ss->subsys_id] = NULL; - - /* - * disentangle the css from all css_sets attached to the dummy - * top. as in loading, we need to pay our respects to the hashtable - * gods. - */ - write_lock(&css_set_lock); - list_for_each_entry(link, &cgroup_dummy_top->cset_links, cset_link) { - struct css_set *cset = link->cset; - unsigned long key; - - hash_del(&cset->hlist); - cset->subsys[ss->subsys_id] = NULL; - key = css_set_hash(cset->subsys); - hash_add(css_set_table, &cset->hlist, key); - } - write_unlock(&css_set_lock); - - /* - * remove subsystem's css from the cgroup_dummy_top and free it - - * need to free before marking as null because ss->css_free needs - * the cgrp->subsys pointer to find their state. - */ - if (css) - ss->css_free(css); - RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL); - - mutex_unlock(&cgroup_root_mutex); - mutex_unlock(&cgroup_mutex); -} -EXPORT_SYMBOL_GPL(cgroup_unload_subsys); - /** * cgroup_init_early - cgroup initialization at system boot * @@ -4763,8 +4527,7 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); - /* at bootup time, we don't worry about modular subsystems */ - for_each_builtin_subsys(ss, i) { + for_each_subsys(ss, i) { BUG_ON(!ss->name); BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); BUG_ON(!ss->css_alloc); @@ -4797,7 +4560,7 @@ int __init cgroup_init(void) if (err) return err; - for_each_builtin_subsys(ss, i) { + for_each_subsys(ss, i) { if (!ss->early_init) cgroup_init_subsys(ss); } @@ -5032,15 +4795,7 @@ void cgroup_post_fork(struct task_struct *child) * and addition to css_set. */ if (need_forkexit_callback) { - /* - * fork/exit callbacks are supported only for builtin - * subsystems, and the builtin section of the subsys - * array is immutable, so we don't need to lock the - * subsys array here. On the other hand, modular section - * of the array can be freed at module unload, so we - * can't touch that. - */ - for_each_builtin_subsys(ss, i) + for_each_subsys(ss, i) if (ss->fork) ss->fork(child); } @@ -5105,11 +4860,8 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) RCU_INIT_POINTER(tsk->cgroups, &init_css_set); if (run_callbacks && need_forkexit_callback) { - /* - * fork/exit callbacks are supported only for builtin - * subsystems, see cgroup_post_fork() for details. - */ - for_each_builtin_subsys(ss, i) { + /* see cgroup_post_fork() for details */ + for_each_subsys(ss, i) { if (ss->exit) { struct cgroup_subsys_state *old_css = cset->subsys[i]; struct cgroup_subsys_state *css = task_css(tsk, i); @@ -5228,11 +4980,7 @@ static int __init cgroup_disable(char *str) if (!*token) continue; - /* - * cgroup_disable, being at boot time, can't know about - * module subsystems, so we don't worry about them. 
- */ - for_each_builtin_subsys(ss, i) { + for_each_subsys(ss, i) { if (!strcmp(token, ss->name)) { ss->disabled = 1; printk(KERN_INFO "Disabling %s control group" -- cgit v1.2.3 From 073219e995b4a3f8cf1ce8228b7ef440b6994ac0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: clean up cgroup_subsys names and initialization cgroup_subsys is a bit messier than it needs to be. * The name of a subsys can be different from its internal identifier defined in cgroup_subsys.h. Most subsystems use the matching name but three - cpu, memory and perf_event - use different ones. * cgroup_subsys_id enums are postfixed with _subsys_id and each cgroup_subsys is postfixed with _subsys. cgroup.h is widely included throughout various subsystems, it doesn't and shouldn't have claim on such generic names which don't have any qualifier indicating that they belong to cgroup. * cgroup_subsys->subsys_id should always equal the matching cgroup_subsys_id enum; however, we require each controller to initialize it and then BUG if they don't match, which is a bit silly. This patch cleans up cgroup_subsys names and initialization by doing the followings. * cgroup_subsys_id enums are now postfixed with _cgrp_id, and each cgroup_subsys with _cgrp_subsys. * With the above, renaming subsys identifiers to match the userland visible names doesn't cause any naming conflicts. All non-matching identifiers are renamed to match the official names. cpu_cgroup -> cpu mem_cgroup -> memory perf -> perf_event * controllers no longer need to initialize ->subsys_id and ->name. They're generated in cgroup core and set automatically during boot. * Redundant cgroup_subsys declarations removed. * While updating BUG_ON()s in cgroup_init_early(), convert them to WARN()s. BUGging that early during boot is stupid - the kernel can't print anything, even through serial console and the trap handler doesn't even link stack frame properly for back-tracing. This patch doesn't introduce any behavior changes. v2: Rebased on top of fe1217c4f3f7 ("net: net_cls: move cgroupfs classid handling into core"). Signed-off-by: Tejun Heo Acked-by: Neil Horman Acked-by: "David S. Miller" Acked-by: "Rafael J. Wysocki" Acked-by: Michal Hocko Acked-by: Peter Zijlstra Acked-by: Aristeu Rozanski Acked-by: Ingo Molnar Acked-by: Li Zefan Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: Serge E. 
Hallyn Cc: Vivek Goyal Cc: Thomas Graf --- kernel/cgroup.c | 34 ++++++++++++++++++++-------------- kernel/cgroup_freezer.c | 8 ++------ kernel/cpuset.c | 10 ++++------ kernel/events/core.c | 8 +++----- kernel/sched/core.c | 6 ++---- kernel/sched/cpuacct.c | 6 ++---- 6 files changed, 33 insertions(+), 39 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ccb16b47e293..fe3f7253aa90 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -120,10 +120,18 @@ static struct workqueue_struct *cgroup_destroy_wq; static struct workqueue_struct *cgroup_pidlist_destroy_wq; /* generate an array of cgroup subsystem pointers */ -#define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, +#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, static struct cgroup_subsys *cgroup_subsys[] = { #include }; +#undef SUBSYS + +/* array of cgroup subsystem names */ +#define SUBSYS(_x) [_x ## _cgrp_id] = #_x, +static const char *cgroup_subsys_name[] = { +#include +}; +#undef SUBSYS /* * The dummy hierarchy, reserved for the subsystems that are otherwise @@ -1076,7 +1084,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) BUG_ON(!mutex_is_locked(&cgroup_mutex)); #ifdef CONFIG_CPUSETS - mask = ~(1UL << cpuset_subsys_id); + mask = ~(1UL << cpuset_cgrp_id); #endif memset(opts, 0, sizeof(*opts)); @@ -4528,15 +4536,15 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for_each_subsys(ss, i) { - BUG_ON(!ss->name); - BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); - BUG_ON(!ss->css_alloc); - BUG_ON(!ss->css_free); - if (ss->subsys_id != i) { - printk(KERN_ERR "cgroup: Subsys %s id == %d\n", - ss->name, ss->subsys_id); - BUG(); - } + WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->subsys_id, + "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", + i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, + ss->subsys_id, ss->name); + WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, + "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); + + ss->subsys_id = i; + ss->name = cgroup_subsys_name[i]; if (ss->early_init) cgroup_init_subsys(ss); @@ -5167,11 +5175,9 @@ static struct cftype debug_files[] = { { } /* terminate */ }; -struct cgroup_subsys debug_subsys = { - .name = "debug", +struct cgroup_subsys debug_cgrp_subsys = { .css_alloc = debug_css_alloc, .css_free = debug_css_free, - .subsys_id = debug_subsys_id, .base_cftypes = debug_files, }; #endif /* CONFIG_CGROUP_DEBUG */ diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 6c3154e477f6..98ea26a99076 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) static inline struct freezer *task_freezer(struct task_struct *task) { - return css_freezer(task_css(task, freezer_subsys_id)); + return css_freezer(task_css(task, freezer_cgrp_id)); } static struct freezer *parent_freezer(struct freezer *freezer) @@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state) return "THAWED"; }; -struct cgroup_subsys freezer_subsys; - static struct cgroup_subsys_state * freezer_css_alloc(struct cgroup_subsys_state *parent_css) { @@ -473,13 +471,11 @@ static struct cftype files[] = { { } /* terminate */ }; -struct cgroup_subsys freezer_subsys = { - .name = "freezer", +struct cgroup_subsys freezer_cgrp_subsys = { .css_alloc = freezer_css_alloc, .css_online = freezer_css_online, .css_offline = 
freezer_css_offline, .css_free = freezer_css_free, - .subsys_id = freezer_subsys_id, .attach = freezer_attach, .fork = freezer_fork, .base_cftypes = files, diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4410ac6a55f1..2d018c795fea 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css) /* Retrieve the cpuset for a task */ static inline struct cpuset *task_cs(struct task_struct *task) { - return css_cs(task_css(task, cpuset_subsys_id)); + return css_cs(task_css(task, cpuset_cgrp_id)); } static inline struct cpuset *parent_cs(struct cpuset *cs) @@ -1521,7 +1521,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, struct task_struct *task; struct task_struct *leader = cgroup_taskset_first(tset); struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset, - cpuset_subsys_id); + cpuset_cgrp_id); struct cpuset *cs = css_cs(css); struct cpuset *oldcs = css_cs(oldcss); struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); @@ -2024,8 +2024,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) kfree(cs); } -struct cgroup_subsys cpuset_subsys = { - .name = "cpuset", +struct cgroup_subsys cpuset_cgrp_subsys = { .css_alloc = cpuset_css_alloc, .css_online = cpuset_css_online, .css_offline = cpuset_css_offline, @@ -2033,7 +2032,6 @@ struct cgroup_subsys cpuset_subsys = { .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, - .subsys_id = cpuset_subsys_id, .base_cftypes = files, .early_init = 1, }; @@ -2699,7 +2697,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) goto out_free; rcu_read_lock(); - css = task_css(tsk, cpuset_subsys_id); + css = task_css(tsk, cpuset_cgrp_id); retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); rcu_read_unlock(); if (retval < 0) diff --git a/kernel/events/core.c b/kernel/events/core.c index 56003c6edfd3..64903731d834 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -342,7 +342,7 @@ struct perf_cgroup { static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task) { - return container_of(task_css(task, perf_subsys_id), + return container_of(task_css(task, perf_event_cgrp_id), struct perf_cgroup, css); } @@ -595,7 +595,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, rcu_read_lock(); - css = css_from_dir(f.file->f_dentry, &perf_subsys); + css = css_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); if (IS_ERR(css)) { ret = PTR_ERR(css); goto out; @@ -8055,9 +8055,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css, task_function_call(task, __perf_cgroup_move, task); } -struct cgroup_subsys perf_subsys = { - .name = "perf_event", - .subsys_id = perf_subsys_id, +struct cgroup_subsys perf_event_cgrp_subsys = { .css_alloc = perf_cgroup_css_alloc, .css_free = perf_cgroup_css_free, .exit = perf_cgroup_exit, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b46131ef6aab..d4cfc5561830 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7176,7 +7176,7 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); - tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id, + tg = container_of(task_css_check(tsk, cpu_cgrp_id, lockdep_is_held(&tsk->sighand->siglock)), struct task_group, css); tg = autogroup_task_group(tsk, tg); @@ -7957,8 +7957,7 @@ static struct cftype cpu_files[] = { { } /* terminate */ }; -struct cgroup_subsys cpu_cgroup_subsys = { - .name = 
"cpu", +struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, .css_free = cpu_cgroup_css_free, .css_online = cpu_cgroup_css_online, @@ -7966,7 +7965,6 @@ struct cgroup_subsys cpu_cgroup_subsys = { .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, .exit = cpu_cgroup_exit, - .subsys_id = cpu_cgroup_subsys_id, .base_cftypes = cpu_files, .early_init = 1, }; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 622e0818f905..c143ee380e3a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) /* return cpu accounting group to which this task belongs */ static inline struct cpuacct *task_ca(struct task_struct *tsk) { - return css_ca(task_css(tsk, cpuacct_subsys_id)); + return css_ca(task_css(tsk, cpuacct_cgrp_id)); } static inline struct cpuacct *parent_ca(struct cpuacct *ca) @@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val) rcu_read_unlock(); } -struct cgroup_subsys cpuacct_subsys = { - .name = "cpuacct", +struct cgroup_subsys cpuacct_cgrp_subsys = { .css_alloc = cpuacct_css_alloc, .css_free = cpuacct_css_free, - .subsys_id = cpuacct_subsys_id, .base_cftypes = files, .early_init = 1, }; -- cgit v1.2.3 From aec25020f5d4b69aea5317551d1cb7043f6b04fb Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: rename cgroup_subsys->subsys_id to ->id It's no longer referenced outside cgroup core, so renaming is easy. Let's rename it for consistency & brevity. This patch is pure rename. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fe3f7253aa90..5a77ca0784a6 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -198,7 +198,7 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, struct cgroup_subsys *ss) { if (ss) - return rcu_dereference_check(cgrp->subsys[ss->subsys_id], + return rcu_dereference_check(cgrp->subsys[ss->id], lockdep_is_held(&cgroup_mutex)); else return &cgrp->dummy_css; @@ -3982,7 +3982,7 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); - rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL); + rcu_assign_pointer(css->cgroup->subsys[css->ss->id], NULL); call_rcu(&css->rcu_head, css_free_rcu_fn); } @@ -4014,7 +4014,7 @@ static int online_css(struct cgroup_subsys_state *css) if (!ret) { css->flags |= CSS_ONLINE; css->cgroup->nr_css++; - rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css); + rcu_assign_pointer(css->cgroup->subsys[ss->id], css); } return ret; } @@ -4034,7 +4034,7 @@ static void offline_css(struct cgroup_subsys_state *css) css->flags &= ~CSS_ONLINE; css->cgroup->nr_css--; - RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css); + RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css); } /** @@ -4065,7 +4065,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) init_css(css, ss, cgrp); - err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); + err = cgroup_populate_dir(cgrp, 1 << ss->id); if (err) goto err_free; @@ -4292,7 +4292,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref) */ static void kill_css(struct cgroup_subsys_state *css) { - cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id); + cgroup_clear_dir(css->cgroup, 1 << 
css->ss->id); /* * Killing would put the base ref, but we need to keep it alive @@ -4496,7 +4496,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) * pointer to this state - since the subsystem is * newly registered, all tasks and hence the * init_css_set is in the subsystem's top cgroup. */ - init_css_set.subsys[ss->subsys_id] = css; + init_css_set.subsys[ss->id] = css; need_forkexit_callback |= ss->fork || ss->exit; @@ -4536,14 +4536,14 @@ int __init cgroup_init_early(void) list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); for_each_subsys(ss, i) { - WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->subsys_id, + WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free, - ss->subsys_id, ss->name); + ss->id, ss->name); WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN, "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]); - ss->subsys_id = i; + ss->id = i; ss->name = cgroup_subsys_name[i]; if (ss->early_init) -- cgit v1.2.3 From 69e943b7d3c2dcca1087e03e556ac6cb0d4433b4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:36:58 -0500 Subject: cgroup: update locking in cgroup_show_options() cgroup_show_options() grabs cgroup_root_mutex to protect the options changing while printing; however, holding root_mutex or not doesn't really make much difference for the function. subsys_mask can be atomically tested and most of the options aren't allowed to change anyway once mounted. The only field which needs synchronization is ->release_agent_path. This patch introduces a dedicated spinlock to synchronize accesses to the field and drops cgroup_root_mutex locking from cgroup_show_options(). The next patch will remove cgroup_root_mutex. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5a77ca0784a6..b15058602120 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -92,6 +92,12 @@ static DEFINE_MUTEX(cgroup_mutex); static DEFINE_MUTEX(cgroup_root_mutex); +/* + * Protects cgroup_subsys->release_agent_path. Modifying it also requires + * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. 
+ */ +static DEFINE_SPINLOCK(release_agent_path_lock); + #define cgroup_assert_mutex_or_rcu_locked() \ rcu_lockdep_assert(rcu_read_lock_held() || \ lockdep_is_held(&cgroup_mutex), \ @@ -1034,7 +1040,6 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) struct cgroup_subsys *ss; int ssid; - mutex_lock(&cgroup_root_mutex); for_each_subsys(ss, ssid) if (root->subsys_mask & (1 << ssid)) seq_printf(seq, ",%s", ss->name); @@ -1044,13 +1049,16 @@ static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) seq_puts(seq, ",noprefix"); if (root->flags & CGRP_ROOT_XATTR) seq_puts(seq, ",xattr"); + + spin_lock(&release_agent_path_lock); if (strlen(root->release_agent_path)) seq_printf(seq, ",release_agent=%s", root->release_agent_path); + spin_unlock(&release_agent_path_lock); + if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags)) seq_puts(seq, ",clone_children"); if (strlen(root->name)) seq_printf(seq, ",name=%s", root->name); - mutex_unlock(&cgroup_root_mutex); return 0; } @@ -1272,8 +1280,11 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) if (ret) goto out_unlock; - if (opts.release_agent) + if (opts.release_agent) { + spin_lock(&release_agent_path_lock); strcpy(root->release_agent_path, opts.release_agent); + spin_unlock(&release_agent_path_lock); + } out_unlock: kfree(opts.release_agent); kfree(opts.name); @@ -2183,7 +2194,9 @@ static int cgroup_release_agent_write(struct cgroup_subsys_state *css, if (!cgroup_lock_live_group(css->cgroup)) return -ENODEV; mutex_lock(&cgroup_root_mutex); + spin_lock(&release_agent_path_lock); strcpy(css->cgroup->root->release_agent_path, buffer); + spin_unlock(&release_agent_path_lock); mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); return 0; -- cgit v1.2.3 From 3417ae1f5f59bbf36c3defbbf2a76c5ca498db2a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 8 Feb 2014 10:37:01 -0500 Subject: cgroup: remove cgroup_root_mutex cgroup_root_mutex was added to avoid deadlock involving namespace_sem via cgroup_show_options(). It added a lot of overhead for the small purpose of it and, because it's nested under cgroup_mutex, it has very limited usefulness. The previous patch made cgroup_show_options() not use cgroup_root_mutex, so nobody needs it anymore. Remove it. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 42 +----------------------------------------- 1 file changed, 1 insertion(+), 41 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b15058602120..0e7829078049 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -70,18 +70,6 @@ /* * cgroup_mutex is the master lock. Any modification to cgroup or its * hierarchy must be performed while holding it. - * - * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify - * cgroupfs_root of any cgroup hierarchy - subsys list, flags, - * release_agent_path and so on. Modifying requires both cgroup_mutex and - * cgroup_root_mutex. Readers can acquire either of the two. This is to - * break the following locking order cycle. - * - * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem - * B. namespace_sem -> cgroup_mutex - * - * B happens only through cgroup_show_options() and using cgroup_root_mutex - * breaks it. 
*/ #ifdef CONFIG_PROVE_RCU DEFINE_MUTEX(cgroup_mutex); @@ -90,8 +78,6 @@ EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for lockdep */ static DEFINE_MUTEX(cgroup_mutex); #endif -static DEFINE_MUTEX(cgroup_root_mutex); - /* * Protects cgroup_subsys->release_agent_path. Modifying it also requires * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. @@ -103,14 +89,6 @@ static DEFINE_SPINLOCK(release_agent_path_lock); lockdep_is_held(&cgroup_mutex), \ "cgroup_mutex or RCU read lock required"); -#ifdef CONFIG_LOCKDEP -#define cgroup_assert_mutex_or_root_locked() \ - WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) && \ - !lockdep_is_held(&cgroup_root_mutex))) -#else -#define cgroup_assert_mutex_or_root_locked() do { } while (0) -#endif - /* * cgroup destruction makes heavy use of work items and there can be a lot * of concurrent destructions. Use a separate workqueue so that cgroup @@ -154,11 +132,7 @@ static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup; static LIST_HEAD(cgroup_roots); static int cgroup_root_count; -/* - * Hierarchy ID allocation and mapping. It follows the same exclusion - * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for - * writes, either for reads. - */ +/* hierarchy ID allocation and mapping, protected by cgroup_mutex */ static DEFINE_IDR(cgroup_hierarchy_idr); static struct cgroup_name root_cgroup_name = { .name = "/" }; @@ -973,7 +947,6 @@ static int rebind_subsystems(struct cgroupfs_root *root, int i, ret; BUG_ON(!mutex_is_locked(&cgroup_mutex)); - BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); /* Check that any added subsystems are currently free */ for_each_subsys(ss, i) @@ -1246,7 +1219,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) mutex_lock(&cgrp->dentry->d_inode->i_mutex); mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); /* See what subsystems are wanted */ ret = parse_cgroupfs_options(data, &opts); @@ -1288,7 +1260,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) out_unlock: kfree(opts.release_agent); kfree(opts.name); - mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return ret; @@ -1331,7 +1302,6 @@ static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) int id; lockdep_assert_held(&cgroup_mutex); - lockdep_assert_held(&cgroup_root_mutex); id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, GFP_KERNEL); @@ -1345,7 +1315,6 @@ static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) static void cgroup_exit_root_id(struct cgroupfs_root *root) { lockdep_assert_held(&cgroup_mutex); - lockdep_assert_held(&cgroup_root_mutex); if (root->hierarchy_id) { idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); @@ -1524,7 +1493,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, mutex_lock(&inode->i_mutex); mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); @@ -1597,7 +1565,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(root->number_of_cgroups != 1); - mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); } else { @@ -1628,7 +1595,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, revert_creds(cred); unlock_drop: cgroup_exit_root_id(root); - 
mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); mutex_unlock(&inode->i_mutex); drop_new_super: @@ -1653,7 +1619,6 @@ static void cgroup_kill_sb(struct super_block *sb) mutex_lock(&cgrp->dentry->d_inode->i_mutex); mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); /* Rebind all subsystems back to the default hierarchy */ if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { @@ -1682,7 +1647,6 @@ static void cgroup_kill_sb(struct super_block *sb) cgroup_exit_root_id(root); - mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); @@ -2193,11 +2157,9 @@ static int cgroup_release_agent_write(struct cgroup_subsys_state *css, return -EINVAL; if (!cgroup_lock_live_group(css->cgroup)) return -ENODEV; - mutex_lock(&cgroup_root_mutex); spin_lock(&release_agent_path_lock); strcpy(css->cgroup->root->release_agent_path, buffer); spin_unlock(&release_agent_path_lock); - mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); return 0; } @@ -4588,7 +4550,6 @@ int __init cgroup_init(void) /* allocate id for the dummy hierarchy */ mutex_lock(&cgroup_mutex); - mutex_lock(&cgroup_root_mutex); /* Add init_css_set to the hash table */ key = css_set_hash(init_css_set.subsys); @@ -4600,7 +4561,6 @@ int __init cgroup_init(void) 0, 1, GFP_KERNEL); BUG_ON(err < 0); - mutex_unlock(&cgroup_root_mutex); mutex_unlock(&cgroup_mutex); cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); -- cgit v1.2.3 From 5a17f543ed6808e9085063277fe46795dea484bd Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:47 -0500 Subject: cgroup: improve css_from_dir() into css_tryget_from_dir() css_from_dir() returns the matching css (cgroup_subsys_state) given a dentry and subsystem. The function doesn't pin the css before returning and requires the caller to be holding RCU read lock or cgroup_mutex and handling pinning on the caller side. Given that users of the function are likely to want to pin the returned css (both existing users do) and that getting and putting css's are very cheap, there's no reason for the interface to be tricky like this. Rename css_from_dir() to css_tryget_from_dir() and make it try to pin the found css and return it only if pinning succeeded. The callers are updated so that they no longer do RCU locking and pinning around the function and just use the returned css. This will also ease converting cgroup to kernfs. Signed-off-by: Tejun Heo Acked-by: Michal Hocko Acked-by: Li Zefan Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki --- kernel/cgroup.c | 25 ++++++++++++++++--------- kernel/events/core.c | 17 +---------------- 2 files changed, 17 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2de8decfd99f..fc2db071d95e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4978,28 +4978,35 @@ static int __init cgroup_disable(char *str) __setup("cgroup_disable=", cgroup_disable); /** - * css_from_dir - get corresponding css from the dentry of a cgroup dir + * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir * @dentry: directory dentry of interest * @ss: subsystem of interest * - * Must be called under cgroup_mutex or RCU read lock. The caller is - * responsible for pinning the returned css if it needs to be accessed - * outside the critical section. + * If @dentry is a directory for a cgroup which has @ss enabled on it, try + * to get the corresponding css and return it. 
If such css doesn't exist + * or can't be pinned, an ERR_PTR value is returned. */ -struct cgroup_subsys_state *css_from_dir(struct dentry *dentry, - struct cgroup_subsys *ss) +struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry, + struct cgroup_subsys *ss) { struct cgroup *cgrp; - - cgroup_assert_mutex_or_rcu_locked(); + struct cgroup_subsys_state *css; /* is @dentry a cgroup dir? */ if (!dentry->d_inode || dentry->d_inode->i_op != &cgroup_dir_inode_operations) return ERR_PTR(-EBADF); + rcu_read_lock(); + cgrp = __d_cgrp(dentry); - return cgroup_css(cgrp, ss) ?: ERR_PTR(-ENOENT); + css = cgroup_css(cgrp, ss); + + if (!css || !css_tryget(css)) + css = ERR_PTR(-ENOENT); + + rcu_read_unlock(); + return css; } /** diff --git a/kernel/events/core.c b/kernel/events/core.c index 64903731d834..a3c3ab50271a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -370,11 +370,6 @@ perf_cgroup_match(struct perf_event *event) event->cgrp->css.cgroup); } -static inline bool perf_tryget_cgroup(struct perf_event *event) -{ - return css_tryget(&event->cgrp->css); -} - static inline void perf_put_cgroup(struct perf_event *event) { css_put(&event->cgrp->css); @@ -593,9 +588,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, if (!f.file) return -EBADF; - rcu_read_lock(); - - css = css_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); + css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys); if (IS_ERR(css)) { ret = PTR_ERR(css); goto out; @@ -604,13 +597,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, cgrp = container_of(css, struct perf_cgroup, css); event->cgrp = cgrp; - /* must be done before we fput() the file */ - if (!perf_tryget_cgroup(event)) { - event->cgrp = NULL; - ret = -ENOENT; - goto out; - } - /* * all events in a group must monitor * the same cgroup because a task belongs @@ -621,7 +607,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, ret = -EINVAL; } out: - rcu_read_unlock(); fdput(f); return ret; } -- cgit v1.2.3 From ace2bee8135a3dc725958b8d08c55ee9df813d39 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:47 -0500 Subject: cgroup: introduce cgroup_tree_mutex Currently cgroup uses combination of inode->i_mutex'es and cgroup_mutex for synchronization. With the scheduled kernfs conversion, i_mutex'es will be removed. Unfortunately, just using cgroup_mutex isn't possible. All kernfs file and syscall operations, most of which require grabbing cgroup_mutex, will be called with kernfs active ref held and, if we try to perform kernfs removals under cgroup_mutex, it can deadlock as kernfs_remove() tries to drain the target node. Let's introduce a new outer mutex, cgroup_tree_mutex, which protects stuff used during hierarchy changing operations - cftypes and all the operations which may affect the cgroupfs. It also covers css association and iteration. This allows cgroup_css(), for_each_css() and other css iterators to be called under cgroup_tree_mutex. The new mutex will nest above both kernfs's active ref protection and cgroup_mutex. By protecting tree modifications with a separate outer mutex, we can get rid of the forementioned deadlock condition. Actual file additions and removals now require cgroup_tree_mutex instead of cgroup_mutex. Currently, cgroup_tree_mutex is never used without cgroup_mutex; however, we'll soon add hierarchy modification sections which are only protected by cgroup_tree_mutex. 
In the future, we might want to make the locking more granular by better splitting the coverages of the two mutexes. For now, this should do. v2: Rebased on top of 0ab02ca8f887 ("cgroup: protect modifications to cgroup_idr with cgroup_mutex"). Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 66 +++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 53 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fc2db071d95e..cb20d12cb096 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -67,6 +67,15 @@ */ #define CGROUP_PIDLIST_DESTROY_DELAY HZ +/* + * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file + * creation/removal and hierarchy changing operations including cgroup + * creation, removal, css association and controller rebinding. This outer + * lock is needed mainly to resolve the circular dependency between kernfs + * active ref and cgroup_mutex. cgroup_tree_mutex nests above both. + */ +static DEFINE_MUTEX(cgroup_tree_mutex); + /* * cgroup_mutex is the master lock. Any modification to cgroup or its * hierarchy must be performed while holding it. @@ -84,10 +93,11 @@ static DEFINE_MUTEX(cgroup_mutex); */ static DEFINE_SPINLOCK(release_agent_path_lock); -#define cgroup_assert_mutex_or_rcu_locked() \ +#define cgroup_assert_mutexes_or_rcu_locked() \ rcu_lockdep_assert(rcu_read_lock_held() || \ + lockdep_is_held(&cgroup_tree_mutex) || \ lockdep_is_held(&cgroup_mutex), \ - "cgroup_mutex or RCU read lock required"); + "cgroup_[tree_]mutex or RCU read lock required"); /* * cgroup destruction makes heavy use of work items and there can be a lot @@ -179,7 +189,8 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, { if (ss) return rcu_dereference_check(cgrp->subsys[ss->id], - lockdep_is_held(&cgroup_mutex)); + lockdep_is_held(&cgroup_tree_mutex) || + lockdep_is_held(&cgroup_mutex)); else return &cgrp->dummy_css; } @@ -235,6 +246,7 @@ static int notify_on_release(const struct cgroup *cgrp) for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ if (!((css) = rcu_dereference_check( \ (cgrp)->subsys[(ssid)], \ + lockdep_is_held(&cgroup_tree_mutex) || \ lockdep_is_held(&cgroup_mutex)))) { } \ else @@ -883,7 +895,7 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) struct cfent *cfe; lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); - lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&cgroup_tree_mutex); /* * If we're doing cleanup due to failure of cgroup_create(), @@ -948,7 +960,8 @@ static int rebind_subsystems(struct cgroupfs_root *root, struct cgroup_subsys *ss; int i, ret; - BUG_ON(!mutex_is_locked(&cgroup_mutex)); + lockdep_assert_held(&cgroup_tree_mutex); + lockdep_assert_held(&cgroup_mutex); /* Check that any added subsystems are currently free */ for_each_subsys(ss, i) @@ -1220,6 +1233,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) } mutex_lock(&cgrp->dentry->d_inode->i_mutex); + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); /* See what subsystems are wanted */ @@ -1263,6 +1277,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) kfree(opts.release_agent); kfree(opts.name); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return ret; } @@ -1494,6 +1509,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, inode = sb->s_root->d_inode; mutex_lock(&inode->i_mutex); + 
mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); @@ -1568,6 +1584,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, BUG_ON(root->number_of_cgroups != 1); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_unlock(&inode->i_mutex); } else { /* @@ -1598,6 +1615,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, unlock_drop: cgroup_exit_root_id(root); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_unlock(&inode->i_mutex); drop_new_super: deactivate_locked_super(sb); @@ -1620,6 +1638,7 @@ static void cgroup_kill_sb(struct super_block *sb) BUG_ON(!list_empty(&cgrp->children)); mutex_lock(&cgrp->dentry->d_inode->i_mutex); + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); /* Rebind all subsystems back to the default hierarchy */ @@ -1650,6 +1669,7 @@ static void cgroup_kill_sb(struct super_block *sb) cgroup_exit_root_id(root); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); simple_xattrs_free(&cgrp->xattrs); @@ -2625,7 +2645,7 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], int ret; lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); - lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&cgroup_tree_mutex); for (cft = cfts; cft->name[0] != '\0'; cft++) { /* does cft->flags tell us to skip this file on @cgrp? */ @@ -2659,6 +2679,7 @@ static void cgroup_cfts_prepare(void) * Instead, we use css_for_each_descendant_pre() and drop RCU read * lock before calling cgroup_addrm_files(). */ + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); } @@ -2679,6 +2700,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) if (!cfts || ss->root == &cgroup_dummy_root || !atomic_inc_not_zero(&sb->s_active)) { mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); return 0; } @@ -2702,7 +2724,9 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) prev = cgrp->dentry; mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_lock(&inode->i_mutex); + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) ret = cgroup_addrm_files(cgrp, cfts, is_add); @@ -2711,6 +2735,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) break; } mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); dput(prev); deactivate_super(sb); return ret; @@ -2856,7 +2881,7 @@ css_next_child(struct cgroup_subsys_state *pos_css, struct cgroup *cgrp = parent_css->cgroup; struct cgroup *next; - cgroup_assert_mutex_or_rcu_locked(); + cgroup_assert_mutexes_or_rcu_locked(); /* * @pos could already have been removed. 
Once a cgroup is removed, @@ -2914,7 +2939,7 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos, { struct cgroup_subsys_state *next; - cgroup_assert_mutex_or_rcu_locked(); + cgroup_assert_mutexes_or_rcu_locked(); /* if first iteration, visit @root */ if (!pos) @@ -2955,7 +2980,7 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos) { struct cgroup_subsys_state *last, *tmp; - cgroup_assert_mutex_or_rcu_locked(); + cgroup_assert_mutexes_or_rcu_locked(); do { last = pos; @@ -3003,7 +3028,7 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, { struct cgroup_subsys_state *next; - cgroup_assert_mutex_or_rcu_locked(); + cgroup_assert_mutexes_or_rcu_locked(); /* if first iteration, visit leftmost descendant which may be @root */ if (!pos) @@ -3977,6 +4002,7 @@ static int online_css(struct cgroup_subsys_state *css) struct cgroup_subsys *ss = css->ss; int ret = 0; + lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); if (ss->css_online) @@ -3994,6 +4020,7 @@ static void offline_css(struct cgroup_subsys_state *css) { struct cgroup_subsys *ss = css->ss; + lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); if (!(css->flags & CSS_ONLINE)) @@ -4093,6 +4120,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, } rcu_assign_pointer(cgrp->name, name); + mutex_lock(&cgroup_tree_mutex); + /* * Only live parents can have children. Note that the liveliness * check isn't strictly necessary because cgroup_mkdir() and @@ -4102,7 +4131,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, */ if (!cgroup_lock_live_group(parent)) { err = -ENODEV; - goto err_free_name; + goto err_unlock_tree; } /* @@ -4176,6 +4205,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, } mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return 0; @@ -4186,7 +4216,8 @@ err_free_id: deactivate_super(sb); err_unlock: mutex_unlock(&cgroup_mutex); -err_free_name: +err_unlock_tree: + mutex_unlock(&cgroup_tree_mutex); kfree(rcu_dereference_raw(cgrp->name)); err_free_cgrp: kfree(cgrp); @@ -4195,6 +4226,7 @@ err_free_cgrp: err_destroy: cgroup_destroy_locked(cgrp); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); mutex_unlock(&dentry->d_inode->i_mutex); return err; } @@ -4217,6 +4249,7 @@ static void css_killed_work_fn(struct work_struct *work) container_of(work, struct cgroup_subsys_state, destroy_work); struct cgroup *cgrp = css->cgroup; + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); /* @@ -4234,6 +4267,7 @@ static void css_killed_work_fn(struct work_struct *work) cgroup_destroy_css_killed(cgrp); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); /* * Put the css refs from kill_css(). 
Each css holds an extra @@ -4321,6 +4355,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) int ssid; lockdep_assert_held(&d->d_inode->i_mutex); + lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); /* @@ -4407,6 +4442,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp) struct cgroup *parent = cgrp->parent; struct dentry *d = cgrp->dentry; + lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); /* delete this cgroup from parent->children */ @@ -4422,9 +4458,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) { int ret; + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); ret = cgroup_destroy_locked(dentry->d_fsdata); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); return ret; } @@ -4454,6 +4492,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); /* init base cftset */ @@ -4482,6 +4521,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(online_css(css)); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); } /** @@ -5021,7 +5061,7 @@ struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) { struct cgroup *cgrp; - cgroup_assert_mutex_or_rcu_locked(); + cgroup_assert_mutexes_or_rcu_locked(); cgrp = idr_find(&ss->root->cgroup_idr, id); if (cgrp) -- cgit v1.2.3 From 4ac0601744eb86e982fbdadde35f1945f7ce5882 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:47 -0500 Subject: cgroup: release cgroup_mutex over file removals Now that cftypes and all tree modification operations are protected by cgroup_tree_mutex, we can drop cgroup_mutex while deleting files and directories. Drop cgroup_mutex over removals. This doesn't make any noticeable difference now but is to help kernfs conversion. In kernfs, removals are sync points which drain in-flight operations as those operations would grab cgroup_mutex, trying to delete under cgroup_mutex would deadlock. This can be resolved by just holding the outer cgroup_tree_mutex which nests outside both kernfs active reference and cgroup_mutex. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index cb20d12cb096..d28cf75f33c1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -976,7 +976,9 @@ static int rebind_subsystems(struct cgroupfs_root *root, * Nothing can fail from this point on. Remove files for the * removed subsystems and rebind each subsystem. 
*/ + mutex_unlock(&cgroup_mutex); cgroup_clear_dir(cgrp, removed_mask); + mutex_lock(&cgroup_mutex); for_each_subsys(ss, i) { unsigned long bit = 1UL << i; @@ -2696,10 +2698,11 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) u64 update_before; int ret = 0; + mutex_unlock(&cgroup_mutex); + /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ if (!cfts || ss->root == &cgroup_dummy_root || !atomic_inc_not_zero(&sb->s_active)) { - mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); return 0; } @@ -2723,18 +2726,15 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) dput(prev); prev = cgrp->dentry; - mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); mutex_lock(&inode->i_mutex); mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) ret = cgroup_addrm_files(cgrp, cfts, is_add); mutex_unlock(&inode->i_mutex); if (ret) break; } - mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); dput(prev); deactivate_super(sb); @@ -4387,10 +4387,13 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) /* * Initiate massacre of all css's. cgroup_destroy_css_killed() * will be invoked to perform the rest of destruction once the - * percpu refs of all css's are confirmed to be killed. + * percpu refs of all css's are confirmed to be killed. This + * involves removing the subsystem's files, drop cgroup_mutex. */ + mutex_unlock(&cgroup_mutex); for_each_css(css, ssid, cgrp) kill_css(css); + mutex_lock(&cgroup_mutex); /* * Mark @cgrp dead. This prevents further task migration and child @@ -4421,9 +4424,11 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) * puts the base ref but we aren't quite done with @cgrp yet, so * hold onto it. */ + mutex_unlock(&cgroup_mutex); cgroup_addrm_files(cgrp, cgroup_base_files, false); dget(d); cgroup_d_remove_dir(d); + mutex_lock(&cgroup_mutex); return 0; }; -- cgit v1.2.3 From 8e30e2b8ba0ee58aa0f442d0b4a3cac1a4f2efb5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:48 -0500 Subject: cgroup: restructure locking and error handling in cgroup_mount() cgroup is scheduled to be converted to kernfs. After conversion, cgroup_mount() won't use the sget() machinery for finding out existing super_blocks but instead would do that directly. It'll search the existing cgroupfs_roots for a matching one and create a new one iff a match doesn't exist. To ease such conversion, this patch restructures locking and error handling of the function. cgroup_tree_mutex and cgroup_mutex are grabbed from the get-go and held until return. For now, due to the way vfs locks nest outside cgroup mutexes, the two cgroup mutexes are temporarily dropped across sget() and inode mutex locking, which looks quite ridiculous; however, these will be removed through kernfs conversion and structuring the code this way makes the conversion less painful. The error goto labels are consolidated to two. This looks unwieldy now but the next patch will factor out creation of new root into a separate function with accompanying error handling and it'll look a lot better. 
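As a rough illustration of the locking flow described above (a simplified sketch under stated assumptions, not the actual cgroup_mount() implementation; the placeholder comments stand in for the sget()/inode-mutex and root-setup steps, and only cgroup_tree_mutex, cgroup_mutex and the mutex_lock()/mutex_unlock() calls come from the patches themselves):

static int cgroup_mount_locking_sketch(void)
{
	int ret;

	/* both cgroup mutexes are taken up front and held until return ... */
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/*
	 * ... except across the vfs calls: sget() and inode->i_mutex nest
	 * outside the cgroup mutexes, so both are dropped and re-acquired
	 * around them for now.
	 */
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	/* sget() / mutex_lock(&inode->i_mutex) would go here */
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	ret = 0;	/* placeholder for the actual root setup work */

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	return ret;
}

Re-acquiring both mutexes immediately after the vfs calls lets the rest of the function be written as if the locks were held throughout, which is what makes the planned kernfs conversion less painful.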
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 73 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d28cf75f33c1..083b53d79d6f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1459,21 +1459,22 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { + LIST_HEAD(tmp_links); + struct super_block *sb = NULL; + struct inode *inode = NULL; + struct cgroupfs_root *root = NULL; struct cgroup_sb_opts opts; - struct cgroupfs_root *root; - int ret = 0; - struct super_block *sb; struct cgroupfs_root *new_root; - struct list_head tmp_links; - struct inode *inode; const struct cred *cred; + int ret; - /* First find the desired set of subsystems */ + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); + + /* First find the desired set of subsystems */ ret = parse_cgroupfs_options(data, &opts); - mutex_unlock(&cgroup_mutex); if (ret) - goto out_err; + goto out_unlock; /* * Allocate a new cgroup root. We may not need it if we're @@ -1482,16 +1483,20 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, new_root = cgroup_root_from_opts(&opts); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); - goto out_err; + goto out_unlock; } opts.new_root = new_root; /* Locate an existing or new sb for this hierarchy */ + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); + mutex_lock(&cgroup_tree_mutex); + mutex_lock(&cgroup_mutex); if (IS_ERR(sb)) { ret = PTR_ERR(sb); cgroup_free_root(opts.new_root); - goto out_err; + goto out_unlock; } root = sb->s_fs_info; @@ -1505,9 +1510,12 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, BUG_ON(sb->s_root != NULL); + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); + ret = cgroup_get_rootdir(sb); if (ret) - goto drop_new_super; + goto out_unlock; inode = sb->s_root->d_inode; mutex_lock(&inode->i_mutex); @@ -1516,7 +1524,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); if (ret < 0) - goto unlock_drop; + goto out_unlock; root_cgrp->id = ret; /* Check for name clashes with existing mounts */ @@ -1524,7 +1532,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (strlen(root->name)) for_each_active_root(existing_root) if (!strcmp(existing_root->name, root->name)) - goto unlock_drop; + goto out_unlock; /* * We're accessing css_set_count without locking @@ -1535,12 +1543,12 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, */ ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); if (ret) - goto unlock_drop; + goto out_unlock; /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ ret = cgroup_init_root_id(root, 2, 0); if (ret) - goto unlock_drop; + goto out_unlock; sb->s_root->d_fsdata = root_cgrp; root_cgrp->dentry = sb->s_root; @@ -1580,14 +1588,8 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, link_css_set(&tmp_links, cset, root_cgrp); write_unlock(&css_set_lock); - free_cgrp_cset_links(&tmp_links); - BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(root->number_of_cgroups != 1); - - mutex_unlock(&cgroup_mutex); - mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&inode->i_mutex); } else { /* * We re-used an existing hierarchy - the new root (if @@ 
-1599,32 +1601,37 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); ret = -EINVAL; - goto drop_new_super; + goto out_unlock; } else { pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); } } } - kfree(opts.release_agent); - kfree(opts.name); - return dget(sb->s_root); + ret = 0; + goto out_unlock; - rm_base_files: - free_cgrp_cset_links(&tmp_links); +rm_base_files: cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); revert_creds(cred); - unlock_drop: cgroup_exit_root_id(root); +out_unlock: mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&inode->i_mutex); - drop_new_super: - deactivate_locked_super(sb); - out_err: + if (inode) + mutex_unlock(&inode->i_mutex); + + if (ret && !IS_ERR_OR_NULL(sb)) + deactivate_locked_super(sb); + + free_cgrp_cset_links(&tmp_links); kfree(opts.release_agent); kfree(opts.name); - return ERR_PTR(ret); + + if (!ret) + return dget(sb->s_root); + else + return ERR_PTR(ret); } static void cgroup_kill_sb(struct super_block *sb) -- cgit v1.2.3 From d427dfeb120b92c0c5e2ca9d1ec6952de67ebad9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:48 -0500 Subject: cgroup: factor out cgroup_setup_root() from cgroup_mount() Factor out new root initialization into cgroup_setup_root() from cgroup_mount(). This makes it easier to follow and will ease kernfs conversion. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 211 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 113 insertions(+), 98 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 083b53d79d6f..0a178cd1f836 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1455,17 +1455,126 @@ static int cgroup_get_rootdir(struct super_block *sb) return 0; } +static int cgroup_setup_root(struct cgroupfs_root *root) +{ + LIST_HEAD(tmp_links); + struct super_block *sb = root->sb; + struct cgroup *root_cgrp = &root->top_cgroup; + struct cgroupfs_root *existing_root; + struct css_set *cset; + struct inode *inode; + const struct cred *cred; + int i, ret; + + lockdep_assert_held(&cgroup_tree_mutex); + lockdep_assert_held(&cgroup_mutex); + BUG_ON(sb->s_root != NULL); + + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); + + ret = cgroup_get_rootdir(sb); + if (ret) { + mutex_lock(&cgroup_tree_mutex); + mutex_lock(&cgroup_mutex); + return ret; + } + inode = sb->s_root->d_inode; + + mutex_lock(&inode->i_mutex); + mutex_lock(&cgroup_tree_mutex); + mutex_lock(&cgroup_mutex); + + ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); + if (ret < 0) + goto out_unlock; + root_cgrp->id = ret; + + /* check for name clashes with existing mounts */ + ret = -EBUSY; + if (strlen(root->name)) + for_each_active_root(existing_root) + if (!strcmp(existing_root->name, root->name)) + goto out_unlock; + + /* + * We're accessing css_set_count without locking css_set_lock here, + * but that's OK - it can only be increased by someone holding + * cgroup_lock, and that's us. 
The worst that can happen is that we + * have some link structures left over + */ + ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); + if (ret) + goto out_unlock; + + /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ + ret = cgroup_init_root_id(root, 2, 0); + if (ret) + goto out_unlock; + + sb->s_root->d_fsdata = root_cgrp; + root_cgrp->dentry = sb->s_root; + + /* + * We're inside get_sb() and will call lookup_one_len() to create + * the root files, which doesn't work if SELinux is in use. The + * following cred dancing somehow works around it. See 2ce9738ba + * ("cgroupfs: use init_cred when populating new cgroupfs mount") + * for more details. + */ + cred = override_creds(&init_cred); + + ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); + if (ret) + goto rm_base_files; + + ret = rebind_subsystems(root, root->subsys_mask, 0); + if (ret) + goto rm_base_files; + + revert_creds(cred); + + /* + * There must be no failure case after here, since rebinding takes + * care of subsystems' refcounts, which are explicitly dropped in + * the failure exit path. + */ + list_add(&root->root_list, &cgroup_roots); + cgroup_root_count++; + + /* + * Link the top cgroup in this hierarchy into all the css_set + * objects. + */ + write_lock(&css_set_lock); + hash_for_each(css_set_table, i, cset, hlist) + link_css_set(&tmp_links, cset, root_cgrp); + write_unlock(&css_set_lock); + + BUG_ON(!list_empty(&root_cgrp->children)); + BUG_ON(root->number_of_cgroups != 1); + + ret = 0; + goto out_unlock; + +rm_base_files: + cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); + revert_creds(cred); + cgroup_exit_root_id(root); +out_unlock: + mutex_unlock(&inode->i_mutex); + free_cgrp_cset_links(&tmp_links); + return ret; +} + static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { - LIST_HEAD(tmp_links); struct super_block *sb = NULL; - struct inode *inode = NULL; struct cgroupfs_root *root = NULL; struct cgroup_sb_opts opts; struct cgroupfs_root *new_root; - const struct cred *cred; int ret; mutex_lock(&cgroup_tree_mutex); @@ -1502,94 +1611,9 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, root = sb->s_fs_info; BUG_ON(!root); if (root == opts.new_root) { - /* We used the new root structure, so this is a new hierarchy */ - struct cgroup *root_cgrp = &root->top_cgroup; - struct cgroupfs_root *existing_root; - int i; - struct css_set *cset; - - BUG_ON(sb->s_root != NULL); - - mutex_unlock(&cgroup_mutex); - mutex_unlock(&cgroup_tree_mutex); - - ret = cgroup_get_rootdir(sb); + ret = cgroup_setup_root(root); if (ret) goto out_unlock; - inode = sb->s_root->d_inode; - - mutex_lock(&inode->i_mutex); - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); - - ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); - if (ret < 0) - goto out_unlock; - root_cgrp->id = ret; - - /* Check for name clashes with existing mounts */ - ret = -EBUSY; - if (strlen(root->name)) - for_each_active_root(existing_root) - if (!strcmp(existing_root->name, root->name)) - goto out_unlock; - - /* - * We're accessing css_set_count without locking - * css_set_lock here, but that's OK - it can only be - * increased by someone holding cgroup_lock, and - * that's us. 
The worst that can happen is that we - * have some link structures left over - */ - ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); - if (ret) - goto out_unlock; - - /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ - ret = cgroup_init_root_id(root, 2, 0); - if (ret) - goto out_unlock; - - sb->s_root->d_fsdata = root_cgrp; - root_cgrp->dentry = sb->s_root; - - /* - * We're inside get_sb() and will call lookup_one_len() to - * create the root files, which doesn't work if SELinux is - * in use. The following cred dancing somehow works around - * it. See 2ce9738ba ("cgroupfs: use init_cred when - * populating new cgroupfs mount") for more details. - */ - cred = override_creds(&init_cred); - - ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); - if (ret) - goto rm_base_files; - - ret = rebind_subsystems(root, root->subsys_mask, 0); - if (ret) - goto rm_base_files; - - revert_creds(cred); - - /* - * There must be no failure case after here, since rebinding - * takes care of subsystems' refcounts, which are explicitly - * dropped in the failure exit path. - */ - - list_add(&root->root_list, &cgroup_roots); - cgroup_root_count++; - - /* Link the top cgroup in this hierarchy into all - * the css_set objects */ - write_lock(&css_set_lock); - hash_for_each(css_set_table, i, cset, hlist) - link_css_set(&tmp_links, cset, root_cgrp); - write_unlock(&css_set_lock); - - BUG_ON(!list_empty(&root_cgrp->children)); - BUG_ON(root->number_of_cgroups != 1); } else { /* * We re-used an existing hierarchy - the new root (if @@ -1609,22 +1633,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, } ret = 0; - goto out_unlock; - -rm_base_files: - cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); - revert_creds(cred); - cgroup_exit_root_id(root); out_unlock: mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - if (inode) - mutex_unlock(&inode->i_mutex); if (ret && !IS_ERR_OR_NULL(sb)) deactivate_locked_super(sb); - free_cgrp_cset_links(&tmp_links); kfree(opts.release_agent); kfree(opts.name); -- cgit v1.2.3 From 8d7e6fb0a1db970ac3589f87af0f2a20ef46654b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:48 -0500 Subject: cgroup: update cgroup name handling Straightforward updates to cgroup name handling in preparation of kernfs conversion. * cgroup_alloc_name() is updated to take const char * isntead of dentry * for name source. * cgroup name formatting is separated out into cgroup_file_name(). While at it, buffer length protection is added. 
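A minimal usage sketch of the new helper, mirroring the cgroup_add_file() hunk below (the surrounding variables cgrp, cft, dir and dentry are assumed from that context):

	char name[CGROUP_FILE_NAME_MAX];

	/*
	 * Yields e.g. "memory.limit_in_bytes", or just "tasks" when the
	 * file is prefix-less or the hierarchy is mounted with noprefix;
	 * snprintf()/strncpy() keep the buffer from overflowing.
	 */
	cgroup_file_name(cgrp, cft, name);
	dentry = lookup_one_len(name, dir, strlen(name));
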
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0a178cd1f836..3f204429d108 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -67,6 +67,9 @@ */ #define CGROUP_PIDLIST_DESTROY_DELAY HZ +#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \ + MAX_CFTYPE_NAME + 2) + /* * cgroup_tree_mutex nests above cgroup_mutex and protects cftypes, file * creation/removal and hierarchy changing operations including cgroup @@ -799,17 +802,29 @@ static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb) return inode; } -static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry) +static struct cgroup_name *cgroup_alloc_name(const char *name_str) { struct cgroup_name *name; - name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL); + name = kmalloc(sizeof(*name) + strlen(name_str) + 1, GFP_KERNEL); if (!name) return NULL; - strcpy(name->name, dentry->d_name.name); + strcpy(name->name, name_str); return name; } +static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, + char *buf) +{ + if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && + !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) + snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s", + cft->ss->name, cft->name); + else + strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); + return buf; +} + static void cgroup_free_fn(struct work_struct *work) { struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); @@ -2437,7 +2452,7 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, if (cgroup_sane_behavior(cgrp)) return -EPERM; - name = cgroup_alloc_name(new_dentry); + name = cgroup_alloc_name(new_dentry->d_name.name); if (!name) return -ENOMEM; @@ -2613,14 +2628,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) struct cfent *cfe; int error; umode_t mode; - char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; - - if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && - !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) { - strcpy(name, cft->ss->name); - strcat(name, "."); - } - strcat(name, cft->name); + char name[CGROUP_FILE_NAME_MAX]; BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); @@ -2628,6 +2636,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) if (!cfe) return -ENOMEM; + cgroup_file_name(cgrp, cft, name); dentry = lookup_one_len(name, dir, strlen(name)); if (IS_ERR(dentry)) { error = PTR_ERR(dentry); @@ -4135,7 +4144,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (!cgrp) return -ENOMEM; - name = cgroup_alloc_name(dentry); + name = cgroup_alloc_name(dentry->d_name.name); if (!name) { err = -ENOMEM; goto err_free_cgrp; -- cgit v1.2.3 From de00ffa56ea3132c6013fc8f07133b8a1014cf53 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:48 -0500 Subject: cgroup: make cgroup_subsys->base_cftypes use cgroup_add_cftypes() Currently, cgroup_subsys->base_cftypes registration is different from dynamic cftypes registartion. Instead of going through cgroup_add_cftypes(), cgroup_init_subsys() invokes cgroup_init_cftsets() which makes use of cgroup_subsys->base_cftset which doesn't involve dynamic allocation. While avoiding dynamic allocation is somewhat nice, having two separate paths for cftypes registration is nasty, especially as we're planning to add more operations during cftypes registration. 
This patch drops cgroup_init_cftsets() and cgroup_subsys->base_cftset and registers base_cftypes using cgroup_add_cftypes(). This is done as a separate step in cgroup_init() instead of a part of cgroup_init_subsys(). This is because cgroup_init_subsys() can be called very early during boot when kmalloc() isn't available yet. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 29 ++++++++--------------------- 1 file changed, 8 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3f204429d108..eb002c622cd6 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4503,25 +4503,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) return ret; } -static void __init cgroup_init_cftsets(struct cgroup_subsys *ss) -{ - INIT_LIST_HEAD(&ss->cftsets); - - /* - * base_cftset is embedded in subsys itself, no need to worry about - * deregistration. - */ - if (ss->base_cftypes) { - struct cftype *cft; - - for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++) - cft->ss = ss; - - ss->base_cftset.cfts = ss->base_cftypes; - list_add_tail(&ss->base_cftset.node, &ss->cftsets); - } -} - static void __init cgroup_init_subsys(struct cgroup_subsys *ss) { struct cgroup_subsys_state *css; @@ -4531,8 +4512,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); - /* init base cftset */ - cgroup_init_cftsets(ss); + INIT_LIST_HEAD(&ss->cftsets); /* Create the top cgroup state for this subsystem */ ss->root = &cgroup_dummy_root; @@ -4621,6 +4601,13 @@ int __init cgroup_init(void) for_each_subsys(ss, i) { if (!ss->early_init) cgroup_init_subsys(ss); + + /* + * cftype registration needs kmalloc and can't be done + * during early_init. Register base cftypes separately. + */ + if (ss->base_cftypes) + WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes)); } /* allocate id for the dummy hierarchy */ -- cgit v1.2.3 From 5f46990787e2721b4db190ddc8af6fdbe8f010d7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:48 -0500 Subject: cgroup: update the meaning of cftype->max_write_len cftype->max_write_len is used to extend the maximum size of writes. It's interpreted in such a way that the actual maximum size is one less than the specified value. The default size is defined by CGROUP_LOCAL_BUFFER_SIZE. Its interpretation is quite confusing - its value is decremented by 1 and then compared for equality with max size, which means that the actual default size is CGROUP_LOCAL_BUFFER_SIZE - 2, which is 62 chars. There's no point in having a limit that low. Update its definition so that it means the actual string length sans termination and anything below PAGE_SIZE-1 is treated as PAGE_SIZE-1. .max_write_len for "release_agent" is updated to PATH_MAX-1 and cgroup_release_agent_write() is updated so that the redundant strlen() check is removed and it uses strlcpy() instead of strcpy(). .max_write_len initializations in blk-throttle.c and cfq-iosched.c are no longer necessary and removed. The one in cpuset is kept unchanged as it's an approximated value to begin with. This will also make transition to kernfs smoother. 
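Under the updated convention, a controller that wants to accept long strings would declare something like the following (illustrative only; the "path" file and example_write_path() are hypothetical, and the value mirrors the "release_agent" entry updated below):

	static int example_write_path(struct cgroup_subsys_state *css,
				      struct cftype *cft, const char *buffer);

	static struct cftype example_files[] = {
		{
			.name		= "path",
			.write_string	= example_write_path,
			/*
			 * longest accepted string, terminating NUL excluded;
			 * anything below PAGE_SIZE - 1 now means PAGE_SIZE - 1
			 */
			.max_write_len	= PATH_MAX - 1,
		},
		{ }	/* terminate */
	};
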
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index eb002c622cd6..fde3633ef389 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2213,13 +2213,14 @@ static int cgroup_procs_write(struct cgroup_subsys_state *css, static int cgroup_release_agent_write(struct cgroup_subsys_state *css, struct cftype *cft, const char *buffer) { - BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX); - if (strlen(buffer) >= PATH_MAX) - return -EINVAL; + struct cgroupfs_root *root = css->cgroup->root; + + BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX); if (!cgroup_lock_live_group(css->cgroup)) return -ENODEV; spin_lock(&release_agent_path_lock); - strcpy(css->cgroup->root->release_agent_path, buffer); + strlcpy(root->release_agent_path, buffer, + sizeof(root->release_agent_path)); spin_unlock(&release_agent_path_lock); mutex_unlock(&cgroup_mutex); return 0; @@ -2245,20 +2246,17 @@ static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) return 0; } -/* A buffer size big enough for numbers or short strings */ -#define CGROUP_LOCAL_BUFFER_SIZE 64 - static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, size_t nbytes, loff_t *ppos) { struct cfent *cfe = __d_cfe(file->f_dentry); struct cftype *cft = __d_cft(file->f_dentry); struct cgroup_subsys_state *css = cfe->css; - size_t max_bytes = cft->max_write_len ?: CGROUP_LOCAL_BUFFER_SIZE - 1; + size_t max_bytes = max(cft->max_write_len, PAGE_SIZE); char *buf; int ret; - if (nbytes >= max_bytes) + if (nbytes > max_bytes) return -E2BIG; buf = kmalloc(nbytes + 1, GFP_KERNEL); @@ -3919,7 +3917,7 @@ static struct cftype cgroup_base_files[] = { .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT, .seq_show = cgroup_release_agent_show, .write_string = cgroup_release_agent_write, - .max_write_len = PATH_MAX, + .max_write_len = PATH_MAX - 1, }, { } /* terminate */ }; -- cgit v1.2.3 From 2da440a26ce4743bd3e71ba964ba3f983d09bba5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:48 -0500 Subject: cgroup: introduce cgroup_init/exit_cftypes() Factor out cft->ss initialization into cgroup_init_cftypes() from cgroup_add_cftypes() and add cft->ss clearing to cgroup_rm_cftypes() through cgroup_exit_cftypes(). This doesn't make any meaningful difference now but the two new functions will be expanded during kernfs transition. 
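From a controller's point of view the two new functions sit behind the existing registration pair, roughly as follows (my_ss and my_cfts are placeholders, not from the patch; the real hunks follow below):

	/*
	 * cgroup_add_cftypes() now stamps cft->ss on every entry via
	 * cgroup_init_cftypes() before creating the files ...
	 */
	ret = cgroup_add_cftypes(&my_ss, my_cfts);
	if (ret)
		return ret;

	/*
	 * ... and cgroup_rm_cftypes() removes the files and clears
	 * cft->ss again via cgroup_exit_cftypes().
	 */
	WARN_ON(cgroup_rm_cftypes(my_cfts));
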
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fde3633ef389..42e588ef62d1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2770,6 +2770,22 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) return ret; } +static void cgroup_exit_cftypes(struct cftype *cfts) +{ + struct cftype *cft; + + for (cft = cfts; cft->name[0] != '\0'; cft++) + cft->ss = NULL; +} + +static void cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) +{ + struct cftype *cft; + + for (cft = cfts; cft->name[0] != '\0'; cft++) + cft->ss = ss; +} + /** * cgroup_add_cftypes - add an array of cftypes to a subsystem * @ss: target cgroup subsystem @@ -2787,15 +2803,13 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) { struct cftype_set *set; - struct cftype *cft; int ret; set = kzalloc(sizeof(*set), GFP_KERNEL); if (!set) return -ENOMEM; - for (cft = cfts; cft->name[0] != '\0'; cft++) - cft->ss = ss; + cgroup_init_cftypes(ss, cfts); cgroup_cfts_prepare(); set->cfts = cfts; @@ -2820,6 +2834,7 @@ EXPORT_SYMBOL_GPL(cgroup_add_cftypes); */ int cgroup_rm_cftypes(struct cftype *cfts) { + struct cftype *found = NULL; struct cftype_set *set; if (!cfts || !cfts[0].ss) @@ -2831,13 +2846,14 @@ int cgroup_rm_cftypes(struct cftype *cfts) if (set->cfts == cfts) { list_del(&set->node); kfree(set); - cgroup_cfts_commit(cfts, false); - return 0; + found = cfts; + break; } } - cgroup_cfts_commit(NULL, false); - return -ENOENT; + cgroup_cfts_commit(found, false); + cgroup_exit_cftypes(cfts); + return found ? 0 : -ENOENT; } /** @@ -4596,6 +4612,8 @@ int __init cgroup_init(void) if (err) return err; + cgroup_init_cftypes(NULL, cgroup_base_files); + for_each_subsys(ss, i) { if (!ss->early_init) cgroup_init_subsys(ss); -- cgit v1.2.3 From b1664924062393bb048203bd4622e0b1c9e1d328 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:49 -0500 Subject: cgroup: introduce cgroup_ino() mm/memory-failure.c::hwpoison_filter_task() has been reaching into cgroup to extract the associated ino to be used as a filtering criterion. This is an implementation detail which shouldn't be depended upon from outside cgroup proper and is about to change with the scheduled kernfs conversion. This patch introduces a proper interface to determine the associated ino, cgroup_ino(), and updates hwpoison_filter_task() to use it instead of reaching directly into cgroup. Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Andi Kleen Cc: Wu Fengguang --- kernel/cgroup.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 42e588ef62d1..11f7a05e791e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -792,7 +792,10 @@ static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb) struct inode *inode = new_inode(sb); if (inode) { - inode->i_ino = get_next_ino(); + do { + /* ino 0 is reserved for dummy_root */ + inode->i_ino = get_next_ino(); + } while (!inode->i_ino); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); -- cgit v1.2.3 From 59f5296b51b86718dd6eecf0a268b2f1a1ec0a2d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:49 -0500 Subject: cgroup: misc preps for kernfs conversion * Un-inline seq_css(). 
After kernfs conversion, the function will need to dereference internal data structures. * Add cgroup_get/put_root() and replace direct super_block->s_active manipulatinos with them. These will be converted to kernfs_root refcnting. * Add cgroup_get/put() and replace dget/put() on cgrp->dentry with them. These will be converted to kernfs refcnting. * Update current_css_set_cg_links_read() to use cgroup_name() instead of reaching into the dentry name. The end result is the same. These changes don't make functional differences but will make transition to kernfs easier. v2: Rebased on top of 0ab02ca8f887 ("cgroup: protect modifications to cgroup_idr with cgroup_mutex"). Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 85 +++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 53 insertions(+), 32 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 11f7a05e791e..9e9e8fd632d8 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -169,6 +169,7 @@ static int need_forkexit_callback __read_mostly; static struct cftype cgroup_base_files[]; +static void cgroup_put(struct cgroup *cgrp); static void cgroup_destroy_css_killed(struct cgroup *cgrp); static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], @@ -204,6 +205,13 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp) return test_bit(CGRP_DEAD, &cgrp->flags); } +struct cgroup_subsys_state *seq_css(struct seq_file *seq) +{ + struct cgroup_open_file *of = seq->private; + return of->cfe->css; +} +EXPORT_SYMBOL_GPL(seq_css); + /** * cgroup_is_descendant - test ancestry * @cgrp: the cgroup to be tested @@ -682,6 +690,16 @@ static struct css_set *find_css_set(struct css_set *old_cset, return cset; } +static void cgroup_get_root(struct cgroupfs_root *root) +{ + atomic_inc(&root->sb->s_active); +} + +static void cgroup_put_root(struct cgroupfs_root *root) +{ + deactivate_super(root->sb); +} + /* * Return the cgroup for "task" from the given hierarchy. Must be * called with cgroup_mutex held. @@ -837,18 +855,14 @@ static void cgroup_free_fn(struct work_struct *work) mutex_unlock(&cgroup_mutex); /* - * We get a ref to the parent's dentry, and put the ref when - * this cgroup is being freed, so it's guaranteed that the - * parent won't be destroyed before its children. + * We get a ref to the parent, and put the ref when this cgroup is + * being freed, so it's guaranteed that the parent won't be + * destroyed before its children. */ - dput(cgrp->parent->dentry); + cgroup_put(cgrp->parent); - /* - * Drop the active superblock reference that we took when we - * created the cgroup. This will free cgrp->root, if we are - * holding the last reference to @sb. - */ - deactivate_super(cgrp->root->sb); + /* put the root reference that we took when we created the cgroup */ + cgroup_put_root(cgrp->root); cgroup_pidlist_destroy_all(cgrp); @@ -866,6 +880,11 @@ static void cgroup_free_rcu(struct rcu_head *head) queue_work(cgroup_destroy_wq, &cgrp->destroy_work); } +static void cgroup_get(struct cgroup *cgrp) +{ + dget(cgrp->dentry); +} + static void cgroup_diput(struct dentry *dentry, struct inode *inode) { /* is dentry a directory ? 
if so, kfree() associated cgroup */ @@ -899,6 +918,11 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) iput(inode); } +static void cgroup_put(struct cgroup *cgrp) +{ + dput(cgrp->dentry); +} + static void remove_dir(struct dentry *d) { struct dentry *parent = dget(d->d_parent); @@ -2724,7 +2748,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) struct cgroup_subsys *ss = cfts[0].ss; struct cgroup *root = &ss->root->top_cgroup; struct super_block *sb = ss->root->sb; - struct dentry *prev = NULL; + struct cgroup *prev = NULL; struct inode *inode; struct cgroup_subsys_state *css; u64 update_before; @@ -2754,9 +2778,10 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) continue; inode = cgrp->dentry->d_inode; - dget(cgrp->dentry); - dput(prev); - prev = cgrp->dentry; + cgroup_get(cgrp); + if (prev) + cgroup_put(prev); + prev = cgrp; mutex_unlock(&cgroup_tree_mutex); mutex_lock(&inode->i_mutex); @@ -2768,8 +2793,8 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) break; } mutex_unlock(&cgroup_tree_mutex); - dput(prev); - deactivate_super(sb); + cgroup_put(prev); + cgroup_put_root(ss->root); return ret; } @@ -3863,11 +3888,9 @@ static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, */ static void cgroup_dput(struct cgroup *cgrp) { - struct super_block *sb = cgrp->root->sb; - - atomic_inc(&sb->s_active); - dput(cgrp->dentry); - deactivate_super(sb); + cgroup_get_root(cgrp->root); + cgroup_put(cgrp); + cgroup_put_root(cgrp->root); } static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, @@ -4118,7 +4141,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) if (err) goto err_free; - dget(cgrp->dentry); + cgroup_get(cgrp); css_get(css->parent); if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && @@ -4197,7 +4220,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, * can be done outside cgroup_mutex, since the sb can't * disappear while someone has an open control file on the * fs */ - atomic_inc(&sb->s_active); + cgroup_get_root(root); init_cgroup_housekeeping(cgrp); @@ -4231,7 +4254,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, root->number_of_cgroups++; /* hold a ref to the parent's dentry */ - dget(parent->dentry); + cgroup_get(parent); /* * @cgrp is now fully operational. 
If something fails after this @@ -4261,7 +4284,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, err_free_id: idr_remove(&root->cgroup_idr, cgrp->id); /* Release the reference count that we took on the superblock */ - deactivate_super(sb); + cgroup_put_root(root); err_unlock: mutex_unlock(&cgroup_mutex); err_unlock_tree: @@ -4493,7 +4516,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) static void cgroup_destroy_css_killed(struct cgroup *cgrp) { struct cgroup *parent = cgrp->parent; - struct dentry *d = cgrp->dentry; lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); @@ -4501,7 +4523,7 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp) /* delete this cgroup from parent->children */ list_del_rcu(&cgrp->sibling); - dput(d); + cgroup_put(cgrp); set_bit(CGRP_RELEASABLE, &parent->flags); check_for_release(parent); @@ -5161,12 +5183,11 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) cset = rcu_dereference(current->cgroups); list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; - const char *name; + const char *name = "?"; + + if (c != cgroup_dummy_top) + name = cgroup_name(c); - if (c->dentry) - name = c->dentry->d_name.name; - else - name = "?"; seq_printf(seq, "Root %d group %s\n", c->root->hierarchy_id, name); } -- cgit v1.2.3 From f2e85d574e881ff3c597518c1ab48c86f9109880 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:49 -0500 Subject: cgroup: relocate functions in preparation of kernfs conversion Relocate cgroup_init/exit_root_id(), cgroup_free_root(), cgroup_kill_sb() and cgroup_file_name() in preparation of kernfs conversion. These are pure relocations to make kernfs conversion easier to follow. 
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 232 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 117 insertions(+), 115 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9e9e8fd632d8..d8efca44de5f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -170,6 +170,8 @@ static int need_forkexit_callback __read_mostly; static struct cftype cgroup_base_files[]; static void cgroup_put(struct cgroup *cgrp); +static int rebind_subsystems(struct cgroupfs_root *root, + unsigned long added_mask, unsigned removed_mask); static void cgroup_destroy_css_killed(struct cgroup *cgrp); static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], @@ -690,6 +692,42 @@ static struct css_set *find_css_set(struct css_set *old_cset, return cset; } +static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) +{ + int id; + + lockdep_assert_held(&cgroup_mutex); + + id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, + GFP_KERNEL); + if (id < 0) + return id; + + root->hierarchy_id = id; + return 0; +} + +static void cgroup_exit_root_id(struct cgroupfs_root *root) +{ + lockdep_assert_held(&cgroup_mutex); + + if (root->hierarchy_id) { + idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); + root->hierarchy_id = 0; + } +} + +static void cgroup_free_root(struct cgroupfs_root *root) +{ + if (root) { + /* hierarhcy ID shoulid already have been released */ + WARN_ON_ONCE(root->hierarchy_id); + + idr_destroy(&root->cgroup_idr); + kfree(root); + } +} + static void cgroup_get_root(struct cgroupfs_root *root) { atomic_inc(&root->sb->s_active); @@ -700,6 +738,59 @@ static void cgroup_put_root(struct cgroupfs_root *root) deactivate_super(root->sb); } +static void cgroup_kill_sb(struct super_block *sb) +{ + struct cgroupfs_root *root = sb->s_fs_info; + struct cgroup *cgrp = &root->top_cgroup; + struct cgrp_cset_link *link, *tmp_link; + int ret; + + BUG_ON(!root); + + BUG_ON(root->number_of_cgroups != 1); + BUG_ON(!list_empty(&cgrp->children)); + + mutex_lock(&cgrp->dentry->d_inode->i_mutex); + mutex_lock(&cgroup_tree_mutex); + mutex_lock(&cgroup_mutex); + + /* Rebind all subsystems back to the default hierarchy */ + if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { + ret = rebind_subsystems(root, 0, root->subsys_mask); + /* Shouldn't be able to fail ... */ + BUG_ON(ret); + } + + /* + * Release all the links from cset_links to this hierarchy's + * root cgroup + */ + write_lock(&css_set_lock); + + list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { + list_del(&link->cset_link); + list_del(&link->cgrp_link); + kfree(link); + } + write_unlock(&css_set_lock); + + if (!list_empty(&root->root_list)) { + list_del(&root->root_list); + cgroup_root_count--; + } + + cgroup_exit_root_id(root); + + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); + mutex_unlock(&cgrp->dentry->d_inode->i_mutex); + + simple_xattrs_free(&cgrp->xattrs); + + kill_litter_super(sb); + cgroup_free_root(root); +} + /* * Return the cgroup for "task" from the given hierarchy. Must be * called with cgroup_mutex held. 
@@ -846,6 +937,32 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, return buf; } +/** + * cgroup_file_mode - deduce file mode of a control file + * @cft: the control file in question + * + * returns cft->mode if ->mode is not 0 + * returns S_IRUGO|S_IWUSR if it has both a read and a write handler + * returns S_IRUGO if it has only a read handler + * returns S_IWUSR if it has only a write hander + */ +static umode_t cgroup_file_mode(const struct cftype *cft) +{ + umode_t mode = 0; + + if (cft->mode) + return cft->mode; + + if (cft->read_u64 || cft->read_s64 || cft->seq_show) + mode |= S_IRUGO; + + if (cft->write_u64 || cft->write_s64 || cft->write_string || + cft->trigger) + mode |= S_IWUSR; + + return mode; +} + static void cgroup_free_fn(struct work_struct *work) { struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); @@ -1358,31 +1475,6 @@ static void init_cgroup_root(struct cgroupfs_root *root) idr_init(&root->cgroup_idr); } -static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) -{ - int id; - - lockdep_assert_held(&cgroup_mutex); - - id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, - GFP_KERNEL); - if (id < 0) - return id; - - root->hierarchy_id = id; - return 0; -} - -static void cgroup_exit_root_id(struct cgroupfs_root *root) -{ - lockdep_assert_held(&cgroup_mutex); - - if (root->hierarchy_id) { - idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); - root->hierarchy_id = 0; - } -} - static int cgroup_test_super(struct super_block *sb, void *data) { struct cgroup_sb_opts *opts = data; @@ -1435,17 +1527,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) return root; } -static void cgroup_free_root(struct cgroupfs_root *root) -{ - if (root) { - /* hierarhcy ID shoulid already have been released */ - WARN_ON_ONCE(root->hierarchy_id); - - idr_destroy(&root->cgroup_idr); - kfree(root); - } -} - static int cgroup_set_super(struct super_block *sb, void *data) { int ret; @@ -1691,59 +1772,6 @@ out_unlock: return ERR_PTR(ret); } -static void cgroup_kill_sb(struct super_block *sb) -{ - struct cgroupfs_root *root = sb->s_fs_info; - struct cgroup *cgrp = &root->top_cgroup; - struct cgrp_cset_link *link, *tmp_link; - int ret; - - BUG_ON(!root); - - BUG_ON(root->number_of_cgroups != 1); - BUG_ON(!list_empty(&cgrp->children)); - - mutex_lock(&cgrp->dentry->d_inode->i_mutex); - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); - - /* Rebind all subsystems back to the default hierarchy */ - if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { - ret = rebind_subsystems(root, 0, root->subsys_mask); - /* Shouldn't be able to fail ... 
*/ - BUG_ON(ret); - } - - /* - * Release all the links from cset_links to this hierarchy's - * root cgroup - */ - write_lock(&css_set_lock); - - list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { - list_del(&link->cset_link); - list_del(&link->cgrp_link); - kfree(link); - } - write_unlock(&css_set_lock); - - if (!list_empty(&root->root_list)) { - list_del(&root->root_list); - cgroup_root_count--; - } - - cgroup_exit_root_id(root); - - mutex_unlock(&cgroup_mutex); - mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&cgrp->dentry->d_inode->i_mutex); - - simple_xattrs_free(&cgrp->xattrs); - - kill_litter_super(sb); - cgroup_free_root(root); -} - static struct file_system_type cgroup_fs_type = { .name = "cgroup", .mount = cgroup_mount, @@ -2619,32 +2647,6 @@ static int cgroup_create_file(struct dentry *dentry, umode_t mode, return 0; } -/** - * cgroup_file_mode - deduce file mode of a control file - * @cft: the control file in question - * - * returns cft->mode if ->mode is not 0 - * returns S_IRUGO|S_IWUSR if it has both a read and a write handler - * returns S_IRUGO if it has only a read handler - * returns S_IWUSR if it has only a write hander - */ -static umode_t cgroup_file_mode(const struct cftype *cft) -{ - umode_t mode = 0; - - if (cft->mode) - return cft->mode; - - if (cft->read_u64 || cft->read_s64 || cft->seq_show) - mode |= S_IRUGO; - - if (cft->write_u64 || cft->write_s64 || cft->write_string || - cft->trigger) - mode |= S_IWUSR; - - return mode; -} - static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) { struct dentry *dir = cgrp->dentry; -- cgit v1.2.3 From 2bd59d48ebfb3df41ee56938946ca0dd30887312 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 11 Feb 2014 11:52:49 -0500 Subject: cgroup: convert to kernfs cgroup filesystem code was derived from the original sysfs implementation which was heavily intertwined with vfs objects and locking with the goal of re-using the existing vfs infrastructure. That experiment turned out rather disastrous and sysfs switched, a long time ago, to distributed filesystem model where a separate representation is maintained which is queried by vfs. Unfortunately, cgroup stuck with the failed experiment all these years and accumulated even more problems over time. Locking and object lifetime management being entangled with vfs is probably the most egregious. vfs is never designed to be misused like this and cgroup ends up jumping through various convoluted dancing to make things work. Even then, operations across multiple cgroups can't be done safely as it'll deadlock with rename locking. Recently, kernfs is separated out from sysfs so that it can be used by users other than sysfs. This patch converts cgroup to use kernfs, which will bring the following benefits. * Separation from vfs internals. Locking and object lifetime management is contained in cgroup proper making things a lot simpler. This removes significant amount of locking convolutions, hairy object lifetime rules and the restriction on multi-cgroup operations. * Can drop a lot of code to implement filesystem interface as most are provided by kernfs. * Proper "severing" semantics, which allows controllers to not worry about lingering file accesses after offline. While the preceding patches did as much as possible to make the transition less painful, large part of the conversion has to be one discrete step making this patch rather large. The rest of the commit message lists notable changes in different areas. 
Overall ------- * vfs constructs replaced with kernfs ones. cgroup->dentry w/ ->kn, cgroupfs_root->sb w/ ->kf_root. * All dentry accessors are removed. Helpers to map from kernfs constructs are added. * All vfs plumbing around dentry, inode and bdi removed. * cgroup_mount() now directly looks for matching root and then proceeds to create a new one if not found. Synchronization and object lifetime ----------------------------------- * vfs inode locking removed. Among other things, this removes the need for the convolution in cgroup_cfts_commit(). Future patches will further simplify it. * vfs refcnting replaced with cgroup internal ones. cgroup->refcnt, cgroupfs_root->refcnt added. cgroup_put_root() now directly puts root->refcnt and when it reaches zero proceeds to destroy it thus merging cgroup_put_root() and the former cgroup_kill_sb(). Similarly, cgroup_put() now directly schedules cgroup_free_rcu() when refcnt reaches zero. * Unlike before, kernfs objects don't hold onto cgroup objects. When cgroup destroys a kernfs node, all existing operations are drained and the association is broken immediately. The same for cgroupfs_roots and mounts. * All operations which come through kernfs guarantee that the associated cgroup is and stays valid for the duration of operation; however, there are two paths which need to find out the associated cgroup from dentry without going through kernfs - css_tryget_from_dir() and cgroupstats_build(). For these two, kernfs_node->priv is RCU managed so that they can dereference it under RCU read lock. File and directory handling --------------------------- * File and directory operations converted to kernfs_ops and kernfs_syscall_ops. * xattrs are implicitly supported by kernfs. No need to worry about it from cgroup. This means that the "xattr" mount option is no longer necessary. A future patch will add a deprecated warning message when sane_behavior is enabled. * When cftype->max_write_len > PAGE_SIZE, it's necessary to make a private copy of one of the kernfs_ops to set its atomic_write_len. cftype->kf_ops is added and cgroup_init/exit_cftypes() are updated to handle it. * cftype->lockdep_key added so that kernfs lockdep annotation can be per cftype. * Individual file entries and open states are now managed by kernfs. No need to worry about them from cgroup. cfent, cgroup_open_file and their friends are removed. * kernfs_nodes are created deactivated and kernfs_activate() invocations added to places where creation of new nodes is committed. * cgroup_rmdir() uses kernfs_[un]break_active_protection() for self-removal. v2: - Li pointed out in an earlier patch that specifying "name=" during mount without subsystem specification should succeed if there's an existing hierarchy with a matching name although it should fail with -EINVAL if a new hierarchy should be created. Prior to the conversion, this used to be handled by deferring failure from the NULL return of cgroup_root_from_opts(), which was necessary because the root was being created before checking for existing ones. Note that cgroup_root_from_opts() returned an ERR_PTR() value for error conditions which require immediate mount failure. As we now have separate search and creation steps, deferring failure from cgroup_root_from_opts() is no longer necessary. cgroup_root_from_opts() is updated to always return an ERR_PTR() value on failure. - The logic to match existing roots is updated so that a mount attempt with a matching name but a different subsys_mask is rejected. 
This was handled by a separate matching loop under the comment "Check for name clashes with existing mounts" but got lost during conversion. Merge the check into the main search loop. - Add __rcu __force casting in RCU_INIT_POINTER() in cgroup_destroy_locked() to avoid the sparse address space warning reported by kbuild test bot. Maybe we want an explicit interface to use kn->priv as RCU protected pointer? v3: Make CONFIG_CGROUPS select CONFIG_KERNFS. v4: Rebased on top of 0ab02ca8f887 ("cgroup: protect modifications to cgroup_idr with cgroup_mutex"). Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: kbuild test robot fengguang.wu@intel.com> --- kernel/cgroup.c | 1115 ++++++++++++++++++------------------------------------- 1 file changed, 362 insertions(+), 753 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d8efca44de5f..cda614da40cf 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -40,9 +40,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -50,7 +48,6 @@ #include #include #include -#include #include #include #include /* TODO: replace with more sophisticated array */ @@ -176,7 +173,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp); static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], bool is_add); -static int cgroup_file_release(struct inode *inode, struct file *file); static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); /** @@ -209,8 +205,22 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp) struct cgroup_subsys_state *seq_css(struct seq_file *seq) { - struct cgroup_open_file *of = seq->private; - return of->cfe->css; + struct kernfs_open_file *of = seq->private; + struct cgroup *cgrp = of->kn->parent->priv; + struct cftype *cft = seq_cft(seq); + + /* + * This is open and unprotected implementation of cgroup_css(). + * seq_css() is only called from a kernfs file operation which has + * an active reference on the file. Because all the subsystem + * files are drained before a css is disassociated with a cgroup, + * the matching css from the cgroup's subsys table is guaranteed to + * be and stay valid until the enclosing operation is complete. + */ + if (cft->ss) + return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); + else + return &cgrp->dummy_css; } EXPORT_SYMBOL_GPL(seq_css); @@ -276,21 +286,6 @@ static int notify_on_release(const struct cgroup *cgrp) #define for_each_active_root(root) \ list_for_each_entry((root), &cgroup_roots, root_list) -static inline struct cgroup *__d_cgrp(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -static inline struct cfent *__d_cfe(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -static inline struct cftype *__d_cft(struct dentry *dentry) -{ - return __d_cfe(dentry)->type; -} - /** * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. 
* @cgrp: the cgroup to be checked for liveness @@ -692,6 +687,13 @@ static struct css_set *find_css_set(struct css_set *old_cset, return cset; } +static struct cgroupfs_root *cgroup_root_from_kf(struct kernfs_root *kf_root) +{ + struct cgroup *top_cgrp = kf_root->kn->priv; + + return top_cgrp->root; +} + static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) { int id; @@ -730,30 +732,37 @@ static void cgroup_free_root(struct cgroupfs_root *root) static void cgroup_get_root(struct cgroupfs_root *root) { - atomic_inc(&root->sb->s_active); + /* + * The caller must ensure that @root is alive, which can be + * achieved by holding a ref on one of the member cgroups or + * following a registered reference to @root while holding + * cgroup_tree_mutex. + */ + WARN_ON_ONCE(atomic_read(&root->refcnt) <= 0); + atomic_inc(&root->refcnt); } static void cgroup_put_root(struct cgroupfs_root *root) { - deactivate_super(root->sb); -} - -static void cgroup_kill_sb(struct super_block *sb) -{ - struct cgroupfs_root *root = sb->s_fs_info; struct cgroup *cgrp = &root->top_cgroup; struct cgrp_cset_link *link, *tmp_link; int ret; - BUG_ON(!root); + /* + * @root's refcnt reaching zero and its deregistration should be + * atomic w.r.t. cgroup_tree_mutex. This ensures that + * cgroup_get_root() is safe to invoke if @root is registered. + */ + mutex_lock(&cgroup_tree_mutex); + if (!atomic_dec_and_test(&root->refcnt)) { + mutex_unlock(&cgroup_tree_mutex); + return; + } + mutex_lock(&cgroup_mutex); BUG_ON(root->number_of_cgroups != 1); BUG_ON(!list_empty(&cgrp->children)); - mutex_lock(&cgrp->dentry->d_inode->i_mutex); - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); - /* Rebind all subsystems back to the default hierarchy */ if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { ret = rebind_subsystems(root, 0, root->subsys_mask); @@ -783,11 +792,8 @@ static void cgroup_kill_sb(struct super_block *sb) mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&cgrp->dentry->d_inode->i_mutex); - - simple_xattrs_free(&cgrp->xattrs); - kill_litter_super(sb); + kernfs_destroy_root(root->kf_root); cgroup_free_root(root); } @@ -878,42 +884,10 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, * update of a tasks cgroup pointer by cgroup_attach_task() */ -/* - * A couple of forward declarations required, due to cyclic reference loop: - * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir -> - * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations - * -> cgroup_mkdir. 
- */ - -static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); -static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask); -static const struct inode_operations cgroup_dir_inode_operations; +static struct kernfs_syscall_ops cgroup_kf_syscall_ops; static const struct file_operations proc_cgroupstats_operations; -static struct backing_dev_info cgroup_backing_dev_info = { - .name = "cgroup", - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, -}; - -static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb) -{ - struct inode *inode = new_inode(sb); - - if (inode) { - do { - /* ino 0 is reserved for dummy_root */ - inode->i_ino = get_next_ino(); - } while (!inode->i_ino); - inode->i_mode = mode; - inode->i_uid = current_fsuid(); - inode->i_gid = current_fsgid(); - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; - } - return inode; -} - static struct cgroup_name *cgroup_alloc_name(const char *name_str) { struct cgroup_name *name; @@ -983,8 +957,6 @@ static void cgroup_free_fn(struct work_struct *work) cgroup_pidlist_destroy_all(cgrp); - simple_xattrs_free(&cgrp->xattrs); - kfree(rcu_dereference_raw(cgrp->name)); kfree(cgrp); } @@ -999,81 +971,38 @@ static void cgroup_free_rcu(struct rcu_head *head) static void cgroup_get(struct cgroup *cgrp) { - dget(cgrp->dentry); -} - -static void cgroup_diput(struct dentry *dentry, struct inode *inode) -{ - /* is dentry a directory ? if so, kfree() associated cgroup */ - if (S_ISDIR(inode->i_mode)) { - struct cgroup *cgrp = dentry->d_fsdata; - - BUG_ON(!(cgroup_is_dead(cgrp))); - - /* - * XXX: cgrp->id is only used to look up css's. As cgroup - * and css's lifetimes will be decoupled, it should be made - * per-subsystem and moved to css->id so that lookups are - * successful until the target css is released. - */ - mutex_lock(&cgroup_mutex); - idr_remove(&cgrp->root->cgroup_idr, cgrp->id); - mutex_unlock(&cgroup_mutex); - cgrp->id = -1; - - call_rcu(&cgrp->rcu_head, cgroup_free_rcu); - } else { - struct cfent *cfe = __d_cfe(dentry); - struct cgroup *cgrp = dentry->d_parent->d_fsdata; - - WARN_ONCE(!list_empty(&cfe->node) && - cgrp != &cgrp->root->top_cgroup, - "cfe still linked for %s\n", cfe->type->name); - simple_xattrs_free(&cfe->xattrs); - kfree(cfe); - } - iput(inode); + WARN_ON_ONCE(cgroup_is_dead(cgrp)); + WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0); + atomic_inc(&cgrp->refcnt); } static void cgroup_put(struct cgroup *cgrp) { - dput(cgrp->dentry); -} + if (!atomic_dec_and_test(&cgrp->refcnt)) + return; + if (WARN_ON_ONCE(!cgroup_is_dead(cgrp))) + return; -static void remove_dir(struct dentry *d) -{ - struct dentry *parent = dget(d->d_parent); + /* + * XXX: cgrp->id is only used to look up css's. As cgroup and + * css's lifetimes will be decoupled, it should be made + * per-subsystem and moved to css->id so that lookups are + * successful until the target css is released. 
+ */ + mutex_lock(&cgroup_mutex); + idr_remove(&cgrp->root->cgroup_idr, cgrp->id); + mutex_unlock(&cgroup_mutex); + cgrp->id = -1; - d_delete(d); - simple_rmdir(parent->d_inode, d); - dput(parent); + call_rcu(&cgrp->rcu_head, cgroup_free_rcu); } static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) { - struct cfent *cfe; + char name[CGROUP_FILE_NAME_MAX]; - lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); lockdep_assert_held(&cgroup_tree_mutex); - - /* - * If we're doing cleanup due to failure of cgroup_create(), - * the corresponding @cfe may not exist. - */ - list_for_each_entry(cfe, &cgrp->files, node) { - struct dentry *d = cfe->dentry; - - if (cft && cfe->type != cft) - continue; - - dget(d); - d_delete(d); - simple_unlink(cgrp->dentry->d_inode, d); - list_del_init(&cfe->node); - dput(d); - - break; - } + kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); } /** @@ -1096,22 +1025,6 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) } } -/* - * NOTE : the dentry must have been dget()'ed - */ -static void cgroup_d_remove_dir(struct dentry *dentry) -{ - struct dentry *parent; - - parent = dentry->d_parent; - spin_lock(&parent->d_lock); - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - list_del_init(&dentry->d_u.d_child); - spin_unlock(&dentry->d_lock); - spin_unlock(&parent->d_lock); - remove_dir(dentry); -} - static int rebind_subsystems(struct cgroupfs_root *root, unsigned long added_mask, unsigned removed_mask) { @@ -1179,13 +1092,15 @@ static int rebind_subsystems(struct cgroupfs_root *root, * now matches the bound subsystems. */ root->flags |= CGRP_ROOT_SUBSYS_BOUND; + kernfs_activate(cgrp->kn); return 0; } -static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) +static int cgroup_show_options(struct seq_file *seq, + struct kernfs_root *kf_root) { - struct cgroupfs_root *root = dentry->d_sb->s_fs_info; + struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); struct cgroup_subsys *ss; int ssid; @@ -1219,9 +1134,6 @@ struct cgroup_sb_opts { char *name; /* User explicitly requested empty subsystem */ bool none; - - struct cgroupfs_root *new_root; - }; /* @@ -1380,11 +1292,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) return 0; } -static int cgroup_remount(struct super_block *sb, int *flags, char *data) +static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) { int ret = 0; - struct cgroupfs_root *root = sb->s_fs_info; - struct cgroup *cgrp = &root->top_cgroup; + struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); struct cgroup_sb_opts opts; unsigned long added_mask, removed_mask; @@ -1393,7 +1304,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) return -EINVAL; } - mutex_lock(&cgrp->dentry->d_inode->i_mutex); mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); @@ -1439,34 +1349,26 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data) kfree(opts.name); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return ret; } -static const struct super_operations cgroup_ops = { - .statfs = simple_statfs, - .drop_inode = generic_delete_inode, - .show_options = cgroup_show_options, - .remount_fs = cgroup_remount, -}; - static void init_cgroup_housekeeping(struct cgroup *cgrp) { + atomic_set(&cgrp->refcnt, 1); INIT_LIST_HEAD(&cgrp->sibling); INIT_LIST_HEAD(&cgrp->children); - INIT_LIST_HEAD(&cgrp->files); 
INIT_LIST_HEAD(&cgrp->cset_links); INIT_LIST_HEAD(&cgrp->release_list); INIT_LIST_HEAD(&cgrp->pidlists); mutex_init(&cgrp->pidlist_mutex); cgrp->dummy_css.cgroup = cgrp; - simple_xattrs_init(&cgrp->xattrs); } static void init_cgroup_root(struct cgroupfs_root *root) { struct cgroup *cgrp = &root->top_cgroup; + atomic_set(&root->refcnt, 1); INIT_LIST_HEAD(&root->root_list); root->number_of_cgroups = 1; cgrp->root = root; @@ -1475,32 +1377,12 @@ static void init_cgroup_root(struct cgroupfs_root *root) idr_init(&root->cgroup_idr); } -static int cgroup_test_super(struct super_block *sb, void *data) -{ - struct cgroup_sb_opts *opts = data; - struct cgroupfs_root *root = sb->s_fs_info; - - /* If we asked for a name then it must match */ - if (opts->name && strcmp(opts->name, root->name)) - return 0; - - /* - * If we asked for subsystems (or explicitly for no - * subsystems) then they must match - */ - if ((opts->subsys_mask || opts->none) - && (opts->subsys_mask != root->subsys_mask)) - return 0; - - return 1; -} - static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) { struct cgroupfs_root *root; if (!opts->subsys_mask && !opts->none) - return NULL; + return ERR_PTR(-EINVAL); root = kzalloc(sizeof(*root), GFP_KERNEL); if (!root) @@ -1527,99 +1409,21 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) return root; } -static int cgroup_set_super(struct super_block *sb, void *data) -{ - int ret; - struct cgroup_sb_opts *opts = data; - - /* If we don't have a new root, we can't set up a new sb */ - if (!opts->new_root) - return -EINVAL; - - BUG_ON(!opts->subsys_mask && !opts->none); - - ret = set_anon_super(sb, NULL); - if (ret) - return ret; - - sb->s_fs_info = opts->new_root; - opts->new_root->sb = sb; - - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = CGROUP_SUPER_MAGIC; - sb->s_op = &cgroup_ops; - - return 0; -} - -static int cgroup_get_rootdir(struct super_block *sb) -{ - static const struct dentry_operations cgroup_dops = { - .d_iput = cgroup_diput, - .d_delete = always_delete_dentry, - }; - - struct inode *inode = - cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); - - if (!inode) - return -ENOMEM; - - inode->i_fop = &simple_dir_operations; - inode->i_op = &cgroup_dir_inode_operations; - /* directories start off with i_nlink == 2 (for "." 
entry) */ - inc_nlink(inode); - sb->s_root = d_make_root(inode); - if (!sb->s_root) - return -ENOMEM; - /* for everything else we want ->d_op set */ - sb->s_d_op = &cgroup_dops; - return 0; -} - static int cgroup_setup_root(struct cgroupfs_root *root) { LIST_HEAD(tmp_links); - struct super_block *sb = root->sb; struct cgroup *root_cgrp = &root->top_cgroup; - struct cgroupfs_root *existing_root; struct css_set *cset; - struct inode *inode; - const struct cred *cred; int i, ret; lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); - BUG_ON(sb->s_root != NULL); - - mutex_unlock(&cgroup_mutex); - mutex_unlock(&cgroup_tree_mutex); - - ret = cgroup_get_rootdir(sb); - if (ret) { - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); - return ret; - } - inode = sb->s_root->d_inode; - - mutex_lock(&inode->i_mutex); - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); if (ret < 0) - goto out_unlock; + goto out; root_cgrp->id = ret; - /* check for name clashes with existing mounts */ - ret = -EBUSY; - if (strlen(root->name)) - for_each_active_root(existing_root) - if (!strcmp(existing_root->name, root->name)) - goto out_unlock; - /* * We're accessing css_set_count without locking css_set_lock here, * but that's OK - it can only be increased by someone holding @@ -1628,34 +1432,29 @@ static int cgroup_setup_root(struct cgroupfs_root *root) */ ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); if (ret) - goto out_unlock; + goto out; /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ ret = cgroup_init_root_id(root, 2, 0); if (ret) - goto out_unlock; - - sb->s_root->d_fsdata = root_cgrp; - root_cgrp->dentry = sb->s_root; + goto out; - /* - * We're inside get_sb() and will call lookup_one_len() to create - * the root files, which doesn't work if SELinux is in use. The - * following cred dancing somehow works around it. See 2ce9738ba - * ("cgroupfs: use init_cred when populating new cgroupfs mount") - * for more details. 
- */ - cred = override_creds(&init_cred); + root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops, + KERNFS_ROOT_CREATE_DEACTIVATED, + root_cgrp); + if (IS_ERR(root->kf_root)) { + ret = PTR_ERR(root->kf_root); + goto exit_root_id; + } + root_cgrp->kn = root->kf_root->kn; ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); if (ret) - goto rm_base_files; + goto destroy_root; ret = rebind_subsystems(root, root->subsys_mask, 0); if (ret) - goto rm_base_files; - - revert_creds(cred); + goto destroy_root; /* * There must be no failure case after here, since rebinding takes @@ -1677,15 +1476,16 @@ static int cgroup_setup_root(struct cgroupfs_root *root) BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(root->number_of_cgroups != 1); + kernfs_activate(root_cgrp->kn); ret = 0; - goto out_unlock; + goto out; -rm_base_files: - cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); - revert_creds(cred); +destroy_root: + kernfs_destroy_root(root->kf_root); + root->kf_root = NULL; +exit_root_id: cgroup_exit_root_id(root); -out_unlock: - mutex_unlock(&inode->i_mutex); +out: free_cgrp_cset_links(&tmp_links); return ret; } @@ -1694,10 +1494,9 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { - struct super_block *sb = NULL; - struct cgroupfs_root *root = NULL; + struct cgroupfs_root *root; struct cgroup_sb_opts opts; - struct cgroupfs_root *new_root; + struct dentry *dentry; int ret; mutex_lock(&cgroup_tree_mutex); @@ -1708,41 +1507,32 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, if (ret) goto out_unlock; - /* - * Allocate a new cgroup root. We may not need it if we're - * reusing an existing hierarchy. - */ - new_root = cgroup_root_from_opts(&opts); - if (IS_ERR(new_root)) { - ret = PTR_ERR(new_root); - goto out_unlock; - } - opts.new_root = new_root; + /* look for a matching existing root */ + for_each_active_root(root) { + bool name_match = false; - /* Locate an existing or new sb for this hierarchy */ - mutex_unlock(&cgroup_mutex); - mutex_unlock(&cgroup_tree_mutex); - sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); - if (IS_ERR(sb)) { - ret = PTR_ERR(sb); - cgroup_free_root(opts.new_root); - goto out_unlock; - } + /* + * If we asked for a name then it must match. Also, if + * name matches but sybsys_mask doesn't, we should fail. + * Remember whether name matched. + */ + if (opts.name) { + if (strcmp(opts.name, root->name)) + continue; + name_match = true; + } - root = sb->s_fs_info; - BUG_ON(!root); - if (root == opts.new_root) { - ret = cgroup_setup_root(root); - if (ret) - goto out_unlock; - } else { /* - * We re-used an existing hierarchy - the new root (if - * any) is not needed + * If we asked for subsystems (or explicitly for no + * subsystems) then they must match. 
*/ - cgroup_free_root(opts.new_root); + if ((opts.subsys_mask || opts.none) && + (opts.subsys_mask != root->subsys_mask)) { + if (!name_match) + continue; + ret = -EBUSY; + goto out_unlock; + } if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { @@ -1753,23 +1543,45 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); } } + + cgroup_get_root(root); + goto out_unlock; } - ret = 0; + /* no such thing, create a new one */ + root = cgroup_root_from_opts(&opts); + if (IS_ERR(root)) { + ret = PTR_ERR(root); + goto out_unlock; + } + + ret = cgroup_setup_root(root); + if (ret) + cgroup_free_root(root); + out_unlock: mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - if (ret && !IS_ERR_OR_NULL(sb)) - deactivate_locked_super(sb); - kfree(opts.release_agent); kfree(opts.name); - if (!ret) - return dget(sb->s_root); - else + if (ret) return ERR_PTR(ret); + + dentry = kernfs_mount(fs_type, flags, root->kf_root); + if (IS_ERR(dentry)) + cgroup_put_root(root); + return dentry; +} + +static void cgroup_kill_sb(struct super_block *sb) +{ + struct kernfs_root *kf_root = kernfs_root_from_sb(sb); + struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); + + cgroup_put_root(root); + kernfs_kill_sb(sb); } static struct file_system_type cgroup_fs_type = { @@ -2301,29 +2113,23 @@ static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) return 0; } -static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, - size_t nbytes, loff_t *ppos) +static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) { - struct cfent *cfe = __d_cfe(file->f_dentry); - struct cftype *cft = __d_cft(file->f_dentry); - struct cgroup_subsys_state *css = cfe->css; - size_t max_bytes = max(cft->max_write_len, PAGE_SIZE); - char *buf; + struct cgroup *cgrp = of->kn->parent->priv; + struct cftype *cft = of->kn->priv; + struct cgroup_subsys_state *css; int ret; - if (nbytes > max_bytes) - return -E2BIG; - - buf = kmalloc(nbytes + 1, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - if (copy_from_user(buf, userbuf, nbytes)) { - ret = -EFAULT; - goto out_free; - } - - buf[nbytes] = '\0'; + /* + * kernfs guarantees that a file isn't deleted with operations in + * flight, which means that the matching css is and stays alive and + * doesn't need to be pinned. The RCU locking is not necessary + * either. It's just for the convenience of using cgroup_css(). + */ + rcu_read_lock(); + css = cgroup_css(cgrp, cft->ss); + rcu_read_unlock(); if (cft->write_string) { ret = cft->write_string(css, cft, strstrip(buf)); @@ -2342,53 +2148,23 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, } else { ret = -EINVAL; } -out_free: - kfree(buf); + return ret ?: nbytes; } -/* - * seqfile ops/methods for returning structured data. Currently just - * supports string->u64 maps, but can be extended in future. - */ - static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) { - struct cftype *cft = seq_cft(seq); - - if (cft->seq_start) { - return cft->seq_start(seq, ppos); - } else { - /* - * The same behavior and code as single_open(). Returns - * !NULL if pos is at the beginning; otherwise, NULL. 
- */ - return NULL + !*ppos; - } + return seq_cft(seq)->seq_start(seq, ppos); } static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) { - struct cftype *cft = seq_cft(seq); - - if (cft->seq_next) { - return cft->seq_next(seq, v, ppos); - } else { - /* - * The same behavior and code as single_open(), always - * terminate after the initial read. - */ - ++*ppos; - return NULL; - } + return seq_cft(seq)->seq_next(seq, v, ppos); } static void cgroup_seqfile_stop(struct seq_file *seq, void *v) { - struct cftype *cft = seq_cft(seq); - - if (cft->seq_stop) - cft->seq_stop(seq, v); + seq_cft(seq)->seq_stop(seq, v); } static int cgroup_seqfile_show(struct seq_file *m, void *arg) @@ -2408,96 +2184,36 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg) return 0; } -static struct seq_operations cgroup_seq_operations = { - .start = cgroup_seqfile_start, - .next = cgroup_seqfile_next, - .stop = cgroup_seqfile_stop, - .show = cgroup_seqfile_show, +static struct kernfs_ops cgroup_kf_single_ops = { + .atomic_write_len = PAGE_SIZE, + .write = cgroup_file_write, + .seq_show = cgroup_seqfile_show, }; -static int cgroup_file_open(struct inode *inode, struct file *file) -{ - struct cfent *cfe = __d_cfe(file->f_dentry); - struct cftype *cft = __d_cft(file->f_dentry); - struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent); - struct cgroup_subsys_state *css; - struct cgroup_open_file *of; - int err; - - err = generic_file_open(inode, file); - if (err) - return err; - - /* - * If the file belongs to a subsystem, pin the css. Will be - * unpinned either on open failure or release. This ensures that - * @css stays alive for all file operations. - */ - rcu_read_lock(); - css = cgroup_css(cgrp, cft->ss); - if (cft->ss && !css_tryget(css)) - css = NULL; - rcu_read_unlock(); - - if (!css) - return -ENODEV; - - /* - * @cfe->css is used by read/write/close to determine the - * associated css. @file->private_data would be a better place but - * that's already used by seqfile. Multiple accessors may use it - * simultaneously which is okay as the association never changes. - */ - WARN_ON_ONCE(cfe->css && cfe->css != css); - cfe->css = css; - - of = __seq_open_private(file, &cgroup_seq_operations, - sizeof(struct cgroup_open_file)); - if (of) { - of->cfe = cfe; - return 0; - } - - if (css->ss) - css_put(css); - return -ENOMEM; -} - -static int cgroup_file_release(struct inode *inode, struct file *file) -{ - struct cfent *cfe = __d_cfe(file->f_dentry); - struct cgroup_subsys_state *css = cfe->css; - - if (css->ss) - css_put(css); - return seq_release_private(inode, file); -} +static struct kernfs_ops cgroup_kf_ops = { + .atomic_write_len = PAGE_SIZE, + .write = cgroup_file_write, + .seq_start = cgroup_seqfile_start, + .seq_next = cgroup_seqfile_next, + .seq_stop = cgroup_seqfile_stop, + .seq_show = cgroup_seqfile_show, +}; /* * cgroup_rename - Only allow simple rename of directories in place. */ -static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) +static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, + const char *new_name_str) { - int ret; + struct cgroup *cgrp = kn->priv; struct cgroup_name *name, *old_name; - struct cgroup *cgrp; - - /* - * It's convinient to use parent dir's i_mutex to protected - * cgrp->name. 
- */ - lockdep_assert_held(&old_dir->i_mutex); + int ret; - if (!S_ISDIR(old_dentry->d_inode->i_mode)) + if (kernfs_type(kn) != KERNFS_DIR) return -ENOTDIR; - if (new_dentry->d_inode) - return -EEXIST; - if (old_dir != new_dir) + if (kn->parent != new_parent) return -EIO; - cgrp = __d_cgrp(old_dentry); - /* * This isn't a proper migration and its usefulness is very * limited. Disallow if sane_behavior. @@ -2505,186 +2221,43 @@ static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, if (cgroup_sane_behavior(cgrp)) return -EPERM; - name = cgroup_alloc_name(new_dentry->d_name.name); + name = cgroup_alloc_name(new_name_str); if (!name) return -ENOMEM; - ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry); - if (ret) { - kfree(name); - return ret; + mutex_lock(&cgroup_tree_mutex); + mutex_lock(&cgroup_mutex); + + ret = kernfs_rename(kn, new_parent, new_name_str); + if (!ret) { + old_name = rcu_dereference_protected(cgrp->name, true); + rcu_assign_pointer(cgrp->name, name); + } else { + old_name = name; } - old_name = rcu_dereference_protected(cgrp->name, true); - rcu_assign_pointer(cgrp->name, name); + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); kfree_rcu(old_name, rcu_head); - return 0; -} - -static struct simple_xattrs *__d_xattrs(struct dentry *dentry) -{ - if (S_ISDIR(dentry->d_inode->i_mode)) - return &__d_cgrp(dentry)->xattrs; - else - return &__d_cfe(dentry)->xattrs; -} - -static inline int xattr_enabled(struct dentry *dentry) -{ - struct cgroupfs_root *root = dentry->d_sb->s_fs_info; - return root->flags & CGRP_ROOT_XATTR; -} - -static bool is_valid_xattr(const char *name) -{ - if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || - !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) - return true; - return false; -} - -static int cgroup_setxattr(struct dentry *dentry, const char *name, - const void *val, size_t size, int flags) -{ - if (!xattr_enabled(dentry)) - return -EOPNOTSUPP; - if (!is_valid_xattr(name)) - return -EINVAL; - return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags); -} - -static int cgroup_removexattr(struct dentry *dentry, const char *name) -{ - if (!xattr_enabled(dentry)) - return -EOPNOTSUPP; - if (!is_valid_xattr(name)) - return -EINVAL; - return simple_xattr_remove(__d_xattrs(dentry), name); -} - -static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name, - void *buf, size_t size) -{ - if (!xattr_enabled(dentry)) - return -EOPNOTSUPP; - if (!is_valid_xattr(name)) - return -EINVAL; - return simple_xattr_get(__d_xattrs(dentry), name, buf, size); -} - -static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size) -{ - if (!xattr_enabled(dentry)) - return -EOPNOTSUPP; - return simple_xattr_list(__d_xattrs(dentry), buf, size); -} - -static const struct file_operations cgroup_file_operations = { - .read = seq_read, - .write = cgroup_file_write, - .llseek = generic_file_llseek, - .open = cgroup_file_open, - .release = cgroup_file_release, -}; - -static const struct inode_operations cgroup_file_inode_operations = { - .setxattr = cgroup_setxattr, - .getxattr = cgroup_getxattr, - .listxattr = cgroup_listxattr, - .removexattr = cgroup_removexattr, -}; - -static const struct inode_operations cgroup_dir_inode_operations = { - .lookup = simple_lookup, - .mkdir = cgroup_mkdir, - .rmdir = cgroup_rmdir, - .rename = cgroup_rename, - .setxattr = cgroup_setxattr, - .getxattr = cgroup_getxattr, - .listxattr = cgroup_listxattr, - .removexattr = 
cgroup_removexattr, -}; - -static int cgroup_create_file(struct dentry *dentry, umode_t mode, - struct super_block *sb) -{ - struct inode *inode; - - if (!dentry) - return -ENOENT; - if (dentry->d_inode) - return -EEXIST; - - inode = cgroup_new_inode(mode, sb); - if (!inode) - return -ENOMEM; - - if (S_ISDIR(mode)) { - inode->i_op = &cgroup_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - - /* start off with i_nlink == 2 (for "." entry) */ - inc_nlink(inode); - inc_nlink(dentry->d_parent->d_inode); - - /* - * Control reaches here with cgroup_mutex held. - * @inode->i_mutex should nest outside cgroup_mutex but we - * want to populate it immediately without releasing - * cgroup_mutex. As @inode isn't visible to anyone else - * yet, trylock will always succeed without affecting - * lockdep checks. - */ - WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex)); - } else if (S_ISREG(mode)) { - inode->i_size = 0; - inode->i_fop = &cgroup_file_operations; - inode->i_op = &cgroup_file_inode_operations; - } - d_instantiate(dentry, inode); - dget(dentry); /* Extra count - pin the dentry in core */ - return 0; + return ret; } static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) { - struct dentry *dir = cgrp->dentry; - struct cgroup *parent = __d_cgrp(dir); - struct dentry *dentry; - struct cfent *cfe; - int error; - umode_t mode; char name[CGROUP_FILE_NAME_MAX]; + struct kernfs_node *kn; + struct lock_class_key *key = NULL; - BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); - - cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); - if (!cfe) - return -ENOMEM; - - cgroup_file_name(cgrp, cft, name); - dentry = lookup_one_len(name, dir, strlen(name)); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out; - } - - cfe->type = (void *)cft; - cfe->dentry = dentry; - dentry->d_fsdata = cfe; - simple_xattrs_init(&cfe->xattrs); - - mode = cgroup_file_mode(cft); - error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); - if (!error) { - list_add_tail(&cfe->node, &parent->files); - cfe = NULL; - } - dput(dentry); -out: - kfree(cfe); - return error; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + key = &cft->lockdep_key; +#endif + kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), + cgroup_file_mode(cft), 0, cft->kf_ops, cft, + NULL, false, key); + if (IS_ERR(kn)) + return PTR_ERR(kn); + return 0; } /** @@ -2704,7 +2277,6 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], struct cftype *cft; int ret; - lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); lockdep_assert_held(&cgroup_tree_mutex); for (cft = cfts; cft->name[0] != '\0'; cft++) { @@ -2749,9 +2321,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) LIST_HEAD(pending); struct cgroup_subsys *ss = cfts[0].ss; struct cgroup *root = &ss->root->top_cgroup; - struct super_block *sb = ss->root->sb; struct cgroup *prev = NULL; - struct inode *inode; struct cgroup_subsys_state *css; u64 update_before; int ret = 0; @@ -2759,12 +2329,13 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) mutex_unlock(&cgroup_mutex); /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ - if (!cfts || ss->root == &cgroup_dummy_root || - !atomic_inc_not_zero(&sb->s_active)) { + if (!cfts || ss->root == &cgroup_dummy_root) { mutex_unlock(&cgroup_tree_mutex); return 0; } + cgroup_get_root(ss->root); + /* * All cgroups which are created after we drop cgroup_mutex will * have the updated set of files, so we only need to update the @@ -2779,18 +2350,16 @@ static int 
cgroup_cfts_commit(struct cftype *cfts, bool is_add) if (cgroup_is_dead(cgrp)) continue; - inode = cgrp->dentry->d_inode; cgroup_get(cgrp); if (prev) cgroup_put(prev); prev = cgrp; - mutex_unlock(&cgroup_tree_mutex); - mutex_lock(&inode->i_mutex); - mutex_lock(&cgroup_tree_mutex); - if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) + if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) { ret = cgroup_addrm_files(cgrp, cfts, is_add); - mutex_unlock(&inode->i_mutex); + if (is_add) + kernfs_activate(cgrp->kn); + } if (ret) break; } @@ -2804,16 +2373,45 @@ static void cgroup_exit_cftypes(struct cftype *cfts) { struct cftype *cft; - for (cft = cfts; cft->name[0] != '\0'; cft++) + for (cft = cfts; cft->name[0] != '\0'; cft++) { + /* free copy for custom atomic_write_len, see init_cftypes() */ + if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) + kfree(cft->kf_ops); + cft->kf_ops = NULL; cft->ss = NULL; + } } -static void cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) +static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) { struct cftype *cft; - for (cft = cfts; cft->name[0] != '\0'; cft++) + for (cft = cfts; cft->name[0] != '\0'; cft++) { + struct kernfs_ops *kf_ops; + + if (cft->seq_start) + kf_ops = &cgroup_kf_ops; + else + kf_ops = &cgroup_kf_single_ops; + + /* + * Ugh... if @cft wants a custom max_write_len, we need to + * make a copy of kf_ops to set its atomic_write_len. + */ + if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) { + kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL); + if (!kf_ops) { + cgroup_exit_cftypes(cfts); + return -ENOMEM; + } + kf_ops->atomic_write_len = cft->max_write_len; + } + + cft->kf_ops = kf_ops; cft->ss = ss; + } + + return 0; } /** @@ -2839,7 +2437,9 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) if (!set) return -ENOMEM; - cgroup_init_cftypes(ss, cfts); + ret = cgroup_init_cftypes(ss, cfts); + if (ret) + return ret; cgroup_cfts_prepare(); set->cfts = cfts; @@ -3706,21 +3306,27 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type, */ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) { - int ret = -EINVAL; + struct kernfs_node *kn = kernfs_node_from_dentry(dentry); struct cgroup *cgrp; struct css_task_iter it; struct task_struct *tsk; + /* it should be kernfs_node belonging to cgroupfs and is a directory */ + if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || + kernfs_type(kn) != KERNFS_DIR) + return -EINVAL; + /* - * Validate dentry by checking the superblock operations, - * and make sure it's a directory. + * We aren't being called from kernfs and there's no guarantee on + * @kn->priv's validity. For this and css_tryget_from_dir(), + * @kn->priv is RCU safe. Let's do the RCU dancing. */ - if (dentry->d_sb->s_op != &cgroup_ops || - !S_ISDIR(dentry->d_inode->i_mode)) - goto err; - - ret = 0; - cgrp = dentry->d_fsdata; + rcu_read_lock(); + cgrp = rcu_dereference(kn->priv); + if (!cgrp) { + rcu_read_unlock(); + return -ENOENT; + } css_task_iter_start(&cgrp->dummy_css, &it); while ((tsk = css_task_iter_next(&it))) { @@ -3745,8 +3351,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) } css_task_iter_end(&it); -err: - return ret; + rcu_read_unlock(); + return 0; } @@ -3764,7 +3370,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) * after a seek to the start). 
Use a binary-search to find the * next pid to display, if any */ - struct cgroup_open_file *of = s->private; + struct kernfs_open_file *of = s->private; struct cgroup *cgrp = seq_css(s)->cgroup; struct cgroup_pidlist *l; enum cgroup_filetype type = seq_cft(s)->private; @@ -3819,7 +3425,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos) static void cgroup_pidlist_stop(struct seq_file *s, void *v) { - struct cgroup_open_file *of = s->private; + struct kernfs_open_file *of = s->private; struct cgroup_pidlist *l = of->priv; if (l) @@ -3830,7 +3436,7 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v) static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) { - struct cgroup_open_file *of = s->private; + struct kernfs_open_file *of = s->private; struct cgroup_pidlist *l = of->priv; pid_t *p = v; pid_t *end = l->list + l->length; @@ -3880,21 +3486,6 @@ static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, return 0; } -/* - * When dput() is called asynchronously, if umount has been done and - * then deactivate_super() in cgroup_free_fn() kills the superblock, - * there's a small window that vfs will see the root dentry with non-zero - * refcnt and trigger BUG(). - * - * That's why we hold a reference before dput() and drop it right after. - */ -static void cgroup_dput(struct cgroup *cgrp) -{ - cgroup_get_root(cgrp->root); - cgroup_put(cgrp); - cgroup_put_root(cgrp->root); -} - static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -4029,7 +3620,7 @@ static void css_free_work_fn(struct work_struct *work) css_put(css->parent); css->ss->css_free(css); - cgroup_dput(cgrp); + cgroup_put(cgrp); } static void css_free_rcu_fn(struct rcu_head *rcu_head) @@ -4037,10 +3628,6 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head) struct cgroup_subsys_state *css = container_of(rcu_head, struct cgroup_subsys_state, rcu_head); - /* - * css holds an extra ref to @cgrp->dentry which is put on the last - * css_put(). dput() requires process context which we don't have. 
- */ INIT_WORK(&css->destroy_work, css_free_work_fn); queue_work(cgroup_destroy_wq, &css->destroy_work); } @@ -4122,7 +3709,6 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) struct cgroup_subsys_state *css; int err; - lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); lockdep_assert_held(&cgroup_mutex); css = ss->css_alloc(cgroup_css(parent, ss)); @@ -4163,30 +3749,28 @@ err_free: return err; } -/* +/** * cgroup_create - create a cgroup * @parent: cgroup that will be parent of the new cgroup - * @dentry: dentry of the new cgroup - * @mode: mode to set on new inode - * - * Must be called with the mutex on the parent inode held + * @name_str: name of the new cgroup + * @mode: mode to set on new cgroup */ -static long cgroup_create(struct cgroup *parent, struct dentry *dentry, - umode_t mode) +static long cgroup_create(struct cgroup *parent, const char *name_str, + umode_t mode) { struct cgroup *cgrp; struct cgroup_name *name; struct cgroupfs_root *root = parent->root; int ssid, err; struct cgroup_subsys *ss; - struct super_block *sb = root->sb; + struct kernfs_node *kn; /* allocate the cgroup and its ID, 0 is reserved for the root */ cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); if (!cgrp) return -ENOMEM; - name = cgroup_alloc_name(dentry->d_name.name); + name = cgroup_alloc_name(name_str); if (!name) { err = -ENOMEM; goto err_free_cgrp; @@ -4217,18 +3801,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, goto err_unlock; } - /* Grab a reference on the superblock so the hierarchy doesn't - * get deleted on unmount if there are child cgroups. This - * can be done outside cgroup_mutex, since the sb can't - * disappear while someone has an open control file on the - * fs */ - cgroup_get_root(root); - init_cgroup_housekeeping(cgrp); - dentry->d_fsdata = cgrp; - cgrp->dentry = dentry; - cgrp->parent = parent; cgrp->dummy_css.parent = &parent->dummy_css; cgrp->root = parent->root; @@ -4239,15 +3813,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags)) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); - /* - * Create directory. cgroup_create_file() returns with the new - * directory locked on success so that it can be populated without - * dropping cgroup_mutex. - */ - err = cgroup_create_file(dentry, S_IFDIR | mode, sb); - if (err < 0) + /* create the directory */ + kn = kernfs_create_dir(parent->kn, name->name, mode, cgrp); + if (IS_ERR(kn)) { + err = PTR_ERR(kn); goto err_free_id; - lockdep_assert_held(&dentry->d_inode->i_mutex); + } + cgrp->kn = kn; cgrp->serial_nr = cgroup_serial_nr_next++; @@ -4255,7 +3827,11 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); root->number_of_cgroups++; - /* hold a ref to the parent's dentry */ + /* + * Grab a reference on the root and parent so that they don't get + * deleted while there are child cgroups. 
+ */ + cgroup_get_root(root); cgroup_get(parent); /* @@ -4277,16 +3853,15 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, } } + kernfs_activate(kn); + mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&cgrp->dentry->d_inode->i_mutex); return 0; err_free_id: idr_remove(&root->cgroup_idr, cgrp->id); - /* Release the reference count that we took on the superblock */ - cgroup_put_root(root); err_unlock: mutex_unlock(&cgroup_mutex); err_unlock_tree: @@ -4300,16 +3875,15 @@ err_destroy: cgroup_destroy_locked(cgrp); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - mutex_unlock(&dentry->d_inode->i_mutex); return err; } -static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, + umode_t mode) { - struct cgroup *c_parent = dentry->d_parent->d_fsdata; + struct cgroup *parent = parent_kn->priv; - /* the vfs holds inode->i_mutex already */ - return cgroup_create(c_parent, dentry, mode | S_IFDIR); + return cgroup_create(parent, name, mode); } /* @@ -4373,6 +3947,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref) */ static void kill_css(struct cgroup_subsys_state *css) { + /* + * This must happen before css is disassociated with its cgroup. + * See seq_css() for details. + */ cgroup_clear_dir(css->cgroup, 1 << css->ss->id); /* @@ -4421,13 +3999,12 @@ static void kill_css(struct cgroup_subsys_state *css) static int cgroup_destroy_locked(struct cgroup *cgrp) __releases(&cgroup_mutex) __acquires(&cgroup_mutex) { - struct dentry *d = cgrp->dentry; - struct cgroup_subsys_state *css; struct cgroup *child; + struct cgroup_subsys_state *css; + struct kernfs_node *kn; bool empty; int ssid; - lockdep_assert_held(&d->d_inode->i_mutex); lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); @@ -4492,15 +4069,24 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) if (!cgrp->nr_css) cgroup_destroy_css_killed(cgrp); + /* remove @cgrp directory along with the base files */ + mutex_unlock(&cgroup_mutex); + /* - * Clear the base files and remove @cgrp directory. The removal - * puts the base ref but we aren't quite done with @cgrp yet, so - * hold onto it. + * There are two control paths which try to determine cgroup from + * dentry without going through kernfs - cgroupstats_build() and + * css_tryget_from_dir(). Those are supported by RCU protecting + * clearing of cgrp->kn->priv backpointer, which should happen + * after all files under it have been removed. */ - mutex_unlock(&cgroup_mutex); - cgroup_addrm_files(cgrp, cgroup_base_files, false); - dget(d); - cgroup_d_remove_dir(d); + kn = cgrp->kn; + kernfs_get(kn); + + kernfs_remove(cgrp->kn); + + RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); + kernfs_put(kn); + mutex_lock(&cgroup_mutex); return 0; @@ -4531,19 +4117,46 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp) check_for_release(parent); } -static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) +static int cgroup_rmdir(struct kernfs_node *kn) { - int ret; + struct cgroup *cgrp = kn->priv; + int ret = 0; + + /* + * This is self-destruction but @kn can't be removed while this + * callback is in progress. Let's break active protection. Once + * the protection is broken, @cgrp can be destroyed at any point. + * Pin it so that it stays accessible. 
+ */ + cgroup_get(cgrp); + kernfs_break_active_protection(kn); mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); - ret = cgroup_destroy_locked(dentry->d_fsdata); + + /* + * @cgrp might already have been destroyed while we're trying to + * grab the mutexes. + */ + if (!cgroup_is_dead(cgrp)) + ret = cgroup_destroy_locked(cgrp); + mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); + kernfs_unbreak_active_protection(kn); + cgroup_put(cgrp); return ret; } +static struct kernfs_syscall_ops cgroup_kf_syscall_ops = { + .remount_fs = cgroup_remount, + .show_options = cgroup_show_options, + .mkdir = cgroup_mkdir, + .rmdir = cgroup_rmdir, + .rename = cgroup_rename, +}; + static void __init cgroup_init_subsys(struct cgroup_subsys *ss) { struct cgroup_subsys_state *css; @@ -4635,11 +4248,7 @@ int __init cgroup_init(void) unsigned long key; int i, err; - err = bdi_init(&cgroup_backing_dev_info); - if (err) - return err; - - cgroup_init_cftypes(NULL, cgroup_base_files); + BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); for_each_subsys(ss, i) { if (!ss->early_init) @@ -4669,24 +4278,17 @@ int __init cgroup_init(void) mutex_unlock(&cgroup_mutex); cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); - if (!cgroup_kobj) { - err = -ENOMEM; - goto out; - } + if (!cgroup_kobj) + return -ENOMEM; err = register_filesystem(&cgroup_fs_type); if (err < 0) { kobject_put(cgroup_kobj); - goto out; + return err; } proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); - -out: - if (err) - bdi_destroy(&cgroup_backing_dev_info); - - return err; + return 0; } static int __init cgroup_wq_init(void) @@ -5095,18 +4697,25 @@ __setup("cgroup_disable=", cgroup_disable); struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry, struct cgroup_subsys *ss) { + struct kernfs_node *kn = kernfs_node_from_dentry(dentry); + struct cgroup_subsys_state *css = NULL; struct cgroup *cgrp; - struct cgroup_subsys_state *css; /* is @dentry a cgroup dir? */ - if (!dentry->d_inode || - dentry->d_inode->i_op != &cgroup_dir_inode_operations) + if (dentry->d_sb->s_type != &cgroup_fs_type || !kn || + kernfs_type(kn) != KERNFS_DIR) return ERR_PTR(-EBADF); rcu_read_lock(); - cgrp = __d_cgrp(dentry); - css = cgroup_css(cgrp, ss); + /* + * This path doesn't originate from kernfs and @kn could already + * have been or be removed at any point. @kn->priv is RCU + * protected for this access. See destroy_locked() for details. + */ + cgrp = rcu_dereference(kn->priv); + if (cgrp) + css = cgroup_css(cgrp, ss); if (!css || !css_tryget(css)) css = ERR_PTR(-ENOENT); -- cgit v1.2.3 From 86bf4b68759141459864ebd36ac3038a9cda895b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:48 -0500 Subject: cgroup: warn if "xattr" is specified with "sane_behavior" Mount option "xattr" is no longer necessary as it's enabled by default on kernfs. Warn if "xattr" is specified with "sane_behavior" so that the option can be removed in the future. 
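For readability, the added check (extracted from the hunk below) sits in the sane_behavior branch of parse_cgroupfs_options() and amounts to:

	if (opts->flags & CGRP_ROOT_XATTR)
		pr_warning("cgroup: sane_behavior: xattr is always available, flag unnecessary\n");

The option keeps working for now; the warning only nudges users to drop it ahead of the eventual removal.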
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index cda614da40cf..a0fab71f200f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1267,6 +1267,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) pr_err("cgroup: sane_behavior: clone_children is not allowed\n"); return -EINVAL; } + + if (opts->flags & CGRP_ROOT_XATTR) + pr_warning("cgroup: sane_behavior: xattr is always available, flag unnecessary\n"); } /* -- cgit v1.2.3 From 80b13586997d8e584caa772bd99e2a3e55ac6abe Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:48 -0500 Subject: cgroup: relocate cgroup_rm_cftypes() cftype handling is about to be revamped. Relocate cgroup_rm_cftypes() above cgroup_add_cftypes() in preparation. This is pure relocation. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 70 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a0fab71f200f..a2cbd1549995 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2417,6 +2417,41 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) return 0; } +/** + * cgroup_rm_cftypes - remove an array of cftypes from a subsystem + * @cfts: zero-length name terminated array of cftypes + * + * Unregister @cfts. Files described by @cfts are removed from all + * existing cgroups and all future cgroups won't have them either. This + * function can be called anytime whether @cfts' subsys is attached or not. + * + * Returns 0 on successful unregistration, -ENOENT if @cfts is not + * registered. + */ +int cgroup_rm_cftypes(struct cftype *cfts) +{ + struct cftype *found = NULL; + struct cftype_set *set; + + if (!cfts || !cfts[0].ss) + return -ENOENT; + + cgroup_cfts_prepare(); + + list_for_each_entry(set, &cfts[0].ss->cftsets, node) { + if (set->cfts == cfts) { + list_del(&set->node); + kfree(set); + found = cfts; + break; + } + } + + cgroup_cfts_commit(found, false); + cgroup_exit_cftypes(cfts); + return found ? 0 : -ENOENT; +} + /** * cgroup_add_cftypes - add an array of cftypes to a subsystem * @ss: target cgroup subsystem @@ -2454,41 +2489,6 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) } EXPORT_SYMBOL_GPL(cgroup_add_cftypes); -/** - * cgroup_rm_cftypes - remove an array of cftypes from a subsystem - * @cfts: zero-length name terminated array of cftypes - * - * Unregister @cfts. Files described by @cfts are removed from all - * existing cgroups and all future cgroups won't have them either. This - * function can be called anytime whether @cfts' subsys is attached or not. - * - * Returns 0 on successful unregistration, -ENOENT if @cfts is not - * registered. - */ -int cgroup_rm_cftypes(struct cftype *cfts) -{ - struct cftype *found = NULL; - struct cftype_set *set; - - if (!cfts || !cfts[0].ss) - return -ENOENT; - - cgroup_cfts_prepare(); - - list_for_each_entry(set, &cfts[0].ss->cftsets, node) { - if (set->cfts == cfts) { - list_del(&set->node); - kfree(set); - found = cfts; - break; - } - } - - cgroup_cfts_commit(found, false); - cgroup_exit_cftypes(cfts); - return found ? 0 : -ENOENT; -} - /** * cgroup_task_count - count the number of tasks in a cgroup. 
* @cgrp: the cgroup in question -- cgit v1.2.3 From 0adb070426dde2fd0b84e7f4f5cefcd8f0b24410 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:48 -0500 Subject: cgroup: remove cftype_set cftype_set was added primarily to allow registering the same cftype array more than once for different subsystems. Nobody uses or needs such thing and it's already broken because each cftype has ->ss pointer which is initialized during registration. Let's add list_head ->node to cftype and use the first cftype entry in the array to link them instead of allocating separate cftype_set. While at it, trigger WARN if cft seems previously initialized during registration. This simplifies cftype handling a bit. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 41 +++++++++++++---------------------------- 1 file changed, 13 insertions(+), 28 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a2cbd1549995..506ebd61d1c2 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1016,12 +1016,12 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) int i; for_each_subsys(ss, i) { - struct cftype_set *set; + struct cftype *cfts; if (!test_bit(i, &subsys_mask)) continue; - list_for_each_entry(set, &ss->cftsets, node) - cgroup_addrm_files(cgrp, set->cfts, false); + list_for_each_entry(cfts, &ss->cfts, node) + cgroup_addrm_files(cgrp, cfts, false); } } @@ -2392,6 +2392,8 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) for (cft = cfts; cft->name[0] != '\0'; cft++) { struct kernfs_ops *kf_ops; + WARN_ON(cft->ss || cft->kf_ops); + if (cft->seq_start) kf_ops = &cgroup_kf_ops; else @@ -2430,26 +2432,15 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) */ int cgroup_rm_cftypes(struct cftype *cfts) { - struct cftype *found = NULL; - struct cftype_set *set; - if (!cfts || !cfts[0].ss) return -ENOENT; cgroup_cfts_prepare(); + list_del(&cfts->node); + cgroup_cfts_commit(cfts, false); - list_for_each_entry(set, &cfts[0].ss->cftsets, node) { - if (set->cfts == cfts) { - list_del(&set->node); - kfree(set); - found = cfts; - break; - } - } - - cgroup_cfts_commit(found, false); cgroup_exit_cftypes(cfts); - return found ? 
0 : -ENOENT; + return 0; } /** @@ -2468,20 +2459,14 @@ int cgroup_rm_cftypes(struct cftype *cfts) */ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) { - struct cftype_set *set; int ret; - set = kzalloc(sizeof(*set), GFP_KERNEL); - if (!set) - return -ENOMEM; - ret = cgroup_init_cftypes(ss, cfts); if (ret) return ret; cgroup_cfts_prepare(); - set->cfts = cfts; - list_add_tail(&set->node, &ss->cftsets); + list_add_tail(&cfts->node, &ss->cfts); ret = cgroup_cfts_commit(cfts, true); if (ret) cgroup_rm_cftypes(cfts); @@ -3574,13 +3559,13 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask) /* process cftsets of each subsystem */ for_each_subsys(ss, i) { - struct cftype_set *set; + struct cftype *cfts; if (!test_bit(i, &subsys_mask)) continue; - list_for_each_entry(set, &ss->cftsets, node) { - ret = cgroup_addrm_files(cgrp, set->cfts, true); + list_for_each_entry(cfts, &ss->cfts, node) { + ret = cgroup_addrm_files(cgrp, cfts, true); if (ret < 0) goto err; } @@ -4169,7 +4154,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); - INIT_LIST_HEAD(&ss->cftsets); + INIT_LIST_HEAD(&ss->cfts); /* Create the top cgroup state for this subsystem */ ss->root = &cgroup_dummy_root; -- cgit v1.2.3 From 21a2d3430ba8c188af405a5c2eb9c06bdcb6add6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:49 -0500 Subject: cgroup: simplify dynamic cftype addition and removal Dynamic cftype addition and removal using cgroup_add/rm_cftypes() respectively has been quite hairy due to vfs i_mutex. As i_mutex nests outside cgroup_mutex, cgroup_mutex has to be released and regrabbed on each iteration through the hierarchy complicating the process. Now that i_mutex is no longer in play, it can be simplified. * Just holding cgroup_tree_mutex is enough. No need to meddle with cgroup_mutex. * No reason to play the unlock - relock - check serial_nr dancing. Everything can be atomically while holding cgroup_tree_mutex. * cgroup_cfts_prepare() is replaced with direct locking of cgroup_tree_mutex. * cgroup_cfts_commit() no longer fiddles with locking. It just applies the cftypes change to the existing cgroups in the hierarchy. Renamed to cgroup_cfts_apply(). Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 87 +++++++++++++++++++++------------------------------------ 1 file changed, 32 insertions(+), 55 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 506ebd61d1c2..f4409715a2f5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2305,46 +2305,19 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], return 0; } -static void cgroup_cfts_prepare(void) - __acquires(&cgroup_mutex) -{ - /* - * Thanks to the entanglement with vfs inode locking, we can't walk - * the existing cgroups under cgroup_mutex and create files. - * Instead, we use css_for_each_descendant_pre() and drop RCU read - * lock before calling cgroup_addrm_files(). 
- */ - mutex_lock(&cgroup_tree_mutex); - mutex_lock(&cgroup_mutex); -} - -static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) - __releases(&cgroup_mutex) +static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) { LIST_HEAD(pending); struct cgroup_subsys *ss = cfts[0].ss; struct cgroup *root = &ss->root->top_cgroup; - struct cgroup *prev = NULL; struct cgroup_subsys_state *css; - u64 update_before; int ret = 0; - mutex_unlock(&cgroup_mutex); + lockdep_assert_held(&cgroup_tree_mutex); - /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ - if (!cfts || ss->root == &cgroup_dummy_root) { - mutex_unlock(&cgroup_tree_mutex); + /* don't bother if @ss isn't attached */ + if (ss->root == &cgroup_dummy_root) return 0; - } - - cgroup_get_root(ss->root); - - /* - * All cgroups which are created after we drop cgroup_mutex will - * have the updated set of files, so we only need to update the - * cgroups created before the current @cgroup_serial_nr_next. - */ - update_before = cgroup_serial_nr_next; /* add/rm files for all cgroups created before */ css_for_each_descendant_pre(css, cgroup_css(root, ss)) { @@ -2353,22 +2326,13 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) if (cgroup_is_dead(cgrp)) continue; - cgroup_get(cgrp); - if (prev) - cgroup_put(prev); - prev = cgrp; - - if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) { - ret = cgroup_addrm_files(cgrp, cfts, is_add); - if (is_add) - kernfs_activate(cgrp->kn); - } + ret = cgroup_addrm_files(cgrp, cfts, is_add); if (ret) break; } - mutex_unlock(&cgroup_tree_mutex); - cgroup_put(prev); - cgroup_put_root(ss->root); + + if (is_add && !ret) + kernfs_activate(root->kn); return ret; } @@ -2419,6 +2383,19 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) return 0; } +static int cgroup_rm_cftypes_locked(struct cftype *cfts) +{ + lockdep_assert_held(&cgroup_tree_mutex); + + if (!cfts || !cfts[0].ss) + return -ENOENT; + + list_del(&cfts->node); + cgroup_apply_cftypes(cfts, false); + cgroup_exit_cftypes(cfts); + return 0; +} + /** * cgroup_rm_cftypes - remove an array of cftypes from a subsystem * @cfts: zero-length name terminated array of cftypes @@ -2432,15 +2409,12 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) */ int cgroup_rm_cftypes(struct cftype *cfts) { - if (!cfts || !cfts[0].ss) - return -ENOENT; - - cgroup_cfts_prepare(); - list_del(&cfts->node); - cgroup_cfts_commit(cfts, false); + int ret; - cgroup_exit_cftypes(cfts); - return 0; + mutex_lock(&cgroup_tree_mutex); + ret = cgroup_rm_cftypes_locked(cfts); + mutex_unlock(&cgroup_tree_mutex); + return ret; } /** @@ -2465,11 +2439,14 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) if (ret) return ret; - cgroup_cfts_prepare(); + mutex_lock(&cgroup_tree_mutex); + list_add_tail(&cfts->node, &ss->cfts); - ret = cgroup_cfts_commit(cfts, true); + ret = cgroup_apply_cftypes(cfts, true); if (ret) - cgroup_rm_cftypes(cfts); + cgroup_rm_cftypes_locked(cfts); + + mutex_unlock(&cgroup_tree_mutex); return ret; } EXPORT_SYMBOL_GPL(cgroup_add_cftypes); -- cgit v1.2.3 From 6f30558f37bfbd428e3854c2c34b5c32117c8f7e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:50 -0500 Subject: cgroup: make cgroup hold onto its kernfs_node cgroup currently releases its kernfs_node when it gets removed. 
While not buggy, this makes cgroup->kn access rules complicated than necessary and leads to things like get/put protection around kernfs_remove() in cgroup_destroy_locked(). In addition, we want to use kernfs_name/path() and friends but also want to be able to determine a cgroup's name between removal and release. This patch makes cgroup hold onto its kernfs_node until freed so that cgroup->kn is always accessible. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index f4409715a2f5..59dfb025f1ac 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -957,6 +957,8 @@ static void cgroup_free_fn(struct work_struct *work) cgroup_pidlist_destroy_all(cgrp); + kernfs_put(cgrp->kn); + kfree(rcu_dereference_raw(cgrp->name)); kfree(cgrp); } @@ -3786,6 +3788,12 @@ static long cgroup_create(struct cgroup *parent, const char *name_str, } cgrp->kn = kn; + /* + * This extra ref will be put in cgroup_free_fn() and guarantees + * that @cgrp->kn is always accessible. + */ + kernfs_get(kn); + cgrp->serial_nr = cgroup_serial_nr_next++; /* allocation complete, commit to creation */ @@ -3966,7 +3974,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) { struct cgroup *child; struct cgroup_subsys_state *css; - struct kernfs_node *kn; bool empty; int ssid; @@ -4044,13 +4051,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) * clearing of cgrp->kn->priv backpointer, which should happen * after all files under it have been removed. */ - kn = cgrp->kn; - kernfs_get(kn); - - kernfs_remove(cgrp->kn); - + kernfs_remove(cgrp->kn); /* @cgrp has an extra ref on its kn */ RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); - kernfs_put(kn); mutex_lock(&cgroup_mutex); -- cgit v1.2.3 From e61734c55c24cdf11b07e52a74aec4dc4a7f4bd0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:50 -0500 Subject: cgroup: remove cgroup->name cgroup->name handling became quite complicated over time involving dedicated struct cgroup_name for RCU protection. Now that cgroup is on kernfs, we can drop all of it and simply use kernfs_name/path() and friends. Replace cgroup->name and all related code with kernfs name/path constructs. * Reimplement cgroup_name() and cgroup_path() as thin wrappers on top of kernfs counterparts, which involves semantic changes. pr_cont_cgroup_name() and pr_cont_cgroup_path() added. * cgroup->name handling dropped from cgroup_rename(). * All users of cgroup_name/path() updated to the new semantics. Users which were formatting the string just to printk them are converted to use pr_cont_cgroup_name/path() instead, which simplifies things quite a bit. As cgroup_name() no longer requires RCU read lock around it, RCU lockings which were protecting only cgroup_name() are removed. v2: Comment above oom_info_lock updated as suggested by Michal. v3: dummy_top doesn't have a kn associated and pr_cont_cgroup_name/path() ended up calling the matching kernfs functions with NULL kn leading to oops. Test for NULL kn and print "/" if so. This issue was reported by Fengguang Wu. v4: Rebased on top of 0ab02ca8f887 ("cgroup: protect modifications to cgroup_idr with cgroup_mutex"). 
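The wrapper side of this change lands in the cgroup header rather than under kernel/, so it is not visible in the diff below. A rough sketch of the reimplemented helpers, with signatures inferred from the updated callers and pr_cont_kernfs_name() assumed as the kernfs-side counterpart:

	static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
	{
		/* directory name of the cgroup, straight from kernfs */
		return kernfs_name(cgrp->kn, buf, buflen);
	}

	static inline char *cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
	{
		/* full path; NULL if @buflen is too short, matching kernfs_path() */
		return kernfs_path(cgrp->kn, buf, buflen);
	}

	static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
	{
		/* per the v3 note, the real helper also handles a NULL kn (dummy_top) */
		pr_cont_kernfs_name(cgrp->kn);
	}

As cgroup_path() no longer walks ->parent under RCU, callers below switch to PATH_MAX buffers and a simple NULL check on the returned pointer.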
Signed-off-by: Tejun Heo Acked-by: Peter Zijlstra Acked-by: Michal Hocko Acked-by: Li Zefan Cc: Fengguang Wu Cc: Ingo Molnar Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki --- kernel/cgroup.c | 146 ++++++++++++--------------------------------------- kernel/cpuset.c | 27 +++++----- kernel/sched/debug.c | 3 +- 3 files changed, 47 insertions(+), 129 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 59dfb025f1ac..638df032fb94 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -145,8 +145,6 @@ static int cgroup_root_count; /* hierarchy ID allocation and mapping, protected by cgroup_mutex */ static DEFINE_IDR(cgroup_hierarchy_idr); -static struct cgroup_name root_cgroup_name = { .name = "/" }; - /* * Assign a monotonically increasing serial number to cgroups. It * guarantees cgroups with bigger numbers are newer than those with smaller @@ -888,17 +886,6 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask); static struct kernfs_syscall_ops cgroup_kf_syscall_ops; static const struct file_operations proc_cgroupstats_operations; -static struct cgroup_name *cgroup_alloc_name(const char *name_str) -{ - struct cgroup_name *name; - - name = kmalloc(sizeof(*name) + strlen(name_str) + 1, GFP_KERNEL); - if (!name) - return NULL; - strcpy(name->name, name_str); - return name; -} - static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, char *buf) { @@ -958,8 +945,6 @@ static void cgroup_free_fn(struct work_struct *work) cgroup_pidlist_destroy_all(cgrp); kernfs_put(cgrp->kn); - - kfree(rcu_dereference_raw(cgrp->name)); kfree(cgrp); } @@ -1377,7 +1362,6 @@ static void init_cgroup_root(struct cgroupfs_root *root) INIT_LIST_HEAD(&root->root_list); root->number_of_cgroups = 1; cgrp->root = root; - RCU_INIT_POINTER(cgrp->name, &root_cgroup_name); init_cgroup_housekeeping(cgrp); idr_init(&root->cgroup_idr); } @@ -1597,57 +1581,6 @@ static struct file_system_type cgroup_fs_type = { static struct kobject *cgroup_kobj; -/** - * cgroup_path - generate the path of a cgroup - * @cgrp: the cgroup in question - * @buf: the buffer to write the path into - * @buflen: the length of the buffer - * - * Writes path of cgroup into buf. Returns 0 on success, -errno on error. - * - * We can't generate cgroup path using dentry->d_name, as accessing - * dentry->name must be protected by irq-unsafe dentry->d_lock or parent - * inode's i_mutex, while on the other hand cgroup_path() can be called - * with some irq-safe spinlocks held. - */ -int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) -{ - int ret = -ENAMETOOLONG; - char *start; - - if (!cgrp->parent) { - if (strlcpy(buf, "/", buflen) >= buflen) - return -ENAMETOOLONG; - return 0; - } - - start = buf + buflen - 1; - *start = '\0'; - - rcu_read_lock(); - do { - const char *name = cgroup_name(cgrp); - int len; - - len = strlen(name); - if ((start -= len) < buf) - goto out; - memcpy(start, name, len); - - if (--start < buf) - goto out; - *start = '/'; - - cgrp = cgrp->parent; - } while (cgrp->parent); - ret = 0; - memmove(buf, start, buf + buflen - start); -out: - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(cgroup_path); - /** * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy * @task: target task @@ -1659,16 +1592,14 @@ EXPORT_SYMBOL_GPL(cgroup_path); * function grabs cgroup_mutex and shouldn't be used inside locks used by * cgroup controller callbacks. * - * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short. 
+ * Return value is the same as kernfs_path(). */ -int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) +char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) { struct cgroupfs_root *root; struct cgroup *cgrp; - int hierarchy_id = 1, ret = 0; - - if (buflen < 2) - return -ENAMETOOLONG; + int hierarchy_id = 1; + char *path = NULL; mutex_lock(&cgroup_mutex); @@ -1676,14 +1607,15 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) if (root) { cgrp = task_cgroup_from_root(task, root); - ret = cgroup_path(cgrp, buf, buflen); + path = cgroup_path(cgrp, buf, buflen); } else { /* if no hierarchy exists, everyone is in "/" */ - memcpy(buf, "/", 2); + if (strlcpy(buf, "/", buflen) < buflen) + path = buf; } mutex_unlock(&cgroup_mutex); - return ret; + return path; } EXPORT_SYMBOL_GPL(task_cgroup_path); @@ -2211,7 +2143,6 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, const char *new_name_str) { struct cgroup *cgrp = kn->priv; - struct cgroup_name *name, *old_name; int ret; if (kernfs_type(kn) != KERNFS_DIR) @@ -2226,25 +2157,13 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, if (cgroup_sane_behavior(cgrp)) return -EPERM; - name = cgroup_alloc_name(new_name_str); - if (!name) - return -ENOMEM; - mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); ret = kernfs_rename(kn, new_parent, new_name_str); - if (!ret) { - old_name = rcu_dereference_protected(cgrp->name, true); - rcu_assign_pointer(cgrp->name, name); - } else { - old_name = name; - } mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - - kfree_rcu(old_name, rcu_head); return ret; } @@ -3719,14 +3638,13 @@ err_free: /** * cgroup_create - create a cgroup * @parent: cgroup that will be parent of the new cgroup - * @name_str: name of the new cgroup + * @name: name of the new cgroup * @mode: mode to set on new cgroup */ -static long cgroup_create(struct cgroup *parent, const char *name_str, +static long cgroup_create(struct cgroup *parent, const char *name, umode_t mode) { struct cgroup *cgrp; - struct cgroup_name *name; struct cgroupfs_root *root = parent->root; int ssid, err; struct cgroup_subsys *ss; @@ -3737,13 +3655,6 @@ static long cgroup_create(struct cgroup *parent, const char *name_str, if (!cgrp) return -ENOMEM; - name = cgroup_alloc_name(name_str); - if (!name) { - err = -ENOMEM; - goto err_free_cgrp; - } - rcu_assign_pointer(cgrp->name, name); - mutex_lock(&cgroup_tree_mutex); /* @@ -3781,7 +3692,7 @@ static long cgroup_create(struct cgroup *parent, const char *name_str, set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); /* create the directory */ - kn = kernfs_create_dir(parent->kn, name->name, mode, cgrp); + kn = kernfs_create_dir(parent->kn, name, mode, cgrp); if (IS_ERR(kn)) { err = PTR_ERR(kn); goto err_free_id; @@ -3839,8 +3750,6 @@ err_unlock: mutex_unlock(&cgroup_mutex); err_unlock_tree: mutex_unlock(&cgroup_tree_mutex); - kfree(rcu_dereference_raw(cgrp->name)); -err_free_cgrp: kfree(cgrp); return err; @@ -4304,12 +4213,12 @@ int proc_cgroup_show(struct seq_file *m, void *v) { struct pid *pid; struct task_struct *tsk; - char *buf; + char *buf, *path; int retval; struct cgroupfs_root *root; retval = -ENOMEM; - buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + buf = kmalloc(PATH_MAX, GFP_KERNEL); if (!buf) goto out; @@ -4337,10 +4246,12 @@ int proc_cgroup_show(struct seq_file *m, void *v) root->name); seq_putc(m, ':'); cgrp = task_cgroup_from_root(tsk, root); - retval = cgroup_path(cgrp, 
buf, PAGE_SIZE); - if (retval < 0) + path = cgroup_path(cgrp, buf, PATH_MAX); + if (!path) { + retval = -ENAMETOOLONG; goto out_unlock; - seq_puts(m, buf); + } + seq_puts(m, path); seq_putc(m, '\n'); } @@ -4588,16 +4499,17 @@ static void cgroup_release_agent(struct work_struct *work) while (!list_empty(&release_list)) { char *argv[3], *envp[3]; int i; - char *pathbuf = NULL, *agentbuf = NULL; + char *pathbuf = NULL, *agentbuf = NULL, *path; struct cgroup *cgrp = list_entry(release_list.next, struct cgroup, release_list); list_del_init(&cgrp->release_list); raw_spin_unlock(&release_list_lock); - pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); if (!pathbuf) goto continue_free; - if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) + path = cgroup_path(cgrp, pathbuf, PATH_MAX); + if (!path) goto continue_free; agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); if (!agentbuf) @@ -4605,7 +4517,7 @@ static void cgroup_release_agent(struct work_struct *work) i = 0; argv[i++] = agentbuf; - argv[i++] = pathbuf; + argv[i++] = path; argv[i] = NULL; i = 0; @@ -4755,6 +4667,11 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) { struct cgrp_cset_link *link; struct css_set *cset; + char *name_buf; + + name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); + if (!name_buf) + return -ENOMEM; read_lock(&css_set_lock); rcu_read_lock(); @@ -4763,14 +4680,17 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) struct cgroup *c = link->cgrp; const char *name = "?"; - if (c != cgroup_dummy_top) - name = cgroup_name(c); + if (c != cgroup_dummy_top) { + cgroup_name(c, name_buf, NAME_MAX + 1); + name = name_buf; + } seq_printf(seq, "Root %d group %s\n", c->root->hierarchy_id, name); } rcu_read_unlock(); read_unlock(&css_set_lock); + kfree(name_buf); return 0; } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 2d018c795fea..e97a6e88d036 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2088,10 +2088,9 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) parent = parent_cs(parent); if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { - rcu_read_lock(); - printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n", - cgroup_name(cs->css.cgroup)); - rcu_read_unlock(); + printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset "); + pr_cont_cgroup_name(cs->css.cgroup); + pr_cont("\n"); } } @@ -2619,19 +2618,17 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk) /* Statically allocated to prevent using excess stack. 
*/ static char cpuset_nodelist[CPUSET_NODELIST_LEN]; static DEFINE_SPINLOCK(cpuset_buffer_lock); - struct cgroup *cgrp = task_cs(tsk)->css.cgroup; - rcu_read_lock(); spin_lock(&cpuset_buffer_lock); nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, tsk->mems_allowed); - printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", - tsk->comm, cgroup_name(cgrp), cpuset_nodelist); + printk(KERN_INFO "%s cpuset=", tsk->comm); + pr_cont_cgroup_name(cgrp); + pr_cont(" mems_allowed=%s\n", cpuset_nodelist); spin_unlock(&cpuset_buffer_lock); - rcu_read_unlock(); } /* @@ -2681,12 +2678,12 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) { struct pid *pid; struct task_struct *tsk; - char *buf; + char *buf, *p; struct cgroup_subsys_state *css; int retval; retval = -ENOMEM; - buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + buf = kmalloc(PATH_MAX, GFP_KERNEL); if (!buf) goto out; @@ -2696,14 +2693,16 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v) if (!tsk) goto out_free; + retval = -ENAMETOOLONG; rcu_read_lock(); css = task_css(tsk, cpuset_cgrp_id); - retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); + p = cgroup_path(css->cgroup, buf, PATH_MAX); rcu_read_unlock(); - if (retval < 0) + if (!p) goto out_put_task; - seq_puts(m, buf); + seq_puts(m, p); seq_putc(m, '\n'); + retval = 0; out_put_task: put_task_struct(tsk); out_free: diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index dd52e7ffb10e..30eee3b5293d 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -111,8 +111,7 @@ static char *task_group_path(struct task_group *tg) if (autogroup_path(tg, group_path, PATH_MAX)) return group_path; - cgroup_path(tg->css.cgroup, group_path, PATH_MAX); - return group_path; + return cgroup_path(tg->css.cgroup, group_path, PATH_MAX); } #endif -- cgit v1.2.3 From 3c9c825b8b50de7dbb015e6bfc04bb2da79364d9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:50 -0500 Subject: cgroup: rename cgroupfs_root->number_of_cgroups to ->nr_cgrps and make it atomic_t root->number_of_cgroups is currently an integer protected with cgroup_mutex. Except for sanity checks and proc reporting, the only place it's used is to check whether the root has any child during remount; however, this is a bit flawed as the counter is not decremented when the cgroup is unlinked but when it's released, meaning that there could be an extended period where all cgroups are removed but remount is still not allowed because some internal objects are lingering. While not perfect either, it'd be better to use emptiness test on root->top_cgroup.children. This patch updates cgroup_remount() to test top_cgroup's children instead, which makes number_of_cgroups only actual usage statistics printing in proc implemented in proc_cgroupstats_show(). Let's shorten its name and make it an atomic_t so that we don't have to worry about its synchronization. It's purely auxiliary at this point. 
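Condensed, the change described above amounts to roughly the following (an illustrative sketch drawn from the diff that follows, not the literal hunks):

	/* cgroup_remount(): a hierarchy counts as populated when its top cgroup has children */
	if (!list_empty(&root->top_cgroup.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* nr_cgrps is purely statistical, so plain atomic ops suffice */
	atomic_inc(&root->nr_cgrps);		/* cgroup_create()  */
	atomic_dec(&cgrp->root->nr_cgrps);	/* cgroup_free_fn() */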
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 638df032fb94..cffdb6e2ad08 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -758,7 +758,7 @@ static void cgroup_put_root(struct cgroupfs_root *root) } mutex_lock(&cgroup_mutex); - BUG_ON(root->number_of_cgroups != 1); + BUG_ON(atomic_read(&root->nr_cgrps) != 1); BUG_ON(!list_empty(&cgrp->children)); /* Rebind all subsystems back to the default hierarchy */ @@ -928,9 +928,7 @@ static void cgroup_free_fn(struct work_struct *work) { struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); - mutex_lock(&cgroup_mutex); - cgrp->root->number_of_cgroups--; - mutex_unlock(&cgroup_mutex); + atomic_dec(&cgrp->root->nr_cgrps); /* * We get a ref to the parent, and put the ref when this cgroup is @@ -1320,7 +1318,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) } /* remounting is not allowed for populated hierarchies */ - if (root->number_of_cgroups > 1) { + if (!list_empty(&root->top_cgroup.children)) { ret = -EBUSY; goto out_unlock; } @@ -1360,7 +1358,7 @@ static void init_cgroup_root(struct cgroupfs_root *root) atomic_set(&root->refcnt, 1); INIT_LIST_HEAD(&root->root_list); - root->number_of_cgroups = 1; + atomic_set(&root->nr_cgrps, 1); cgrp->root = root; init_cgroup_housekeeping(cgrp); idr_init(&root->cgroup_idr); @@ -1463,7 +1461,7 @@ static int cgroup_setup_root(struct cgroupfs_root *root) write_unlock(&css_set_lock); BUG_ON(!list_empty(&root_cgrp->children)); - BUG_ON(root->number_of_cgroups != 1); + BUG_ON(atomic_read(&root->nr_cgrps) != 1); kernfs_activate(root_cgrp->kn); ret = 0; @@ -3709,7 +3707,7 @@ static long cgroup_create(struct cgroup *parent, const char *name, /* allocation complete, commit to creation */ list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); - root->number_of_cgroups++; + atomic_inc(&root->nr_cgrps); /* * Grab a reference on the root and parent so that they don't get @@ -4281,7 +4279,7 @@ static int proc_cgroupstats_show(struct seq_file *m, void *v) for_each_subsys(ss, i) seq_printf(m, "%s\t%d\t%d\t%d\n", ss->name, ss->root->hierarchy_id, - ss->root->number_of_cgroups, !ss->disabled); + atomic_read(&ss->root->nr_cgrps), !ss->disabled); mutex_unlock(&cgroup_mutex); return 0; -- cgit v1.2.3 From 776f02fa4e1ad70557c0318c70ce928e0642bee0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 09:29:50 -0500 Subject: cgroup: remove cgroupfs_root->refcnt Currently, cgroupfs_root and its ->top_cgroup are separated reference counted and the latter's is ignored. There's no reason to do this separately. This patch removes cgroupfs_root->refcnt and destroys cgroupfs_root when the top_cgroup is released. * cgroup_put() updated to ignore cgroup_is_dead() test for top cgroups. cgroup_free_fn() updated to handle root destruction when releasing a top cgroup. * As root destruction is now bounced through cgroup destruction, it is asynchronous. Update cgroup_mount() so that it waits for pending release which is currently implemented using msleep(). Converting this to proper wait_queue isn't hard but likely unnecessary. 
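The mount-side wait described above reduces to a tryget-or-retry loop; in rough outline (a sketch condensed from the hunk in the diff below):

	retry:
		mutex_lock(&cgroup_tree_mutex);
		mutex_lock(&cgroup_mutex);

		/* ... look up an existing root matching the mount options ... */

		/* a zero ref on the top cgroup means the root is being destroyed */
		if (!atomic_inc_not_zero(&root->top_cgroup.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&cgroup_tree_mutex);
			msleep(10);
			goto retry;
		}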
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 86 +++++++++++++++++++++++++-------------------------------- 1 file changed, 38 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index cffdb6e2ad08..03845c5d082b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -53,6 +53,7 @@ #include /* TODO: replace with more sophisticated array */ #include /* used in cgroup_attach_task */ #include +#include #include @@ -728,37 +729,16 @@ static void cgroup_free_root(struct cgroupfs_root *root) } } -static void cgroup_get_root(struct cgroupfs_root *root) -{ - /* - * The caller must ensure that @root is alive, which can be - * achieved by holding a ref on one of the member cgroups or - * following a registered reference to @root while holding - * cgroup_tree_mutex. - */ - WARN_ON_ONCE(atomic_read(&root->refcnt) <= 0); - atomic_inc(&root->refcnt); -} - -static void cgroup_put_root(struct cgroupfs_root *root) +static void cgroup_destroy_root(struct cgroupfs_root *root) { struct cgroup *cgrp = &root->top_cgroup; struct cgrp_cset_link *link, *tmp_link; int ret; - /* - * @root's refcnt reaching zero and its deregistration should be - * atomic w.r.t. cgroup_tree_mutex. This ensures that - * cgroup_get_root() is safe to invoke if @root is registered. - */ mutex_lock(&cgroup_tree_mutex); - if (!atomic_dec_and_test(&root->refcnt)) { - mutex_unlock(&cgroup_tree_mutex); - return; - } mutex_lock(&cgroup_mutex); - BUG_ON(atomic_read(&root->nr_cgrps) != 1); + BUG_ON(atomic_read(&root->nr_cgrps)); BUG_ON(!list_empty(&cgrp->children)); /* Rebind all subsystems back to the default hierarchy */ @@ -929,21 +909,24 @@ static void cgroup_free_fn(struct work_struct *work) struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); atomic_dec(&cgrp->root->nr_cgrps); - - /* - * We get a ref to the parent, and put the ref when this cgroup is - * being freed, so it's guaranteed that the parent won't be - * destroyed before its children. - */ - cgroup_put(cgrp->parent); - - /* put the root reference that we took when we created the cgroup */ - cgroup_put_root(cgrp->root); - cgroup_pidlist_destroy_all(cgrp); - kernfs_put(cgrp->kn); - kfree(cgrp); + if (cgrp->parent) { + /* + * We get a ref to the parent, and put the ref when this + * cgroup is being freed, so it's guaranteed that the + * parent won't be destroyed before its children. + */ + cgroup_put(cgrp->parent); + kernfs_put(cgrp->kn); + kfree(cgrp); + } else { + /* + * This is top cgroup's refcnt reaching zero, which + * indicates that the root should be released. 
+ */ + cgroup_destroy_root(cgrp->root); + } } static void cgroup_free_rcu(struct rcu_head *head) @@ -965,7 +948,7 @@ static void cgroup_put(struct cgroup *cgrp) { if (!atomic_dec_and_test(&cgrp->refcnt)) return; - if (WARN_ON_ONCE(!cgroup_is_dead(cgrp))) + if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp))) return; /* @@ -1356,7 +1339,6 @@ static void init_cgroup_root(struct cgroupfs_root *root) { struct cgroup *cgrp = &root->top_cgroup; - atomic_set(&root->refcnt, 1); INIT_LIST_HEAD(&root->root_list); atomic_set(&root->nr_cgrps, 1); cgrp->root = root; @@ -1485,7 +1467,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, struct cgroup_sb_opts opts; struct dentry *dentry; int ret; - +retry: mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); @@ -1531,7 +1513,21 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, } } - cgroup_get_root(root); + /* + * A root's lifetime is governed by its top cgroup. Zero + * ref indicate that the root is being destroyed. Wait for + * destruction to complete so that the subsystems are free. + * We can use wait_queue for the wait but this path is + * super cold. Let's just sleep for a bit and retry. + */ + if (!atomic_inc_not_zero(&root->top_cgroup.refcnt)) { + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); + msleep(10); + goto retry; + } + + ret = 0; goto out_unlock; } @@ -1558,7 +1554,7 @@ out_unlock: dentry = kernfs_mount(fs_type, flags, root->kf_root); if (IS_ERR(dentry)) - cgroup_put_root(root); + cgroup_put(&root->top_cgroup); return dentry; } @@ -1567,7 +1563,7 @@ static void cgroup_kill_sb(struct super_block *sb) struct kernfs_root *kf_root = kernfs_root_from_sb(sb); struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); - cgroup_put_root(root); + cgroup_put(&root->top_cgroup); kernfs_kill_sb(sb); } @@ -3708,12 +3704,6 @@ static long cgroup_create(struct cgroup *parent, const char *name, /* allocation complete, commit to creation */ list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children); atomic_inc(&root->nr_cgrps); - - /* - * Grab a reference on the root and parent so that they don't get - * deleted while there are child cgroups. - */ - cgroup_get_root(root); cgroup_get(parent); /* -- cgit v1.2.3 From d3ba07c3aa9ae3e03329b0a7f1a067c0647aa2af Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:38 -0500 Subject: cgroup: disallow xattr, release_agent and name if sane_behavior Disallow more mount options if sane_behavior. Note that xattr used to generate warning. While at it, simplify option check in cgroup_mount() and update sane_behavior comment in cgroup.h. 
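The consolidated option check boils down to a single test in the option parsing path (sketch matching the hunk in the diff below):

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
		    opts->cpuset_clone_children || opts->release_agent ||
		    opts->name) {
			pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
			return -EINVAL;
		}
	}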
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 03845c5d082b..079c478a4735 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1226,18 +1226,12 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); - if (opts->flags & CGRP_ROOT_NOPREFIX) { - pr_err("cgroup: sane_behavior: noprefix is not allowed\n"); + if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) || + opts->cpuset_clone_children || opts->release_agent || + opts->name) { + pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n"); return -EINVAL; } - - if (opts->cpuset_clone_children) { - pr_err("cgroup: sane_behavior: clone_children is not allowed\n"); - return -EINVAL; - } - - if (opts->flags & CGRP_ROOT_XATTR) - pr_warning("cgroup: sane_behavior: xattr is always available, flag unnecessary\n"); } /* -- cgit v1.2.3 From 35585573055f37837eb752ee22eb5523682ca742 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:38 -0500 Subject: cgroup: drop CGRP_ROOT_SUBSYS_BOUND Before kernfs conversion, due to the way super_block lookup works, cgroup roots were created and made visible before being fully initialized. This in turn required a special flag to mark that the root hasn't been fully initialized so that the destruction path can tell fully bound ones from half initialized. That flag is CGRP_ROOT_SUBSYS_BOUND and no longer necessary after the kernfs conversion as the lookup and creation of new root are atomic w.r.t. cgroup_mutex. This patch removes the flag and passes the requests subsystem mask to cgroup_setup_root() so that it can set the respective mask bits as subsystems are bound. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 079c478a4735..878cd1810ad1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -733,7 +733,6 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) { struct cgroup *cgrp = &root->top_cgroup; struct cgrp_cset_link *link, *tmp_link; - int ret; mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); @@ -742,11 +741,7 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) BUG_ON(!list_empty(&cgrp->children)); /* Rebind all subsystems back to the default hierarchy */ - if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { - ret = rebind_subsystems(root, 0, root->subsys_mask); - /* Shouldn't be able to fail ... */ - BUG_ON(ret); - } + WARN_ON(rebind_subsystems(root, 0, root->subsys_mask)); /* * Release all the links from cset_links to this hierarchy's @@ -1055,13 +1050,7 @@ static int rebind_subsystems(struct cgroupfs_root *root, } } - /* - * Mark @root has finished binding subsystems. @root->subsys_mask - * now matches the bound subsystems. 
- */ - root->flags |= CGRP_ROOT_SUBSYS_BOUND; kernfs_activate(cgrp->kn); - return 0; } @@ -1353,15 +1342,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) init_cgroup_root(root); - /* - * We need to set @root->subsys_mask now so that @root can be - * matched by cgroup_test_super() before it finishes - * initialization; otherwise, competing mounts with the same - * options may try to bind the same subsystems instead of waiting - * for the first one leading to unexpected mount errors. - * SUBSYS_BOUND will be set once actual binding is complete. - */ - root->subsys_mask = opts->subsys_mask; root->flags = opts->flags; if (opts->release_agent) strcpy(root->release_agent_path, opts->release_agent); @@ -1372,7 +1352,7 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) return root; } -static int cgroup_setup_root(struct cgroupfs_root *root) +static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) { LIST_HEAD(tmp_links); struct cgroup *root_cgrp = &root->top_cgroup; @@ -1415,7 +1395,7 @@ static int cgroup_setup_root(struct cgroupfs_root *root) if (ret) goto destroy_root; - ret = rebind_subsystems(root, root->subsys_mask, 0); + ret = rebind_subsystems(root, ss_mask, 0); if (ret) goto destroy_root; @@ -1532,7 +1512,7 @@ retry: goto out_unlock; } - ret = cgroup_setup_root(root); + ret = cgroup_setup_root(root, opts.subsys_mask); if (ret) cgroup_free_root(root); -- cgit v1.2.3 From 56fde9e01de45bcfabbb444d33e8bdd8388d2da0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:38 -0500 Subject: cgroup: enable task_cg_lists on the first cgroup mount Tasks are not linked on their css_sets until cgroup task iteration is actually used. This is to avoid incurring overhead on the fork and exit paths for systems which have cgroup compiled in but don't use it. This lazy binding also affects the task migration path. It has to be careful so that it doesn't link tasks to css_sets when task_cg_lists linking is not enabled yet. Unfortunately, this conditional linking in the migration path interferes with planned migration updates. This patch moves the lazy binding a bit earlier, to the first cgroup mount. It's a clear indication that cgroup is being used on the system and task_cg_lists linking is highly likely to be enabled soon anyway through "tasks" and "cgroup.procs" files. This allows cgroup_task_migrate() to always link @tsk->cg_list. Note that it may still race with cgroup_post_fork() but who wins that race is inconsequential. While at it, make use_task_css_set_links a bool, add sanity checks in cgroup_enable_task_cg_lists() and css_task_iter_start(), and update the former so that it's guaranteed and assumes to run only once. 
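In effect the lazy binding moves from the iterator to the mount path; the shape of the change is roughly as follows (a sketch, see the diff below for the actual hunks):

	/* cgroup_mount(): runs once, before the first hierarchy is set up */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	/* cgroup_task_migrate(): every task is now guaranteed to be linked */
	write_lock(&css_set_lock);
	list_move(&tsk->cg_list, &new_cset->tasks);
	write_unlock(&css_set_lock);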
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 878cd1810ad1..506f6da67ad1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -173,6 +173,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], bool is_add); static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); +static void cgroup_enable_task_cg_lists(void); /** * cgroup_css - obtain a cgroup's css for the specified subsystem @@ -375,7 +376,7 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) * fork()/exit() overhead for people who have cgroups compiled into their * kernel but not actually in use. */ -static int use_task_css_set_links __read_mostly; +static bool use_task_css_set_links __read_mostly; static void __put_css_set(struct css_set *cset, int taskexit) { @@ -1441,6 +1442,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, struct cgroup_sb_opts opts; struct dentry *dentry; int ret; + + /* + * The first time anyone tries to mount a cgroup, enable the list + * linking each css_set to its tasks and fix up all existing tasks. + */ + if (!use_task_css_set_links) + cgroup_enable_task_cg_lists(); retry: mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); @@ -1692,10 +1700,8 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, rcu_assign_pointer(tsk->cgroups, new_cset); task_unlock(tsk); - /* Update the css_set linked lists if we're using them */ write_lock(&css_set_lock); - if (!list_empty(&tsk->cg_list)) - list_move(&tsk->cg_list, &new_cset->tasks); + list_move(&tsk->cg_list, &new_cset->tasks); write_unlock(&css_set_lock); /* @@ -2362,13 +2368,19 @@ int cgroup_task_count(const struct cgroup *cgrp) * To reduce the fork() overhead for systems that are not actually using * their cgroups capability, we don't maintain the lists running through * each css_set to its tasks until we see the list actually used - in other - * words after the first call to css_task_iter_start(). + * words after the first mount. */ static void cgroup_enable_task_cg_lists(void) { struct task_struct *p, *g; + write_lock(&css_set_lock); - use_task_css_set_links = 1; + + if (use_task_css_set_links) + goto out_unlock; + + use_task_css_set_links = true; + /* * We need tasklist_lock because RCU is not safe against * while_each_thread(). Besides, a forking task that has passed @@ -2379,16 +2391,22 @@ static void cgroup_enable_task_cg_lists(void) read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); + + WARN_ON_ONCE(!list_empty(&p->cg_list) || + task_css_set(p) != &init_css_set); + /* * We should check if the process is exiting, otherwise * it will race with cgroup_exit() in that the list * entry won't be deleted though the process has exited. */ - if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) + if (!(p->flags & PF_EXITING)) list_add(&p->cg_list, &task_css_set(p)->tasks); + task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); +out_unlock: write_unlock(&css_set_lock); } @@ -2621,13 +2639,8 @@ void css_task_iter_start(struct cgroup_subsys_state *css, struct css_task_iter *it) __acquires(css_set_lock) { - /* - * The first time anyone tries to iterate across a css, we need to - * enable the list linking each css_set to its tasks, and fix up - * all existing tasks. 
- */ - if (!use_task_css_set_links) - cgroup_enable_task_cg_lists(); + /* no one should try to iterate before mounting cgroups */ + WARN_ON_ONCE(!use_task_css_set_links); read_lock(&css_set_lock); -- cgit v1.2.3 From afeb0f9fd425239aa477c842480f240bfb6325b3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:39 -0500 Subject: cgroup: relocate cgroup_enable_task_cg_lists() Move it above so that prototype isn't necessary. Let's also move the definition of use_task_css_set_links next to it. This is purely cosmetic. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 103 ++++++++++++++++++++++++++------------------------------ 1 file changed, 48 insertions(+), 55 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 506f6da67ad1..2469699408bd 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -173,7 +173,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], bool is_add); static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); -static void cgroup_enable_task_cg_lists(void); /** * cgroup_css - obtain a cgroup's css for the specified subsystem @@ -370,14 +369,6 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) return key; } -/* - * We don't maintain the lists running through each css_set to its task - * until after the first call to css_task_iter_start(). This reduces the - * fork()/exit() overhead for people who have cgroups compiled into their - * kernel but not actually in use. - */ -static bool use_task_css_set_links __read_mostly; - static void __put_css_set(struct css_set *cset, int taskexit) { struct cgrp_cset_link *link, *tmp_link; @@ -1307,6 +1298,54 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) return ret; } +/* + * To reduce the fork() overhead for systems that are not actually using + * their cgroups capability, we don't maintain the lists running through + * each css_set to its tasks until we see the list actually used - in other + * words after the first mount. + */ +static bool use_task_css_set_links __read_mostly; + +static void cgroup_enable_task_cg_lists(void) +{ + struct task_struct *p, *g; + + write_lock(&css_set_lock); + + if (use_task_css_set_links) + goto out_unlock; + + use_task_css_set_links = true; + + /* + * We need tasklist_lock because RCU is not safe against + * while_each_thread(). Besides, a forking task that has passed + * cgroup_post_fork() without seeing use_task_css_set_links = 1 + * is not guaranteed to have its child immediately visible in the + * tasklist if we walk through it with RCU. + */ + read_lock(&tasklist_lock); + do_each_thread(g, p) { + task_lock(p); + + WARN_ON_ONCE(!list_empty(&p->cg_list) || + task_css_set(p) != &init_css_set); + + /* + * We should check if the process is exiting, otherwise + * it will race with cgroup_exit() in that the list + * entry won't be deleted though the process has exited. 
+ */ + if (!(p->flags & PF_EXITING)) + list_add(&p->cg_list, &task_css_set(p)->tasks); + + task_unlock(p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +out_unlock: + write_unlock(&css_set_lock); +} + static void init_cgroup_housekeeping(struct cgroup *cgrp) { atomic_set(&cgrp->refcnt, 1); @@ -2364,52 +2403,6 @@ int cgroup_task_count(const struct cgroup *cgrp) return count; } -/* - * To reduce the fork() overhead for systems that are not actually using - * their cgroups capability, we don't maintain the lists running through - * each css_set to its tasks until we see the list actually used - in other - * words after the first mount. - */ -static void cgroup_enable_task_cg_lists(void) -{ - struct task_struct *p, *g; - - write_lock(&css_set_lock); - - if (use_task_css_set_links) - goto out_unlock; - - use_task_css_set_links = true; - - /* - * We need tasklist_lock because RCU is not safe against - * while_each_thread(). Besides, a forking task that has passed - * cgroup_post_fork() without seeing use_task_css_set_links = 1 - * is not guaranteed to have its child immediately visible in the - * tasklist if we walk through it with RCU. - */ - read_lock(&tasklist_lock); - do_each_thread(g, p) { - task_lock(p); - - WARN_ON_ONCE(!list_empty(&p->cg_list) || - task_css_set(p) != &init_css_set); - - /* - * We should check if the process is exiting, otherwise - * it will race with cgroup_exit() in that the list - * entry won't be deleted though the process has exited. - */ - if (!(p->flags & PF_EXITING)) - list_add(&p->cg_list, &task_css_set(p)->tasks); - - task_unlock(p); - } while_each_thread(g, p); - read_unlock(&tasklist_lock); -out_unlock: - write_unlock(&css_set_lock); -} - /** * css_next_child - find the next child of a given css * @pos_css: the current position (%NULL to initiate traversal) -- cgit v1.2.3 From 07bc356ed2950048d33d667e933e1b913c6e6b6d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:39 -0500 Subject: cgroup: implement cgroup_has_tasks() and unexport cgroup_task_count() cgroup_task_count() read-locks css_set_lock and walks all tasks to count them and then returns the result. The only thing all the users want is determining whether the cgroup is empty or not. This patch implements cgroup_has_tasks() which tests whether cgroup->cset_links is empty, replaces all cgroup_task_count() usages and unexports it. Note that the test isn't synchronized. This is the same as before. The test has always been racy. This will help planned css_set locking update. Signed-off-by: Tejun Heo Acked-by: Li Zefan Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki --- kernel/cgroup.c | 2 +- kernel/cpuset.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2469699408bd..ec7746e5ded1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2391,7 +2391,7 @@ EXPORT_SYMBOL_GPL(cgroup_add_cftypes); * * Return the number of tasks in the cgroup. */ -int cgroup_task_count(const struct cgroup *cgrp) +static int cgroup_task_count(const struct cgroup *cgrp) { int count = 0; struct cgrp_cset_link *link; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e97a6e88d036..ae190b0a196a 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -467,7 +467,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) * be changed to have empty cpus_allowed or mems_allowed. 
*/ ret = -ENOSPC; - if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { + if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) { if (!cpumask_empty(cur->cpus_allowed) && cpumask_empty(trial->cpus_allowed)) goto out; -- cgit v1.2.3 From e406d1cfff6ab189c8676072d211809c94fecaf0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:39 -0500 Subject: cgroup: reimplement cgroup_transfer_tasks() without using css_scan_tasks() Reimplement cgroup_transfer_tasks() so that it repeatedly fetches the first task in the cgroup and then tranfers it. This achieves the same result without using css_scan_tasks() which is scheduled to be removed. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index ec7746e5ded1..893b7b502e18 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2850,15 +2850,6 @@ int css_scan_tasks(struct cgroup_subsys_state *css, return 0; } -static void cgroup_transfer_one_task(struct task_struct *task, void *data) -{ - struct cgroup *new_cgroup = data; - - mutex_lock(&cgroup_mutex); - cgroup_attach_task(new_cgroup, task, false); - mutex_unlock(&cgroup_mutex); -} - /** * cgroup_trasnsfer_tasks - move tasks from one cgroup to another * @to: cgroup to which the tasks will be moved @@ -2866,8 +2857,26 @@ static void cgroup_transfer_one_task(struct task_struct *task, void *data) */ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) { - return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task, - to, NULL); + struct css_task_iter it; + struct task_struct *task; + int ret = 0; + + do { + css_task_iter_start(&from->dummy_css, &it); + task = css_task_iter_next(&it); + if (task) + get_task_struct(task); + css_task_iter_end(&it); + + if (task) { + mutex_lock(&cgroup_mutex); + ret = cgroup_attach_task(to, task, false); + mutex_unlock(&cgroup_mutex); + put_task_struct(task); + } + } while (task && !ret); + + return ret; } /* -- cgit v1.2.3 From 96d365e0b86ee7ec6366c99669687e54c9f145e3 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:40 -0500 Subject: cgroup: make css_set_lock a rwsem and rename it to css_set_rwsem Currently there are two ways to walk tasks of a cgroup - css_task_iter_start/next/end() and css_scan_tasks(). The latter builds on the former but allows blocking while iterating. Unfortunately, the way css_scan_tasks() is implemented is rather nasty, it uses a priority heap of pointers to extract some number of tasks in task creation order and loops over them invoking the callback and repeats that until it reaches the end. It requires either preallocated heap or may fail under memory pressure, while unlikely to be problematic, the complexity is O(N^2), and in general just nasty. We're gonna convert all css_scan_users() to css_task_iter_start/next/end() and remove css_scan_users(). As css_scan_tasks() users may block, let's convert css_set_lock to a rwsem so that tasks can block during css_task_iter_*() is in progress. While this does increase the chance of possible deadlock scenarios, given the current usage, the probability is relatively low, and even if that happens, the right thing to do is updating the iteration in the similar way to css iterators so that it can handle blocking. Most conversions are trivial; however, task_cgroup_path() now expects to be called with css_set_rwsem locked instead of locking itself. 
This is because the function is called with RCU read lock held and rwsem locking should nest outside RCU read lock. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 104 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 57 insertions(+), 47 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 893b7b502e18..89428b9d9933 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -341,11 +342,10 @@ static struct css_set init_css_set; static struct cgrp_cset_link init_cgrp_cset_link; /* - * css_set_lock protects the list of css_set objects, and the chain of - * tasks off each css_set. Nests outside task->alloc_lock due to - * css_task_iter_start(). + * css_set_rwsem protects the list of css_set objects, and the chain of + * tasks off each css_set. */ -static DEFINE_RWLOCK(css_set_lock); +static DECLARE_RWSEM(css_set_rwsem); static int css_set_count; /* @@ -380,9 +380,9 @@ static void __put_css_set(struct css_set *cset, int taskexit) */ if (atomic_add_unless(&cset->refcount, -1, 1)) return; - write_lock(&css_set_lock); + down_write(&css_set_rwsem); if (!atomic_dec_and_test(&cset->refcount)) { - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); return; } @@ -396,7 +396,7 @@ static void __put_css_set(struct css_set *cset, int taskexit) list_del(&link->cset_link); list_del(&link->cgrp_link); - /* @cgrp can't go away while we're holding css_set_lock */ + /* @cgrp can't go away while we're holding css_set_rwsem */ if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) { if (taskexit) set_bit(CGRP_RELEASABLE, &cgrp->flags); @@ -406,7 +406,7 @@ static void __put_css_set(struct css_set *cset, int taskexit) kfree(link); } - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); kfree_rcu(cset, rcu_head); } @@ -627,11 +627,11 @@ static struct css_set *find_css_set(struct css_set *old_cset, /* First see if we already have a cgroup group that matches * the desired set */ - read_lock(&css_set_lock); + down_read(&css_set_rwsem); cset = find_existing_css_set(old_cset, cgrp, template); if (cset) get_css_set(cset); - read_unlock(&css_set_lock); + up_read(&css_set_rwsem); if (cset) return cset; @@ -655,7 +655,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, * find_existing_css_set() */ memcpy(cset->subsys, template, sizeof(cset->subsys)); - write_lock(&css_set_lock); + down_write(&css_set_rwsem); /* Add reference counts and links from the new css_set. */ list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; @@ -673,7 +673,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, key = css_set_hash(cset->subsys); hash_add(css_set_table, &cset->hlist, key); - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); return cset; } @@ -739,14 +739,14 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) * Release all the links from cset_links to this hierarchy's * root cgroup */ - write_lock(&css_set_lock); + down_write(&css_set_rwsem); list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { list_del(&link->cset_link); list_del(&link->cgrp_link); kfree(link); } - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); if (!list_empty(&root->root_list)) { list_del(&root->root_list); @@ -764,7 +764,7 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) /* * Return the cgroup for "task" from the given hierarchy. Must be - * called with cgroup_mutex held. 
+ * called with cgroup_mutex and css_set_rwsem held. */ static struct cgroup *task_cgroup_from_root(struct task_struct *task, struct cgroupfs_root *root) @@ -772,8 +772,9 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, struct css_set *cset; struct cgroup *res = NULL; - BUG_ON(!mutex_is_locked(&cgroup_mutex)); - read_lock(&css_set_lock); + lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&css_set_rwsem); + /* * No need to lock the task - since we hold cgroup_mutex the * task can't change groups, so the only thing that can happen @@ -794,7 +795,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, } } } - read_unlock(&css_set_lock); + BUG_ON(!res); return res; } @@ -1310,7 +1311,7 @@ static void cgroup_enable_task_cg_lists(void) { struct task_struct *p, *g; - write_lock(&css_set_lock); + down_write(&css_set_rwsem); if (use_task_css_set_links) goto out_unlock; @@ -1343,7 +1344,7 @@ static void cgroup_enable_task_cg_lists(void) } while_each_thread(g, p); read_unlock(&tasklist_lock); out_unlock: - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); } static void init_cgroup_housekeeping(struct cgroup *cgrp) @@ -1408,7 +1409,7 @@ static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) root_cgrp->id = ret; /* - * We're accessing css_set_count without locking css_set_lock here, + * We're accessing css_set_count without locking css_set_rwsem here, * but that's OK - it can only be increased by someone holding * cgroup_lock, and that's us. The worst that can happen is that we * have some link structures left over @@ -1451,10 +1452,10 @@ static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) * Link the top cgroup in this hierarchy into all the css_set * objects. */ - write_lock(&css_set_lock); + down_write(&css_set_rwsem); hash_for_each(css_set_table, i, cset, hlist) link_css_set(&tmp_links, cset, root_cgrp); - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); BUG_ON(!list_empty(&root_cgrp->children)); BUG_ON(atomic_read(&root->nr_cgrps) != 1); @@ -1617,6 +1618,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) char *path = NULL; mutex_lock(&cgroup_mutex); + down_read(&css_set_rwsem); root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); @@ -1629,6 +1631,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) path = buf; } + up_read(&css_set_rwsem); mutex_unlock(&cgroup_mutex); return path; } @@ -1739,9 +1742,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, rcu_assign_pointer(tsk->cgroups, new_cset); task_unlock(tsk); - write_lock(&css_set_lock); + down_write(&css_set_rwsem); list_move(&tsk->cg_list, &new_cset->tasks); - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); /* * We just gained a reference on old_cset by taking it from the @@ -1799,6 +1802,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, * already PF_EXITING could be freed from underneath us unless we * take an rcu_read_lock. */ + down_read(&css_set_rwsem); rcu_read_lock(); do { struct task_and_cgroup ent; @@ -1826,6 +1830,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, break; } while_each_thread(leader, tsk); rcu_read_unlock(); + up_read(&css_set_rwsem); /* remember the number of threads in the array for later. 
*/ group_size = i; tset.tc_array = group; @@ -2003,7 +2008,11 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) mutex_lock(&cgroup_mutex); for_each_active_root(root) { - struct cgroup *from_cgrp = task_cgroup_from_root(from, root); + struct cgroup *from_cgrp; + + down_read(&css_set_rwsem); + from_cgrp = task_cgroup_from_root(from, root); + up_read(&css_set_rwsem); retval = cgroup_attach_task(from_cgrp, tsk, false); if (retval) @@ -2396,10 +2405,10 @@ static int cgroup_task_count(const struct cgroup *cgrp) int count = 0; struct cgrp_cset_link *link; - read_lock(&css_set_lock); + down_read(&css_set_rwsem); list_for_each_entry(link, &cgrp->cset_links, cset_link) count += atomic_read(&link->cset->refcount); - read_unlock(&css_set_lock); + up_read(&css_set_rwsem); return count; } @@ -2630,12 +2639,12 @@ static void css_advance_task_iter(struct css_task_iter *it) */ void css_task_iter_start(struct cgroup_subsys_state *css, struct css_task_iter *it) - __acquires(css_set_lock) + __acquires(css_set_rwsem) { /* no one should try to iterate before mounting cgroups */ WARN_ON_ONCE(!use_task_css_set_links); - read_lock(&css_set_lock); + down_read(&css_set_rwsem); it->origin_css = css; it->cset_link = &css->cgroup->cset_links; @@ -2683,9 +2692,9 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) * Finish task iteration started by css_task_iter_start(). */ void css_task_iter_end(struct css_task_iter *it) - __releases(css_set_lock) + __releases(css_set_rwsem) { - read_unlock(&css_set_lock); + up_read(&css_set_rwsem); } static inline int started_after_time(struct task_struct *t1, @@ -2735,7 +2744,7 @@ static inline int started_after(void *p1, void *p2) * * @test may be NULL, meaning always true (select all tasks), which * effectively duplicates css_task_iter_{start,next,end}() but does not - * lock css_set_lock for the call to @process. + * lock css_set_rwsem for the call to @process. * * It is guaranteed that @process will act on every task that is a member * of @css for the duration of this call. This function may or may not @@ -3867,12 +3876,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) lockdep_assert_held(&cgroup_mutex); /* - * css_set_lock synchronizes access to ->cset_links and prevents + * css_set_rwsem synchronizes access to ->cset_links and prevents * @cgrp from being removed while __put_css_set() is in progress. */ - read_lock(&css_set_lock); + down_read(&css_set_rwsem); empty = list_empty(&cgrp->cset_links); - read_unlock(&css_set_lock); + up_read(&css_set_rwsem); if (!empty) return -EBUSY; @@ -4208,6 +4217,7 @@ int proc_cgroup_show(struct seq_file *m, void *v) retval = 0; mutex_lock(&cgroup_mutex); + down_read(&css_set_rwsem); for_each_active_root(root) { struct cgroup_subsys *ss; @@ -4233,6 +4243,7 @@ int proc_cgroup_show(struct seq_file *m, void *v) } out_unlock: + up_read(&css_set_rwsem); mutex_unlock(&cgroup_mutex); put_task_struct(tsk); out_free: @@ -4328,12 +4339,12 @@ void cgroup_post_fork(struct task_struct *child) * lock on fork. */ if (use_task_css_set_links) { - write_lock(&css_set_lock); + down_write(&css_set_rwsem); task_lock(child); if (list_empty(&child->cg_list)) list_add(&child->cg_list, &task_css_set(child)->tasks); task_unlock(child); - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); } /* @@ -4390,15 +4401,14 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) int i; /* - * Unlink from the css_set task list if necessary. 
- * Optimistically check cg_list before taking - * css_set_lock + * Unlink from the css_set task list if necessary. Optimistically + * check cg_list before taking css_set_rwsem. */ if (!list_empty(&tsk->cg_list)) { - write_lock(&css_set_lock); + down_write(&css_set_rwsem); if (!list_empty(&tsk->cg_list)) list_del_init(&tsk->cg_list); - write_unlock(&css_set_lock); + up_write(&css_set_rwsem); } /* Reassign the task to the init_css_set. */ @@ -4650,7 +4660,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) if (!name_buf) return -ENOMEM; - read_lock(&css_set_lock); + down_read(&css_set_rwsem); rcu_read_lock(); cset = rcu_dereference(current->cgroups); list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { @@ -4666,7 +4676,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) c->root->hierarchy_id, name); } rcu_read_unlock(); - read_unlock(&css_set_lock); + up_read(&css_set_rwsem); kfree(name_buf); return 0; } @@ -4677,7 +4687,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) struct cgroup_subsys_state *css = seq_css(seq); struct cgrp_cset_link *link; - read_lock(&css_set_lock); + down_read(&css_set_rwsem); list_for_each_entry(link, &css->cgroup->cset_links, cset_link) { struct css_set *cset = link->cset; struct task_struct *task; @@ -4693,7 +4703,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) } } } - read_unlock(&css_set_lock); + up_read(&css_set_rwsem); return 0; } -- cgit v1.2.3 From d66393e54e0a9dc743e440eb36c58bd1158a560e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:40 -0500 Subject: cpuset: use css_task_iter_start/next/end() instead of css_scan_tasks() Now that css_task_iter_start/next_end() supports blocking while iterating, there's no reason to use css_scan_tasks() which is more cumbersome to use and scheduled to be removed. Convert all css_scan_tasks() usages in cpuset to css_task_iter_start/next/end(). This simplifies the code by removing heap allocation and callbacks. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cpuset.c | 186 ++++++++++++++++++-------------------------------------- 1 file changed, 58 insertions(+), 128 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index ae190b0a196a..65ae0bdf4af8 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -828,56 +828,37 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs) return cs; } -/** - * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's - * @tsk: task to test - * @data: cpuset to @tsk belongs to - * - * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed - * mask needs to be changed. - * - * We don't need to re-check for the cgroup/cpuset membership, since we're - * holding cpuset_mutex at this point. - */ -static void cpuset_change_cpumask(struct task_struct *tsk, void *data) -{ - struct cpuset *cs = data; - struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); - - set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed); -} - /** * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed - * @heap: if NULL, defer allocating heap memory to css_scan_tasks() - * - * Called with cpuset_mutex held - * - * The css_scan_tasks() function will scan all the tasks in a cgroup, - * calling callback functions for each. * - * No return value. It's guaranteed that css_scan_tasks() always returns 0 - * if @heap != NULL. 
+ * Iterate through each task of @cs updating its cpus_allowed to the + * effective cpuset's. As this function is called with cpuset_mutex held, + * cpuset membership stays stable. */ -static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) +static void update_tasks_cpumask(struct cpuset *cs) { - css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap); + struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); + struct css_task_iter it; + struct task_struct *task; + + css_task_iter_start(&cs->css, &it); + while ((task = css_task_iter_next(&it))) + set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed); + css_task_iter_end(&it); } /* * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy. * @root_cs: the root cpuset of the hierarchy * @update_root: update root cpuset or not? - * @heap: the heap used by css_scan_tasks() * * This will update cpumasks of tasks in @root_cs and all other empty cpusets * which take on cpumask of @root_cs. * * Called with cpuset_mutex held */ -static void update_tasks_cpumask_hier(struct cpuset *root_cs, - bool update_root, struct ptr_heap *heap) +static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; @@ -898,7 +879,7 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, continue; rcu_read_unlock(); - update_tasks_cpumask(cp, heap); + update_tasks_cpumask(cp); rcu_read_lock(); css_put(&cp->css); @@ -914,7 +895,6 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, const char *buf) { - struct ptr_heap heap; int retval; int is_load_balanced; @@ -947,19 +927,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) return retval; - retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); - if (retval) - return retval; - is_load_balanced = is_sched_load_balance(trialcs); mutex_lock(&callback_mutex); cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); mutex_unlock(&callback_mutex); - update_tasks_cpumask_hier(cs, true, &heap); - - heap_free(&heap); + update_tasks_cpumask_hier(cs, true); if (is_load_balanced) rebuild_sched_domains_locked(); @@ -1052,53 +1026,22 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, task_unlock(tsk); } -struct cpuset_change_nodemask_arg { - struct cpuset *cs; - nodemask_t *newmems; -}; - -/* - * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy - * of it to cpuset's new mems_allowed, and migrate pages to new nodes if - * memory_migrate flag is set. Called with cpuset_mutex held. - */ -static void cpuset_change_nodemask(struct task_struct *p, void *data) -{ - struct cpuset_change_nodemask_arg *arg = data; - struct cpuset *cs = arg->cs; - struct mm_struct *mm; - int migrate; - - cpuset_change_task_nodemask(p, arg->newmems); - - mm = get_task_mm(p); - if (!mm) - return; - - migrate = is_memory_migrate(cs); - - mpol_rebind_mm(mm, &cs->mems_allowed); - if (migrate) - cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems); - mmput(mm); -} - static void *cpuset_being_rebound; /** * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. * @cs: the cpuset in which each task's mems_allowed mask needs to be changed - * @heap: if NULL, defer allocating heap memory to css_scan_tasks() * - * Called with cpuset_mutex held. No return value. It's guaranteed that - * css_scan_tasks() always returns 0 if @heap != NULL. 
+ * Iterate through each task of @cs updating its mems_allowed to the + * effective cpuset's. As this function is called with cpuset_mutex held, + * cpuset membership stays stable. */ -static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) +static void update_tasks_nodemask(struct cpuset *cs) { static nodemask_t newmems; /* protected by cpuset_mutex */ struct cpuset *mems_cs = effective_nodemask_cpuset(cs); - struct cpuset_change_nodemask_arg arg = { .cs = cs, - .newmems = &newmems }; + struct css_task_iter it; + struct task_struct *task; cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ @@ -1114,7 +1057,25 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. */ - css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap); + css_task_iter_start(&cs->css, &it); + while ((task = css_task_iter_next(&it))) { + struct mm_struct *mm; + bool migrate; + + cpuset_change_task_nodemask(task, &newmems); + + mm = get_task_mm(task); + if (!mm) + continue; + + migrate = is_memory_migrate(cs); + + mpol_rebind_mm(mm, &cs->mems_allowed); + if (migrate) + cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); + mmput(mm); + } + css_task_iter_end(&it); /* * All the tasks' nodemasks have been updated, update @@ -1130,15 +1091,13 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap) * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy. * @cs: the root cpuset of the hierarchy * @update_root: update the root cpuset or not? - * @heap: the heap used by css_scan_tasks() * * This will update nodemasks of tasks in @root_cs and all other empty cpusets * which take on nodemask of @root_cs. * * Called with cpuset_mutex held */ -static void update_tasks_nodemask_hier(struct cpuset *root_cs, - bool update_root, struct ptr_heap *heap) +static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; @@ -1159,7 +1118,7 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs, continue; rcu_read_unlock(); - update_tasks_nodemask(cp, heap); + update_tasks_nodemask(cp); rcu_read_lock(); css_put(&cp->css); @@ -1184,7 +1143,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, const char *buf) { int retval; - struct ptr_heap heap; /* * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; @@ -1223,17 +1181,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) goto done; - retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); - if (retval < 0) - goto done; - mutex_lock(&callback_mutex); cs->mems_allowed = trialcs->mems_allowed; mutex_unlock(&callback_mutex); - update_tasks_nodemask_hier(cs, true, &heap); - - heap_free(&heap); + update_tasks_nodemask_hier(cs, true); done: return retval; } @@ -1260,39 +1212,23 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) return 0; } -/** - * cpuset_change_flag - make a task's spread flags the same as its cpuset's - * @tsk: task to be updated - * @data: cpuset to @tsk belongs to - * - * Called by css_scan_tasks() for each task in a cgroup. - * - * We don't need to re-check for the cgroup/cpuset membership, since we're - * holding cpuset_mutex at this point. 
- */ -static void cpuset_change_flag(struct task_struct *tsk, void *data) -{ - struct cpuset *cs = data; - - cpuset_update_task_spread_flag(cs, tsk); -} - /** * update_tasks_flags - update the spread flags of tasks in the cpuset. * @cs: the cpuset in which each task's spread flags needs to be changed - * @heap: if NULL, defer allocating heap memory to css_scan_tasks() - * - * Called with cpuset_mutex held * - * The css_scan_tasks() function will scan all the tasks in a cgroup, - * calling callback functions for each. - * - * No return value. It's guaranteed that css_scan_tasks() always returns 0 - * if @heap != NULL. + * Iterate through each task of @cs updating its spread flags. As this + * function is called with cpuset_mutex held, cpuset membership stays + * stable. */ -static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap) +static void update_tasks_flags(struct cpuset *cs) { - css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap); + struct css_task_iter it; + struct task_struct *task; + + css_task_iter_start(&cs->css, &it); + while ((task = css_task_iter_next(&it))) + cpuset_update_task_spread_flag(cs, task); + css_task_iter_end(&it); } /* @@ -1310,7 +1246,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, struct cpuset *trialcs; int balance_flag_changed; int spread_flag_changed; - struct ptr_heap heap; int err; trialcs = alloc_trial_cpuset(cs); @@ -1326,10 +1261,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, if (err < 0) goto out; - err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); - if (err < 0) - goto out; - balance_flag_changed = (is_sched_load_balance(cs) != is_sched_load_balance(trialcs)); @@ -1344,8 +1275,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, rebuild_sched_domains_locked(); if (spread_flag_changed) - update_tasks_flags(cs, &heap); - heap_free(&heap); + update_tasks_flags(cs); out: free_trial_cpuset(trialcs); return err; @@ -2138,7 +2068,7 @@ retry: */ if ((sane && cpumask_empty(cs->cpus_allowed)) || (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed))) - update_tasks_cpumask(cs, NULL); + update_tasks_cpumask(cs); mutex_lock(&callback_mutex); nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems); @@ -2152,7 +2082,7 @@ retry: */ if ((sane && nodes_empty(cs->mems_allowed)) || (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed))) - update_tasks_nodemask(cs, NULL); + update_tasks_nodemask(cs); is_empty = cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed); @@ -2214,7 +2144,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) mutex_lock(&callback_mutex); top_cpuset.mems_allowed = new_mems; mutex_unlock(&callback_mutex); - update_tasks_nodemask(&top_cpuset, NULL); + update_tasks_nodemask(&top_cpuset); } mutex_unlock(&cpuset_mutex); -- cgit v1.2.3 From 889ed9ceaa97bb02bf5d7349e24639f7fc5f4fa0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:40 -0500 Subject: cgroup: remove css_scan_tasks() css_scan_tasks() doesn't have any user left. Remove it. 
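For reference, the css_task_iter pattern that replaces css_scan_tasks() throughout this series looks like the following (illustrative only; update_one_task() is a placeholder for the per-task work, and update_tasks_cpumask() above is a concrete instance):

	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		update_one_task(task);	/* per-task work, may block under the rwsem */
	css_task_iter_end(&it);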
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 162 -------------------------------------------------------- 1 file changed, 162 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 89428b9d9933..05c0c23549f9 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2697,168 +2697,6 @@ void css_task_iter_end(struct css_task_iter *it) up_read(&css_set_rwsem); } -static inline int started_after_time(struct task_struct *t1, - struct timespec *time, - struct task_struct *t2) -{ - int start_diff = timespec_compare(&t1->start_time, time); - if (start_diff > 0) { - return 1; - } else if (start_diff < 0) { - return 0; - } else { - /* - * Arbitrarily, if two processes started at the same - * time, we'll say that the lower pointer value - * started first. Note that t2 may have exited by now - * so this may not be a valid pointer any longer, but - * that's fine - it still serves to distinguish - * between two tasks started (effectively) simultaneously. - */ - return t1 > t2; - } -} - -/* - * This function is a callback from heap_insert() and is used to order - * the heap. - * In this case we order the heap in descending task start time. - */ -static inline int started_after(void *p1, void *p2) -{ - struct task_struct *t1 = p1; - struct task_struct *t2 = p2; - return started_after_time(t1, &t2->start_time, t2); -} - -/** - * css_scan_tasks - iterate though all the tasks in a css - * @css: the css to iterate tasks of - * @test: optional test callback - * @process: process callback - * @data: data passed to @test and @process - * @heap: optional pre-allocated heap used for task iteration - * - * Iterate through all the tasks in @css, calling @test for each, and if it - * returns %true, call @process for it also. - * - * @test may be NULL, meaning always true (select all tasks), which - * effectively duplicates css_task_iter_{start,next,end}() but does not - * lock css_set_rwsem for the call to @process. - * - * It is guaranteed that @process will act on every task that is a member - * of @css for the duration of this call. This function may or may not - * call @process for tasks that exit or move to a different css during the - * call, or are forked or move into the css during the call. - * - * Note that @test may be called with locks held, and may in some - * situations be called multiple times for the same task, so it should be - * cheap. - * - * If @heap is non-NULL, a heap has been pre-allocated and will be used for - * heap operations (and its "gt" member will be overwritten), else a - * temporary heap will be used (allocation of which may cause this function - * to fail). 
- */ -int css_scan_tasks(struct cgroup_subsys_state *css, - bool (*test)(struct task_struct *, void *), - void (*process)(struct task_struct *, void *), - void *data, struct ptr_heap *heap) -{ - int retval, i; - struct css_task_iter it; - struct task_struct *p, *dropped; - /* Never dereference latest_task, since it's not refcounted */ - struct task_struct *latest_task = NULL; - struct ptr_heap tmp_heap; - struct timespec latest_time = { 0, 0 }; - - if (heap) { - /* The caller supplied our heap and pre-allocated its memory */ - heap->gt = &started_after; - } else { - /* We need to allocate our own heap memory */ - heap = &tmp_heap; - retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after); - if (retval) - /* cannot allocate the heap */ - return retval; - } - - again: - /* - * Scan tasks in the css, using the @test callback to determine - * which are of interest, and invoking @process callback on the - * ones which need an update. Since we don't want to hold any - * locks during the task updates, gather tasks to be processed in a - * heap structure. The heap is sorted by descending task start - * time. If the statically-sized heap fills up, we overflow tasks - * that started later, and in future iterations only consider tasks - * that started after the latest task in the previous pass. This - * guarantees forward progress and that we don't miss any tasks. - */ - heap->size = 0; - css_task_iter_start(css, &it); - while ((p = css_task_iter_next(&it))) { - /* - * Only affect tasks that qualify per the caller's callback, - * if he provided one - */ - if (test && !test(p, data)) - continue; - /* - * Only process tasks that started after the last task - * we processed - */ - if (!started_after_time(p, &latest_time, latest_task)) - continue; - dropped = heap_insert(heap, p); - if (dropped == NULL) { - /* - * The new task was inserted; the heap wasn't - * previously full - */ - get_task_struct(p); - } else if (dropped != p) { - /* - * The new task was inserted, and pushed out a - * different task - */ - get_task_struct(p); - put_task_struct(dropped); - } - /* - * Else the new task was newer than anything already in - * the heap and wasn't inserted - */ - } - css_task_iter_end(&it); - - if (heap->size) { - for (i = 0; i < heap->size; i++) { - struct task_struct *q = heap->ptrs[i]; - if (i == 0) { - latest_time = q->start_time; - latest_task = q; - } - /* Process the task per the caller's callback */ - process(q, data); - put_task_struct(q); - } - /* - * If we had to process any tasks at all, scan again - * in case some of them were in the middle of forking - * children that didn't get processed. - * Not the most efficient way to do it, but it avoids - * having to take callback_mutex in the fork path - */ - goto again; - } - if (heap == &tmp_heap) - heap_free(&tmp_heap); - return 0; -} - /** * cgroup_trasnsfer_tasks - move tasks from one cgroup to another * @to: cgroup to which the tasks will be moved -- cgit v1.2.3 From 89c5509b0d71d1609761bf72d33333ab206dac9f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:40 -0500 Subject: cgroup: separate out put_css_set_locked() and remove put_css_set_taskexit() put_css_set() is performed in two steps - it first tries to put without grabbing css_set_rwsem if such put wouldn't make the count zero. If that fails, it puts after write-locking css_set_rwsem. This patch separates out the second phase into put_css_set_locked() which should be called with css_set_rwsem locked. 
Also, put_css_set_taskexit() is droped and put_css_set() is made to take @taskexit. There are only a handful users of these functions. No point in providing different variants. put_css_locked() will be used by later changes. This patch doesn't introduce any functional changes. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 50 +++++++++++++++++++++++--------------------------- 1 file changed, 23 insertions(+), 27 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 05c0c23549f9..17b10b8efbcf 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -369,22 +369,14 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) return key; } -static void __put_css_set(struct css_set *cset, int taskexit) +static void put_css_set_locked(struct css_set *cset, bool taskexit) { struct cgrp_cset_link *link, *tmp_link; - /* - * Ensure that the refcount doesn't hit zero while any readers - * can see it. Similar to atomic_dec_and_lock(), but for an - * rwlock - */ - if (atomic_add_unless(&cset->refcount, -1, 1)) - return; - down_write(&css_set_rwsem); - if (!atomic_dec_and_test(&cset->refcount)) { - up_write(&css_set_rwsem); + lockdep_assert_held(&css_set_rwsem); + + if (!atomic_dec_and_test(&cset->refcount)) return; - } /* This css_set is dead. unlink it and release cgroup refcounts */ hash_del(&cset->hlist); @@ -406,10 +398,24 @@ static void __put_css_set(struct css_set *cset, int taskexit) kfree(link); } - up_write(&css_set_rwsem); kfree_rcu(cset, rcu_head); } +static void put_css_set(struct css_set *cset, bool taskexit) +{ + /* + * Ensure that the refcount doesn't hit zero while any readers + * can see it. Similar to atomic_dec_and_lock(), but for an + * rwlock + */ + if (atomic_add_unless(&cset->refcount, -1, 1)) + return; + + down_write(&css_set_rwsem); + put_css_set_locked(cset, taskexit); + up_write(&css_set_rwsem); +} + /* * refcounted get/put for css_set objects */ @@ -418,16 +424,6 @@ static inline void get_css_set(struct css_set *cset) atomic_inc(&cset->refcount); } -static inline void put_css_set(struct css_set *cset) -{ - __put_css_set(cset, 0); -} - -static inline void put_css_set_taskexit(struct css_set *cset) -{ - __put_css_set(cset, 1); -} - /** * compare_css_sets - helper function for find_existing_css_set(). * @cset: candidate css_set being tested @@ -1752,7 +1748,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, * we're safe to drop it here; it will be freed under RCU. */ set_bit(CGRP_RELEASABLE, &old_cgrp->flags); - put_css_set(old_cset); + put_css_set(old_cset, false); } /** @@ -1898,7 +1894,7 @@ out_put_css_set_refs: tc = flex_array_get(group, i); if (!tc->cset) break; - put_css_set(tc->cset); + put_css_set(tc->cset, false); } } out_cancel_attach: @@ -3715,7 +3711,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) /* * css_set_rwsem synchronizes access to ->cset_links and prevents - * @cgrp from being removed while __put_css_set() is in progress. + * @cgrp from being removed while put_css_set() is in progress. 
*/ down_read(&css_set_rwsem); empty = list_empty(&cgrp->cset_links); @@ -4267,7 +4263,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) } task_unlock(tsk); - put_css_set_taskexit(cset); + put_css_set(cset, true); } static void check_for_release(struct cgroup *cgrp) -- cgit v1.2.3 From cb0f1fe9ba47c202a98a9d41ad5c12c0ac7732e9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:41 -0500 Subject: cgroup: move css_set_rwsem locking outside of cgroup_task_migrate() Instead of repeatedly locking and unlocking css_set_rwsem inside cgroup_task_migrate(), update cgroup_attach_task() to grab it outside of the loop and update cgroup_task_migrate() to use put_css_set_locked(). Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 17b10b8efbcf..704c590a81d7 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1715,10 +1715,13 @@ int cgroup_taskset_size(struct cgroup_taskset *tset) EXPORT_SYMBOL_GPL(cgroup_taskset_size); -/* +/** * cgroup_task_migrate - move a task from one cgroup to another. + * @old_cgrp; the cgroup @tsk is being migrated from + * @tsk: the task being migrated + * @new_cset: the new css_set @tsk is being attached to * - * Must be called with cgroup_mutex and threadgroup locked. + * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked. */ static void cgroup_task_migrate(struct cgroup *old_cgrp, struct task_struct *tsk, @@ -1726,6 +1729,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, { struct css_set *old_cset; + lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&css_set_rwsem); + /* * We are synchronized through threadgroup_lock() against PF_EXITING * setting such that we can't race against cgroup_exit() changing the @@ -1738,9 +1744,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, rcu_assign_pointer(tsk->cgroups, new_cset); task_unlock(tsk); - down_write(&css_set_rwsem); list_move(&tsk->cg_list, &new_cset->tasks); - up_write(&css_set_rwsem); /* * We just gained a reference on old_cset by taking it from the @@ -1748,7 +1752,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, * we're safe to drop it here; it will be freed under RCU. */ set_bit(CGRP_RELEASABLE, &old_cgrp->flags); - put_css_set(old_cset, false); + put_css_set_locked(old_cset, false); } /** @@ -1871,10 +1875,12 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, * proceed to move all tasks to the new cgroup. There are no * failure cases after here, so this is the commit point. */ + down_write(&css_set_rwsem); for (i = 0; i < group_size; i++) { tc = flex_array_get(group, i); cgroup_task_migrate(tc->cgrp, tc->task, tc->cset); } + up_write(&css_set_rwsem); /* nothing is sensitive to fork() after this point. */ /* -- cgit v1.2.3 From 924f0d9a2078f49ff331bb43196ec5afadc16b8f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:41 -0500 Subject: cgroup: drop @skip_css from cgroup_taskset_for_each() If !NULL, @skip_css makes cgroup_taskset_for_each() skip the matching css. The intention of the interface is to make it easy to skip css's (cgroup_subsys_states) which already match the migration target; however, this is entirely unnecessary as migration taskset doesn't include tasks which are already in the target cgroup. Drop @skip_css from cgroup_taskset_for_each(). 
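With the argument gone, an attach-style callback simply visits every task in the set. A minimal sketch of the resulting shape (illustrative only; example_attach() and example_move_one() are invented names):

    static void example_attach(struct cgroup_subsys_state *css,
                               struct cgroup_taskset *tset)
    {
            struct task_struct *task;

            /* the taskset never contains tasks that are already in the
             * target cgroup, so there is nothing to skip */
            cgroup_taskset_for_each(task, tset)
                    example_move_one(task);
    }
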
Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Daniel Borkmann --- kernel/cgroup_freezer.c | 2 +- kernel/cpuset.c | 4 ++-- kernel/events/core.c | 2 +- kernel/sched/core.c | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 98ea26a99076..7201a637c405 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -187,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, * current state before executing the following - !frozen tasks may * be visible in a FROZEN cgroup and frozen tasks in a THAWED one. */ - cgroup_taskset_for_each(task, new_css, tset) { + cgroup_taskset_for_each(task, tset) { if (!(freezer->state & CGROUP_FREEZING)) { __thaw_task(task); } else { diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 65ae0bdf4af8..bf20e4ac2f75 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1398,7 +1398,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))) goto out_unlock; - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { /* * Kthreads which disallow setaffinity shouldn't be moved * to a new cpuset; we don't want to change their cpu @@ -1467,7 +1467,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css, guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to); - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { /* * can_attach beforehand should guarantee that this doesn't * fail. TODO: have a better way to handle failure here diff --git a/kernel/events/core.c b/kernel/events/core.c index a3c3ab50271a..6dd714955b04 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8021,7 +8021,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css, { struct task_struct *task; - cgroup_taskset_for_each(task, css, tset) + cgroup_taskset_for_each(task, tset) task_function_call(task, __perf_cgroup_move, task); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d4cfc5561830..ba386a06ab11 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7600,7 +7600,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, { struct task_struct *task; - cgroup_taskset_for_each(task, css, tset) { + cgroup_taskset_for_each(task, tset) { #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; @@ -7618,7 +7618,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css, { struct task_struct *task; - cgroup_taskset_for_each(task, css, tset) + cgroup_taskset_for_each(task, tset) sched_move_task(task); } -- cgit v1.2.3 From 57fce0a68e3aa71d223d9023aae66c7393970c34 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:41 -0500 Subject: cpuset: don't use cgroup_taskset_cur_css() cgroup_taskset_cur_css() will be removed during the planned resturcturing of migration path. The only use of cgroup_taskset_cur_css() is finding out the old cgroup_subsys_state of the leader in cpuset_attach(). This usage can easily be removed by remembering the old value from cpuset_can_attach(). 
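Condensed from the diff below, the replacement is to cache the leader's old cpuset in ->can_attach() and read it back in ->attach() (a single static slot works here because attach operations are serialized):

    static struct cpuset *cpuset_attach_old_cs;

    /* in cpuset_can_attach(): remember the value for cpuset_attach() */
    cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));

    /* in cpuset_attach(): no cgroup_taskset_cur_css() lookup needed */
    struct cpuset *oldcs = cpuset_attach_old_cs;
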
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cpuset.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index bf20e4ac2f75..d8bec21d7a11 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1379,6 +1379,8 @@ static int fmeter_getrate(struct fmeter *fmp) return val; } +static struct cpuset *cpuset_attach_old_cs; + /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ static int cpuset_can_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) @@ -1387,6 +1389,9 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css, struct task_struct *task; int ret; + /* used later by cpuset_attach() */ + cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset)); + mutex_lock(&cpuset_mutex); /* @@ -1450,10 +1455,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css, struct mm_struct *mm; struct task_struct *task; struct task_struct *leader = cgroup_taskset_first(tset); - struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset, - cpuset_cgrp_id); struct cpuset *cs = css_cs(css); - struct cpuset *oldcs = css_cs(oldcss); + struct cpuset *oldcs = cpuset_attach_old_cs; struct cpuset *cpus_cs = effective_cpumask_cpuset(cs); struct cpuset *mems_cs = effective_nodemask_cpuset(cs); -- cgit v1.2.3 From bc668c7519ff8b4681af80e92f463bec7bf7cf9e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:43 -0500 Subject: cgroup: remove cgroup_taskset_cur_css() and cgroup_taskset_size() The two functions don't have any users left. Remove them along with cgroup_taskset->cur_cgrp. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 30 ------------------------------ 1 file changed, 30 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 704c590a81d7..a9d9bbb12310 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1647,7 +1647,6 @@ struct cgroup_taskset { struct flex_array *tc_array; int tc_array_len; int idx; - struct cgroup *cur_cgrp; }; /** @@ -1662,7 +1661,6 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) tset->idx = 0; return cgroup_taskset_next(tset); } else { - tset->cur_cgrp = tset->single.cgrp; return tset->single.task; } } @@ -1683,38 +1681,10 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) return NULL; tc = flex_array_get(tset->tc_array, tset->idx++); - tset->cur_cgrp = tc->cgrp; return tc->task; } EXPORT_SYMBOL_GPL(cgroup_taskset_next); -/** - * cgroup_taskset_cur_css - return the matching css for the current task - * @tset: taskset of interest - * @subsys_id: the ID of the target subsystem - * - * Return the css for the current (last returned) task of @tset for - * subsystem specified by @subsys_id. This function must be preceded by - * either cgroup_taskset_first() or cgroup_taskset_next(). - */ -struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset, - int subsys_id) -{ - return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]); -} -EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css); - -/** - * cgroup_taskset_size - return the number of tasks in taskset - * @tset: taskset of interest - */ -int cgroup_taskset_size(struct cgroup_taskset *tset) -{ - return tset->tc_array ? tset->tc_array_len : 1; -} -EXPORT_SYMBOL_GPL(cgroup_taskset_size); - - /** * cgroup_task_migrate - move a task from one cgroup to another. 
* @old_cgrp; the cgroup @tsk is being migrated from -- cgit v1.2.3 From 9db8de3722d184b8a431afd6bef803d6867ac889 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:43 -0500 Subject: cgroup: cosmetic updates to cgroup_attach_task() cgroup_attach_task() is planned to go through restructuring. Let's tidy it up a bit in preparation. * Update cgroup_attach_task() to receive the target task argument in @leader instead of @tsk. * Rename @tsk to @task. * Rename @retval to @ret. This is purely cosmetic. v2: get_nr_threads() was using uninitialized @task instead of @leader. Fixed. Reported by Dan Carpenter. Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Dan Carpenter --- kernel/cgroup.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a9d9bbb12310..9a890a2e58fc 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1728,20 +1728,20 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, /** * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup * @cgrp: the cgroup to attach to - * @tsk: the task or the leader of the threadgroup to be attached + * @leader: the task or the leader of the threadgroup to be attached * @threadgroup: attach the whole threadgroup? * * Call holding cgroup_mutex and the group_rwsem of the leader. Will take * task_lock of @tsk or each thread in the threadgroup individually in turn. */ -static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, +static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, bool threadgroup) { - int retval, i, group_size; + int ret, i, group_size; struct cgroupfs_root *root = cgrp->root; struct cgroup_subsys_state *css, *failed_css = NULL; /* threadgroup list cursor and array */ - struct task_struct *leader = tsk; + struct task_struct *task; struct task_and_cgroup *tc; struct flex_array *group; struct cgroup_taskset tset = { }; @@ -1754,7 +1754,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, * threads exit, this will just be an over-estimate. */ if (threadgroup) - group_size = get_nr_threads(tsk); + group_size = get_nr_threads(leader); else group_size = 1; /* flex_array supports very large thread-groups better than kmalloc. */ @@ -1762,8 +1762,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, if (!group) return -ENOMEM; /* pre-allocate to guarantee space while iterating in rcu read-side. */ - retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); - if (retval) + ret = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); + if (ret) goto out_free_group_list; i = 0; @@ -1774,17 +1774,18 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, */ down_read(&css_set_rwsem); rcu_read_lock(); + task = leader; do { struct task_and_cgroup ent; - /* @tsk either already exited or can't exit until the end */ - if (tsk->flags & PF_EXITING) + /* @task either already exited or can't exit until the end */ + if (task->flags & PF_EXITING) goto next; /* as per above, nr_threads may decrease, but not increase. 
*/ BUG_ON(i >= group_size); - ent.task = tsk; - ent.cgrp = task_cgroup_from_root(tsk, root); + ent.task = task; + ent.cgrp = task_cgroup_from_root(task, root); /* nothing to do if this task is already in the cgroup */ if (ent.cgrp == cgrp) goto next; @@ -1792,13 +1793,13 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, * saying GFP_ATOMIC has no effect here because we did prealloc * earlier, but it's good form to communicate our expectations. */ - retval = flex_array_put(group, i, &ent, GFP_ATOMIC); - BUG_ON(retval != 0); + ret = flex_array_put(group, i, &ent, GFP_ATOMIC); + BUG_ON(ret != 0); i++; next: if (!threadgroup) break; - } while_each_thread(leader, tsk); + } while_each_thread(leader, task); rcu_read_unlock(); up_read(&css_set_rwsem); /* remember the number of threads in the array for later. */ @@ -1807,7 +1808,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, tset.tc_array_len = group_size; /* methods shouldn't be called if no task is actually migrating */ - retval = 0; + ret = 0; if (!group_size) goto out_free_group_list; @@ -1816,8 +1817,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, */ for_each_css(css, i, cgrp) { if (css->ss->can_attach) { - retval = css->ss->can_attach(css, &tset); - if (retval) { + ret = css->ss->can_attach(css, &tset); + if (ret) { failed_css = css; goto out_cancel_attach; } @@ -1835,7 +1836,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, old_cset = task_css_set(tc->task); tc->cset = find_css_set(old_cset, cgrp); if (!tc->cset) { - retval = -ENOMEM; + ret = -ENOMEM; goto out_put_css_set_refs; } } @@ -1863,9 +1864,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, /* * step 5: success! and cleanup */ - retval = 0; + ret = 0; out_put_css_set_refs: - if (retval) { + if (ret) { for (i = 0; i < group_size; i++) { tc = flex_array_get(group, i); if (!tc->cset) @@ -1874,7 +1875,7 @@ out_put_css_set_refs: } } out_cancel_attach: - if (retval) { + if (ret) { for_each_css(css, i, cgrp) { if (css == failed_css) break; @@ -1884,7 +1885,7 @@ out_cancel_attach: } out_free_group_list: flex_array_free(group); - return retval; + return ret; } /* -- cgit v1.2.3 From 8541fecc04a91842f023cbfe2c376d4de3b5047e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 13 Feb 2014 06:58:43 -0500 Subject: cgroup: unexport functions With module support gone, a lot of functions no longer need to be exported. Unexport them. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9a890a2e58fc..750d0e1e7e56 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -242,7 +242,6 @@ bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor) } return false; } -EXPORT_SYMBOL_GPL(cgroup_is_descendant); static int cgroup_is_releasable(const struct cgroup *cgrp) { @@ -1664,7 +1663,6 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) return tset->single.task; } } -EXPORT_SYMBOL_GPL(cgroup_taskset_first); /** * cgroup_taskset_next - iterate to the next task in taskset @@ -1683,7 +1681,6 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) tc = flex_array_get(tset->tc_array, tset->idx++); return tc->task; } -EXPORT_SYMBOL_GPL(cgroup_taskset_next); /** * cgroup_task_migrate - move a task from one cgroup to another. 
@@ -2365,7 +2362,6 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) mutex_unlock(&cgroup_tree_mutex); return ret; } -EXPORT_SYMBOL_GPL(cgroup_add_cftypes); /** * cgroup_task_count - count the number of tasks in a cgroup. @@ -2439,7 +2435,6 @@ css_next_child(struct cgroup_subsys_state *pos_css, return cgroup_css(next, parent_css->ss); } -EXPORT_SYMBOL_GPL(css_next_child); /** * css_next_descendant_pre - find the next descendant for pre-order walk @@ -2482,7 +2477,6 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos, return NULL; } -EXPORT_SYMBOL_GPL(css_next_descendant_pre); /** * css_rightmost_descendant - return the rightmost descendant of a css @@ -2514,7 +2508,6 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos) return last; } -EXPORT_SYMBOL_GPL(css_rightmost_descendant); static struct cgroup_subsys_state * css_leftmost_descendant(struct cgroup_subsys_state *pos) @@ -2568,7 +2561,6 @@ css_next_descendant_post(struct cgroup_subsys_state *pos, /* no sibling left, visit parent */ return css_parent(pos); } -EXPORT_SYMBOL_GPL(css_next_descendant_post); /** * css_advance_task_iter - advance a task itererator to the next css_set -- cgit v1.2.3 From 430af8ad9dad82d775d688155e1db1da385d3e7a Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Thu, 13 Feb 2014 16:42:43 -0500 Subject: cgroup: fix coccinelle warnings kernel/cgroup.c:2256:1-3: WARNING: PTR_RET can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: coccinelle/api/ptr_ret.cocci Signed-off-by: Fengguang Wu Signed-off-by: Tejun Heo --- kernel/cgroup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 750d0e1e7e56..15dcae74b510 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2171,9 +2171,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), cgroup_file_mode(cft), 0, cft->kf_ops, cft, NULL, false, key); - if (IS_ERR(kn)) - return PTR_ERR(kn); - return 0; + return PTR_ERR_OR_ZERO(kn); } /** -- cgit v1.2.3 From bad34660344f37db8b55ce8bc139bddc7d83af1b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 14 Feb 2014 16:54:28 +0800 Subject: cgroup: fix locking in cgroupstats_build() css_set_lock has been converted to css_set_rwsem, and rwsem can't nest inside rcu_read_lock. Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cgroup.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 15dcae74b510..5606c0f08d95 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2995,6 +2995,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) kernfs_type(kn) != KERNFS_DIR) return -EINVAL; + mutex_lock(&cgroup_mutex); + /* * We aren't being called from kernfs and there's no guarantee on * @kn->priv's validity. 
For this and css_tryget_from_dir(), @@ -3002,10 +3004,12 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) */ rcu_read_lock(); cgrp = rcu_dereference(kn->priv); - if (!cgrp) { + if (!cgrp || cgroup_is_dead(cgrp)) { rcu_read_unlock(); + mutex_unlock(&cgroup_mutex); return -ENOENT; } + rcu_read_unlock(); css_task_iter_start(&cgrp->dummy_css, &it); while ((tsk = css_task_iter_next(&it))) { @@ -3030,7 +3034,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) } css_task_iter_end(&it); - rcu_read_unlock(); + mutex_unlock(&cgroup_mutex); return 0; } -- cgit v1.2.3 From 6534fd6c15858fe4ce4ae568106225e68d5afa81 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 14 Feb 2014 16:55:04 +0800 Subject: cgroup: fix memory leak in cgroup_mount() We should free the memory allocated in parse_cgroupfs_options() before calling this function again. Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cgroup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5606c0f08d95..3fe01102607b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1540,6 +1540,8 @@ retry: if (!atomic_inc_not_zero(&root->top_cgroup.refcnt)) { mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); + kfree(opts.release_agent); + kfree(opts.name); msleep(10); goto retry; } -- cgit v1.2.3 From dc5736ed7aaf942caaac0c15af74a018e04ec79d Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 17 Feb 2014 10:41:50 +0800 Subject: cgroup: add a validation check to cgroup_add_cftyps() Fengguang reported this bug: BUG: unable to handle kernel NULL pointer dereference at 0000003c IP: [] cgroup_cfts_commit+0x27/0x1c1 ... Call Trace: [] ? kmem_cache_alloc_trace+0x33f/0x3b7 [] cgroup_add_cftypes+0x8f/0xca [] cgroup_init+0x6a/0x26a [] start_kernel+0x4d7/0x57a [] i386_start_kernel+0x92/0x96 This happens in a corner case. If CGROUP_SCHED=y but CFS_BANDWIDTH=n && FAIR_GROUP_SCHED=n && RT_GROUP_SCHED=n, we have: cpu_files[] = { { } /* terminate */ } When we pass cpu_files to cgroup_apply_cftypes(), as cpu_files[0].ss is NULL, we'll access NULL pointer. The bug was introduced by commit de00ffa56ea3132c6013fc8f07133b8a1014cf53 ("cgroup: make cgroup_subsys->base_cftypes use cgroup_add_cftypes()"). Reported-by: Fengguang Wu Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cgroup.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3fe01102607b..771d1b8aaae9 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2348,6 +2348,9 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) { int ret; + if (!cfts || cfts[0].name[0] == '\0') + return 0; + ret = cgroup_init_cftypes(ss, cfts); if (ret) return ret; -- cgit v1.2.3 From e227867f12302633737bd2a48a10a9a72c0630cb Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 18 Feb 2014 22:54:36 +0900 Subject: treewide: Fix typo in Documentation/DocBook This patch fix spelling typo in Documentation/DocBook. It is because .html and .xml files are generated by make htmldocs, I have to fix a typo within the source files. 
Signed-off-by: Masanari Iida Acked-by: Randy Dunlap Signed-off-by: Jiri Kosina --- kernel/relay.c | 2 +- kernel/signal.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index 5001c9887db1..52d6a6f56261 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -227,7 +227,7 @@ static void relay_destroy_buf(struct rchan_buf *buf) * relay_remove_buf - remove a channel buffer * @kref: target kernel reference that contains the relay buffer * - * Removes the file from the fileystem, which also frees the + * Removes the file from the filesystem, which also frees the * rchan_buf_struct and the channel buffer. Should only be called from * kref_put(). */ diff --git a/kernel/signal.c b/kernel/signal.c index 940b30ee9a30..f4812283c6e9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2382,7 +2382,7 @@ relock: * @regs: user register state * @stepping: nonzero if debugger single-step or block-step in use * - * This function should be called when a signal has succesfully been + * This function should be called when a signal has successfully been * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask * is always blocked, and the signal itself is blocked unless %SA_NODEFER * is set in @ka->sa.sa_flags. Tracing is notified. -- cgit v1.2.3 From 392b21897d6cbff55c5b28910dfdc74e3020de6c Mon Sep 17 00:00:00 2001 From: Brian Campbell Date: Sun, 16 Feb 2014 22:58:12 -0500 Subject: user_namespace.c: Remove duplicated word in comment Signed-off-by: Brian Campbell Signed-off-by: Jiri Kosina --- kernel/user_namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 240fb62cf394..dd06439b9c84 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id) * * When there is no mapping defined for the user-namespace uid * pair INVALID_UID is returned. Callers are expected to test - * for and handle handle INVALID_UID being returned. INVALID_UID + * for and handle INVALID_UID being returned. INVALID_UID * may be tested for using uid_valid(). */ kuid_t make_kuid(struct user_namespace *ns, uid_t uid) -- cgit v1.2.3 From 8c1a49aedb73fb2f15aaa32ad9e2e1c4289f45cb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 10 Jan 2014 11:13:54 -0500 Subject: tracing: Pass trace_array to set_flag callback As options (flags) may affect instances instead of being global the set_flag() callbacks need to receive the trace_array descriptor of the instance they will be modifying. 
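Each tracer's callback then grows the extra parameter and can act on the right instance. A minimal sketch of the new callback shape (illustrative only; EXAMPLE_OPT and example_toggle() are invented names):

    static int example_set_flag(struct trace_array *tr, u32 old_flags,
                                u32 bit, int set)
    {
            /* @tr identifies the instance whose option is being changed */
            if (bit == EXAMPLE_OPT)
                    example_toggle(tr, set);

            return 0;       /* 0 means the flag setting was handled */
    }
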
Signed-off-by: Steven Rostedt --- kernel/trace/blktrace.c | 3 ++- kernel/trace/trace.c | 18 ++++++++++-------- kernel/trace/trace.h | 3 ++- kernel/trace/trace_functions.c | 3 ++- kernel/trace/trace_functions_graph.c | 3 ++- kernel/trace/trace_irqsoff.c | 6 ++++-- kernel/trace/trace_nop.c | 2 +- kernel/trace/trace_sched_wakeup.c | 6 ++++-- 8 files changed, 27 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b418cb0d7242..0d758ca61933 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -1427,7 +1427,8 @@ static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) return print_one_line(iter, true); } -static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set) +static int +blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { /* don't output context-info for blk_classic output */ if (bit == TRACE_BLK_OPT_CLASSIC) { diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 815c878f409b..d7dfc7efc4bf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -73,7 +73,8 @@ static struct tracer_flags dummy_tracer_flags = { .opts = dummy_tracer_opt }; -static int dummy_set_flag(u32 old_flags, u32 bit, int set) +static int +dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } @@ -3339,13 +3340,14 @@ static int tracing_trace_options_show(struct seq_file *m, void *v) return 0; } -static int __set_tracer_option(struct tracer *trace, +static int __set_tracer_option(struct trace_array *tr, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { + struct tracer *trace = tr->current_trace; int ret; - ret = trace->set_flag(tracer_flags->val, opts->bit, !neg); + ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); if (ret) return ret; @@ -3357,8 +3359,9 @@ static int __set_tracer_option(struct tracer *trace, } /* Try to assign a tracer specific option */ -static int set_tracer_option(struct tracer *trace, char *cmp, int neg) +static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) { + struct tracer *trace = tr->current_trace; struct tracer_flags *tracer_flags = trace->flags; struct tracer_opt *opts = NULL; int i; @@ -3367,8 +3370,7 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) opts = &tracer_flags->opts[i]; if (strcmp(cmp, opts->name) == 0) - return __set_tracer_option(trace, trace->flags, - opts, neg); + return __set_tracer_option(tr, trace->flags, opts, neg); } return -EINVAL; @@ -3440,7 +3442,7 @@ static int trace_set_options(struct trace_array *tr, char *option) /* If no option could be set, test the specific tracer options */ if (!trace_options[i]) - ret = set_tracer_option(tr->current_trace, cmp, neg); + ret = set_tracer_option(tr, cmp, neg); mutex_unlock(&trace_types_lock); @@ -5689,7 +5691,7 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); - ret = __set_tracer_option(topt->tr->current_trace, topt->flags, + ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); if (ret) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 02b592f2d4b7..649a23d421c1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -355,7 +355,8 @@ struct tracer { void (*print_header)(struct seq_file *m); enum print_line_t (*print_line)(struct trace_iterator *iter); /* If you handled the flag setting, return 0 
*/ - int (*set_flag)(u32 old_flags, u32 bit, int set); + int (*set_flag)(struct trace_array *tr, + u32 old_flags, u32 bit, int set); /* Return 0 if OK with change, else return non-zero */ int (*flag_changed)(struct tracer *tracer, u32 mask, int set); diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 38fe1483c508..85e517e84f50 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -175,7 +175,8 @@ static void tracing_stop_function_trace(void) unregister_ftrace_function(&trace_ops); } -static int func_set_flag(u32 old_flags, u32 bit, int set) +static int +func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { switch (bit) { case TRACE_FUNC_OPT_STACK: diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 0b99120d395c..deff11200261 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -1476,7 +1476,8 @@ void graph_trace_close(struct trace_iterator *iter) } } -static int func_graph_set_flag(u32 old_flags, u32 bit, int set) +static int +func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { if (bit == TRACE_GRAPH_PRINT_IRQS) ftrace_graph_skip_irqs = !set; diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 2aefbee93a6d..fd99b0c183ac 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -160,7 +160,8 @@ static struct ftrace_ops trace_ops __read_mostly = #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER -static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) +static int +irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { int cpu; @@ -266,7 +267,8 @@ __trace_function(struct trace_array *tr, #else #define __trace_function trace_function -static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) +static int +irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return -EINVAL; } diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 394f94417e2f..f3984098c0d7 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c @@ -62,7 +62,7 @@ static void nop_trace_reset(struct trace_array *tr) * If you don't implement it, then the flag setting will be * automatically accepted. */ -static int nop_set_flag(u32 old_flags, u32 bit, int set) +static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { /* * Note that you don't need to update nop_flags.val yourself. 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 6e32635e5e57..f0bbdc261028 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -209,7 +209,8 @@ static void stop_func_tracer(int graph) } #ifdef CONFIG_FUNCTION_GRAPH_TRACER -static int wakeup_set_flag(u32 old_flags, u32 bit, int set) +static int +wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { if (!(bit & TRACE_DISPLAY_GRAPH)) @@ -311,7 +312,8 @@ __trace_function(struct trace_array *tr, #else #define __trace_function trace_function -static int wakeup_set_flag(u32 old_flags, u32 bit, int set) +static int +wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return -EINVAL; } -- cgit v1.2.3 From bf6065b5c7014ab30383405718c7a6b96d2cbdb2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 10 Jan 2014 17:51:01 -0500 Subject: tracing: Pass trace_array to flag_changed callback As options (flags) may affect instances instead of being global the flag_changed() callbacks need to receive the trace_array descriptor of the instance they will be modifying. Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 2 +- kernel/trace/trace.h | 2 +- kernel/trace/trace_irqsoff.c | 4 +++- kernel/trace/trace_sched_wakeup.c | 4 +++- 4 files changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d7dfc7efc4bf..ee8da93e91e0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3393,7 +3393,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) /* Give the tracer a chance to approve the change */ if (tr->current_trace->flag_changed) - if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled)) + if (tr->current_trace->flag_changed(tr, mask, !!enabled)) return -EINVAL; if (enabled) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 649a23d421c1..36e44732c650 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -358,7 +358,7 @@ struct tracer { int (*set_flag)(struct trace_array *tr, u32 old_flags, u32 bit, int set); /* Return 0 if OK with change, else return non-zero */ - int (*flag_changed)(struct tracer *tracer, + int (*flag_changed)(struct trace_array *tr, u32 mask, int set); struct tracer *next; struct tracer_flags *flags; diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index fd99b0c183ac..4bf812f454e6 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -572,8 +572,10 @@ static void irqsoff_function_set(int set) unregister_irqsoff_function(is_graph()); } -static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set) +static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) { + struct tracer *tracer = tr->current_trace; + if (mask & TRACE_ITER_FUNCTION) irqsoff_function_set(set); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index f0bbdc261028..e14da5e97a69 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -179,8 +179,10 @@ static void wakeup_function_set(int set) unregister_wakeup_function(is_graph()); } -static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set) +static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set) { + struct tracer *tracer = tr->current_trace; + if (mask & TRACE_ITER_FUNCTION) wakeup_function_set(set); -- cgit v1.2.3 From 607e2ea167e56db84387f3ab97e59a862e101cab Mon Sep 17 
00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Wed, 6 Nov 2013 22:42:48 -0500 Subject: tracing: Set up infrastructure to allow tracers for instances Currently the tracers (function, function_graph, irqsoff, etc) can only be used by the top level tracing directory (not for instances). This sets up the infrastructure to allow instances to be able to run a separate tracer apart from the what the top level tracing is doing. As tracers need to adapt for being used by instances, the tracers must flag if they can be used by instances or not. Currently only the 'nop' tracer can be used by all instances. Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 72 ++++++++++++++++++++++++++++++++++++++---------- kernel/trace/trace.h | 1 + kernel/trace/trace_nop.c | 3 +- 3 files changed, 60 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ee8da93e91e0..944cd021aabf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -119,7 +119,7 @@ enum ftrace_dump_mode ftrace_dump_on_oops; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; -static int tracing_set_tracer(const char *buf); +static int tracing_set_tracer(struct trace_array *tr, const char *buf); #define MAX_TRACER_SIZE 100 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; @@ -1231,7 +1231,7 @@ int register_tracer(struct tracer *type) printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ - tracing_set_tracer(type->name); + tracing_set_tracer(&global_trace, type->name); default_bootup_tracer = NULL; /* disable other selftests, since this will break it. */ tracing_selftest_disabled = true; @@ -3122,27 +3122,52 @@ static int tracing_open(struct inode *inode, struct file *file) return ret; } +/* + * Some tracers are not suitable for instance buffers. + * A tracer is always available for the global array (toplevel) + * or if it explicitly states that it is. 
+ */ +static bool +trace_ok_for_array(struct tracer *t, struct trace_array *tr) +{ + return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; +} + +/* Find the next tracer that this trace array may use */ +static struct tracer * +get_tracer_for_array(struct trace_array *tr, struct tracer *t) +{ + while (t && !trace_ok_for_array(t, tr)) + t = t->next; + + return t; +} + static void * t_next(struct seq_file *m, void *v, loff_t *pos) { + struct trace_array *tr = m->private; struct tracer *t = v; (*pos)++; if (t) - t = t->next; + t = get_tracer_for_array(tr, t->next); return t; } static void *t_start(struct seq_file *m, loff_t *pos) { + struct trace_array *tr = m->private; struct tracer *t; loff_t l = 0; mutex_lock(&trace_types_lock); - for (t = trace_types; t && l < *pos; t = t_next(m, t, &l)) - ; + + t = get_tracer_for_array(tr, trace_types); + for (; t && l < *pos; t = t_next(m, t, &l)) + ; return t; } @@ -3177,10 +3202,21 @@ static const struct seq_operations show_traces_seq_ops = { static int show_traces_open(struct inode *inode, struct file *file) { + struct trace_array *tr = inode->i_private; + struct seq_file *m; + int ret; + if (tracing_disabled) return -ENODEV; - return seq_open(file, &show_traces_seq_ops); + ret = seq_open(file, &show_traces_seq_ops); + if (ret) + return ret; + + m = file->private_data; + m->private = tr; + + return 0; } static ssize_t @@ -3871,10 +3907,9 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer); static void destroy_trace_option_files(struct trace_option_dentry *topts); -static int tracing_set_tracer(const char *buf) +static int tracing_set_tracer(struct trace_array *tr, const char *buf) { static struct trace_option_dentry *topts; - struct trace_array *tr = &global_trace; struct tracer *t; #ifdef CONFIG_TRACER_MAX_TRACE bool had_max_tr; @@ -3902,6 +3937,12 @@ static int tracing_set_tracer(const char *buf) if (t == tr->current_trace) goto out; + /* Some tracers are only allowed for the top level buffer */ + if (!trace_ok_for_array(t, tr)) { + ret = -EINVAL; + goto out; + } + trace_branch_disable(); tr->current_trace->enabled = false; @@ -3958,6 +3999,7 @@ static ssize_t tracing_set_trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { + struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+1]; int i; size_t ret; @@ -3977,7 +4019,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) buf[i] = 0; - err = tracing_set_tracer(buf); + err = tracing_set_tracer(tr, buf); if (err) return err; @@ -6193,6 +6235,12 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) { int cpu; + trace_create_file("available_tracers", 0444, d_tracer, + tr, &show_traces_fops); + + trace_create_file("current_tracer", 0644, d_tracer, + tr, &set_tracer_fops); + trace_create_file("tracing_cpumask", 0644, d_tracer, tr, &tracing_cpumask_fops); @@ -6245,12 +6293,6 @@ static __init int tracer_init_debugfs(void) init_tracer_debugfs(&global_trace, d_tracer); - trace_create_file("available_tracers", 0444, d_tracer, - &global_trace, &show_traces_fops); - - trace_create_file("current_tracer", 0644, d_tracer, - &global_trace, &set_tracer_fops); - #ifdef CONFIG_TRACER_MAX_TRACE trace_create_file("tracing_max_latency", 0644, d_tracer, &tracing_max_latency, &tracing_max_lat_fops); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 36e44732c650..ea51bb2004d2 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -364,6 
+364,7 @@ struct tracer { struct tracer_flags *flags; bool print_max; bool enabled; + bool allow_instances; #ifdef CONFIG_TRACER_MAX_TRACE bool use_max_tr; #endif diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index f3984098c0d7..69a5cc94c01a 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c @@ -96,6 +96,7 @@ struct tracer nop_trace __read_mostly = .selftest = trace_selftest_startup_nop, #endif .flags = &nop_flags, - .set_flag = nop_set_flag + .set_flag = nop_set_flag, + .allow_instances = true, }; -- cgit v1.2.3 From f1b21c9a40704dfdf7b8423c7d2969ea31c9857d Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 14 Jan 2014 12:33:33 -0500 Subject: tracing: Only let top level have option files Currently, only the top level instance can have tracing options. Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 944cd021aabf..da9543cdbe7a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3968,9 +3968,11 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) free_snapshot(tr); } #endif - destroy_trace_option_files(topts); - - topts = create_trace_option_files(tr, t); + /* Currently, only the top instance has options */ + if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { + destroy_trace_option_files(topts); + topts = create_trace_option_files(tr, t); + } #ifdef CONFIG_TRACER_MAX_TRACE if (t->use_max_tr && !had_max_tr) { -- cgit v1.2.3 From e6435e96ec6f31a05690876a19e63e451f7b37e2 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 10 Jan 2014 14:31:31 -0500 Subject: ftrace: Copy ops private to global_ops private If global_ops function is being called directly, instead of the global_ops list function, set the global_ops private to be the same as the ops private that's being called directly. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index cd7f76d1eb86..98ae4ed965db 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -244,7 +244,11 @@ static void control_ops_free(struct ftrace_ops *ops) static void update_global_ops(void) { - ftrace_func_t func; + ftrace_func_t func = ftrace_global_list_func; + void *private = NULL; + + /* The list has its own recursion protection. */ + global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; /* * If there's only one function registered, then call that @@ -254,23 +258,17 @@ static void update_global_ops(void) if (ftrace_global_list == &ftrace_list_end || ftrace_global_list->next == &ftrace_list_end) { func = ftrace_global_list->func; + private = ftrace_global_list->private; /* * As we are calling the function directly. * If it does not have recursion protection, * the function_trace_op needs to be updated * accordingly. */ - if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) - global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; - else + if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)) global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE; - } else { - func = ftrace_global_list_func; - /* The list has its own recursion protection. 
*/ - global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE; } - /* If we filter on pids, update to use the pid function */ if (!list_empty(&ftrace_pids)) { set_ftrace_pid_function(func); @@ -278,6 +276,7 @@ static void update_global_ops(void) } global_ops.func = func; + global_ops.private = private; } static void ftrace_sync(struct work_struct *work) -- cgit v1.2.3 From 6b450d2533e2c1c71fbe7f1bdce0bb1c9f813030 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 14 Jan 2014 08:43:01 -0500 Subject: tracing: Disable tracers before deletion of instance When an instance is about to be deleted, make sure the tracer is set to nop. If it isn't reset the tracer and set it to the nop tracer, otherwise memory leaks and bad pointers may result. Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index da9543cdbe7a..7d5913bb46e8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3907,6 +3907,23 @@ create_trace_option_files(struct trace_array *tr, struct tracer *tracer); static void destroy_trace_option_files(struct trace_option_dentry *topts); +/* + * Used to clear out the tracer before deletion of an instance. + * Must have trace_types_lock held. + */ +static void tracing_set_nop(struct trace_array *tr) +{ + if (tr->current_trace == &nop_trace) + return; + + tr->current_trace->enabled = false; + + if (tr->current_trace->reset) + tr->current_trace->reset(tr); + + tr->current_trace = &nop_trace; +} + static int tracing_set_tracer(struct trace_array *tr, const char *buf) { static struct trace_option_dentry *topts; @@ -6142,6 +6159,7 @@ static int instance_delete(const char *name) list_del(&tr->list); + tracing_set_nop(tr); event_trace_del_tracer(tr); debugfs_remove_recursive(tr->dir); free_percpu(tr->trace_buffer.data); -- cgit v1.2.3 From 50512ab576e1ce29953c9259e1f36ce16f350f20 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 14 Jan 2014 08:52:35 -0500 Subject: tracing: Convert tracer->enabled to counter As tracers will soon be used by instances, the tracer enabled field needs to be converted to a counter instead of a boolean. This counter is protected by the trace_types_lock mutex. 
Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 6 +++--- kernel/trace/trace.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7d5913bb46e8..f9f22c435036 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3916,7 +3916,7 @@ static void tracing_set_nop(struct trace_array *tr) if (tr->current_trace == &nop_trace) return; - tr->current_trace->enabled = false; + tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); @@ -3962,7 +3962,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) trace_branch_disable(); - tr->current_trace->enabled = false; + tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); @@ -4006,7 +4006,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf) } tr->current_trace = t; - tr->current_trace->enabled = true; + tr->current_trace->enabled++; trace_branch_enable(tr); out: mutex_unlock(&trace_types_lock); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ea51bb2004d2..86915b220bbe 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -362,8 +362,8 @@ struct tracer { u32 mask, int set); struct tracer *next; struct tracer_flags *flags; + int enabled; bool print_max; - bool enabled; bool allow_instances; #ifdef CONFIG_TRACER_MAX_TRACE bool use_max_tr; -- cgit v1.2.3 From f20a580627f43e73e4e57cb37e3864080ca06088 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Thu, 7 Nov 2013 20:08:58 -0500 Subject: ftrace: Allow instances to use function tracing Allow instances (sub-buffers) to enable function tracing. Each instance will have its own function tracing capability. For now, instances will not have function stack tracing, or will they be able to pick and choose what functions they can trace. Picking and choosing their own functions will come later. 
Signed-off-by: Steven Rostedt --- kernel/trace/trace.h | 5 ++ kernel/trace/trace_functions.c | 116 +++++++++++++++++++++++++++-------------- 2 files changed, 81 insertions(+), 40 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 86915b220bbe..35cca055da0f 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -210,6 +210,11 @@ struct trace_array { struct list_head events; cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ int ref; +#ifdef CONFIG_FUNCTION_TRACER + struct ftrace_ops *ops; + /* function tracing enabled */ + int function_enabled; +#endif }; enum { diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 85e517e84f50..3f8dc1ce8b9c 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -13,33 +13,83 @@ #include #include #include +#include #include #include "trace.h" -/* function tracing enabled */ -static int ftrace_function_enabled; +static void tracing_start_function_trace(struct trace_array *tr); +static void tracing_stop_function_trace(struct trace_array *tr); +static void +function_trace_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs); +static void +function_stack_trace_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs); +static struct ftrace_ops trace_ops; +static struct ftrace_ops trace_stack_ops; +static struct tracer_flags func_flags; + +/* Our option */ +enum { + TRACE_FUNC_OPT_STACK = 0x1, +}; + +static int allocate_ftrace_ops(struct trace_array *tr) +{ + struct ftrace_ops *ops; -static struct trace_array *func_trace; + ops = kzalloc(sizeof(*ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; -static void tracing_start_function_trace(void); -static void tracing_stop_function_trace(void); + /* Currently only the non stack verision is supported */ + ops->func = function_trace_call; + ops->flags = FTRACE_OPS_FL_RECURSION_SAFE; + + tr->ops = ops; + ops->private = tr; + return 0; +} static int function_trace_init(struct trace_array *tr) { - func_trace = tr; + struct ftrace_ops *ops; + int ret; + + if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { + /* There's only one global tr */ + if (!trace_ops.private) { + trace_ops.private = tr; + trace_stack_ops.private = tr; + } + + if (func_flags.val & TRACE_FUNC_OPT_STACK) + ops = &trace_stack_ops; + else + ops = &trace_ops; + tr->ops = ops; + } else { + ret = allocate_ftrace_ops(tr); + if (ret) + return ret; + } + tr->trace_buffer.cpu = get_cpu(); put_cpu(); tracing_start_cmdline_record(); - tracing_start_function_trace(); + tracing_start_function_trace(tr); return 0; } static void function_trace_reset(struct trace_array *tr) { - tracing_stop_function_trace(); + tracing_stop_function_trace(tr); tracing_stop_cmdline_record(); + if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) + kfree(tr->ops); + tr->ops = NULL; } static void function_trace_start(struct trace_array *tr) @@ -47,25 +97,18 @@ static void function_trace_start(struct trace_array *tr) tracing_reset_online_cpus(&tr->trace_buffer); } -/* Our option */ -enum { - TRACE_FUNC_OPT_STACK = 0x1, -}; - -static struct tracer_flags func_flags; - static void function_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) { - struct trace_array *tr = func_trace; + struct trace_array *tr = op->private; struct trace_array_cpu *data; unsigned long flags; int bit; int cpu; int pc; - if (unlikely(!ftrace_function_enabled)) + if 
(unlikely(!tr->function_enabled)) return; pc = preempt_count(); @@ -91,14 +134,14 @@ static void function_stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct pt_regs *pt_regs) { - struct trace_array *tr = func_trace; + struct trace_array *tr = op->private; struct trace_array_cpu *data; unsigned long flags; long disabled; int cpu; int pc; - if (unlikely(!ftrace_function_enabled)) + if (unlikely(!tr->function_enabled)) return; /* @@ -128,7 +171,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, local_irq_restore(flags); } - static struct ftrace_ops trace_ops __read_mostly = { .func = function_trace_call, @@ -153,26 +195,17 @@ static struct tracer_flags func_flags = { .opts = func_opts }; -static void tracing_start_function_trace(void) +static void tracing_start_function_trace(struct trace_array *tr) { - ftrace_function_enabled = 0; - - if (func_flags.val & TRACE_FUNC_OPT_STACK) - register_ftrace_function(&trace_stack_ops); - else - register_ftrace_function(&trace_ops); - - ftrace_function_enabled = 1; + tr->function_enabled = 0; + register_ftrace_function(tr->ops); + tr->function_enabled = 1; } -static void tracing_stop_function_trace(void) +static void tracing_stop_function_trace(struct trace_array *tr) { - ftrace_function_enabled = 0; - - if (func_flags.val & TRACE_FUNC_OPT_STACK) - unregister_ftrace_function(&trace_stack_ops); - else - unregister_ftrace_function(&trace_ops); + tr->function_enabled = 0; + unregister_ftrace_function(tr->ops); } static int @@ -184,12 +217,14 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) break; + unregister_ftrace_function(tr->ops); + if (set) { - unregister_ftrace_function(&trace_ops); - register_ftrace_function(&trace_stack_ops); + tr->ops = &trace_stack_ops; + register_ftrace_function(tr->ops); } else { - unregister_ftrace_function(&trace_stack_ops); - register_ftrace_function(&trace_ops); + tr->ops = &trace_ops; + register_ftrace_function(tr->ops); } break; @@ -209,6 +244,7 @@ static struct tracer function_trace __tracer_data = .wait_pipe = poll_wait_pipe, .flags = &func_flags, .set_flag = func_set_flag, + .allow_instances = true, #ifdef CONFIG_FTRACE_SELFTEST .selftest = trace_selftest_startup_function, #endif -- cgit v1.2.3 From e3b3e2e847080e3cc14bee778c6ced3d59bfd76c Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Mon, 11 Nov 2013 23:07:14 -0500 Subject: ftrace: Pass in global_ops for use with filtering files In preparation for having the function tracing instances be able to filter on functions, the generic filter functions must first be converted to take in the global_ops as a parameter. 
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 98ae4ed965db..2b3e23991c8a 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2870,7 +2870,9 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag, static int ftrace_filter_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(&global_ops, + struct ftrace_ops *ops = inode->i_private; + + return ftrace_regex_open(ops, FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, inode, file); } @@ -2878,7 +2880,9 @@ ftrace_filter_open(struct inode *inode, struct file *file) static int ftrace_notrace_open(struct inode *inode, struct file *file) { - return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE, + struct ftrace_ops *ops = inode->i_private; + + return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, inode, file); } @@ -4118,10 +4122,10 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) d_tracer, NULL, &ftrace_enabled_fops); trace_create_file("set_ftrace_filter", 0644, d_tracer, - NULL, &ftrace_filter_fops); + &global_ops, &ftrace_filter_fops); trace_create_file("set_ftrace_notrace", 0644, d_tracer, - NULL, &ftrace_notrace_fops); + &global_ops, &ftrace_notrace_fops); #ifdef CONFIG_FUNCTION_GRAPH_TRACER trace_create_file("set_graph_function", 0444, d_tracer, -- cgit v1.2.3 From 591dffdade9f07692a7dd3ed16830ec24e901ece Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 10 Jan 2014 16:17:45 -0500 Subject: ftrace: Allow for function tracing instance to filter functions Create a "set_ftrace_filter" and "set_ftrace_notrace" files in the instance directories to let users filter of functions to trace for the given instance. Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 39 ++++++++++++++++++++++++++++++++++----- kernel/trace/trace.c | 4 ++++ kernel/trace/trace.h | 25 ++++++++++++++++++++++++- kernel/trace/trace_functions.c | 40 ++++++++++++++++++++++++++++++++-------- 4 files changed, 94 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2b3e23991c8a..dcee546f21bc 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -436,6 +436,9 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list, static int __register_ftrace_function(struct ftrace_ops *ops) { + if (ops->flags & FTRACE_OPS_FL_DELETED) + return -EINVAL; + if (FTRACE_WARN_ON(ops == &global_ops)) return -EINVAL; @@ -4112,6 +4115,36 @@ static const struct file_operations ftrace_graph_notrace_fops = { }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ +void ftrace_create_filter_files(struct ftrace_ops *ops, + struct dentry *parent) +{ + + trace_create_file("set_ftrace_filter", 0644, parent, + ops, &ftrace_filter_fops); + + trace_create_file("set_ftrace_notrace", 0644, parent, + ops, &ftrace_notrace_fops); +} + +/* + * The name "destroy_filter_files" is really a misnomer. Although + * in the future, it may actualy delete the files, but this is + * really intended to make sure the ops passed in are disabled + * and that when this function returns, the caller is free to + * free the ops. + * + * The "destroy" name is only to match the "create" name that this + * should be paired with. 
+ */ +void ftrace_destroy_filter_files(struct ftrace_ops *ops) +{ + mutex_lock(&ftrace_lock); + if (ops->flags & FTRACE_OPS_FL_ENABLED) + ftrace_shutdown(ops, 0); + ops->flags |= FTRACE_OPS_FL_DELETED; + mutex_unlock(&ftrace_lock); +} + static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { @@ -4121,11 +4154,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) trace_create_file("enabled_functions", 0444, d_tracer, NULL, &ftrace_enabled_fops); - trace_create_file("set_ftrace_filter", 0644, d_tracer, - &global_ops, &ftrace_filter_fops); - - trace_create_file("set_ftrace_notrace", 0644, d_tracer, - &global_ops, &ftrace_notrace_fops); + ftrace_create_filter_files(&global_ops, d_tracer); #ifdef CONFIG_FUNCTION_GRAPH_TRACER trace_create_file("set_graph_function", 0444, d_tracer, diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f9f22c435036..d95ec2876bbb 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6161,6 +6161,7 @@ static int instance_delete(const char *name) tracing_set_nop(tr); event_trace_del_tracer(tr); + ftrace_destroy_function_files(tr); debugfs_remove_recursive(tr->dir); free_percpu(tr->trace_buffer.data); ring_buffer_free(tr->trace_buffer.buffer); @@ -6291,6 +6292,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) trace_create_file("tracing_on", 0644, d_tracer, tr, &rb_simple_fops); + if (ftrace_create_function_files(tr, d_tracer)) + WARN(1, "Could not allocate function filter files"); + #ifdef CONFIG_TRACER_SNAPSHOT trace_create_file("snapshot", 0644, d_tracer, tr, &snapshot_fops); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 35cca055da0f..ffc314b7e92b 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -819,13 +819,36 @@ static inline int ftrace_trace_task(struct task_struct *task) return test_tsk_trace_trace(task); } extern int ftrace_is_dead(void); +int ftrace_create_function_files(struct trace_array *tr, + struct dentry *parent); +void ftrace_destroy_function_files(struct trace_array *tr); #else static inline int ftrace_trace_task(struct task_struct *task) { return 1; } static inline int ftrace_is_dead(void) { return 0; } -#endif +static inline int +ftrace_create_function_files(struct trace_array *tr, + struct dentry *parent) +{ + return 0; +} +static inline void ftrace_destroy_function_files(struct trace_array *tr) { } +#endif /* CONFIG_FUNCTION_TRACER */ + +#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) +void ftrace_create_filter_files(struct ftrace_ops *ops, + struct dentry *parent); +void ftrace_destroy_filter_files(struct ftrace_ops *ops); +#else +/* + * The ops parameter passed in is usually undefined. + * This must be a macro. + */ +#define ftrace_create_filter_files(ops, parent) do { } while (0) +#define ftrace_destroy_filter_files(ops) do { } while (0) +#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ int ftrace_event_is_function(struct ftrace_event_call *call); diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 3f8dc1ce8b9c..5b781d2be383 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -52,10 +52,34 @@ static int allocate_ftrace_ops(struct trace_array *tr) return 0; } + +int ftrace_create_function_files(struct trace_array *tr, + struct dentry *parent) +{ + int ret; + + /* The top level array uses the "global_ops". 
*/ + if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) { + ret = allocate_ftrace_ops(tr); + if (ret) + return ret; + } + + ftrace_create_filter_files(tr->ops, parent); + + return 0; +} + +void ftrace_destroy_function_files(struct trace_array *tr) +{ + ftrace_destroy_filter_files(tr->ops); + kfree(tr->ops); + tr->ops = NULL; +} + static int function_trace_init(struct trace_array *tr) { struct ftrace_ops *ops; - int ret; if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { /* There's only one global tr */ @@ -69,10 +93,13 @@ static int function_trace_init(struct trace_array *tr) else ops = &trace_ops; tr->ops = ops; - } else { - ret = allocate_ftrace_ops(tr); - if (ret) - return ret; + } else if (!tr->ops) { + /* + * Instance trace_arrays get their ops allocated + * at instance creation. Unless it failed + * the allocation. + */ + return -ENOMEM; } tr->trace_buffer.cpu = get_cpu(); @@ -87,9 +114,6 @@ static void function_trace_reset(struct trace_array *tr) { tracing_stop_function_trace(tr); tracing_stop_cmdline_record(); - if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) - kfree(tr->ops); - tr->ops = NULL; } static void function_trace_start(struct trace_array *tr) -- cgit v1.2.3 From a43b97043048eac1686f409af7ad3bb8071b9d83 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 17 Jan 2014 17:08:36 +0900 Subject: tracing/uprobes: Rename uprobe_{trace,perf}_print() functions The uprobe_{trace,perf}_print functions are misnomers since what they do is not printing. There's also a real print function named print_uprobe_event() so they'll only increase confusion IMHO. Rename them with double underscores to follow convention of kprobe. Link: http://lkml.kernel.org/r/1389946120-19610-2-git-send-email-namhyung@kernel.org Reviewed-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Oleg Nesterov Cc: Srikar Dronamraju Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt --- kernel/trace/trace_uprobe.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 79e52d93860b..c5d2612bf233 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -758,7 +758,7 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) mutex_unlock(&ucb->mutex); } -static void uprobe_trace_print(struct trace_uprobe *tu, +static void __uprobe_trace_func(struct trace_uprobe *tu, unsigned long func, struct pt_regs *regs) { struct uprobe_trace_entry_head *entry; @@ -807,14 +807,14 @@ out: static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) { if (!is_ret_probe(tu)) - uprobe_trace_print(tu, 0, regs); + __uprobe_trace_func(tu, 0, regs); return 0; } static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, struct pt_regs *regs) { - uprobe_trace_print(tu, func, regs); + __uprobe_trace_func(tu, func, regs); } /* Event entry printers */ @@ -1014,7 +1014,7 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, return ret; } -static void uprobe_perf_print(struct trace_uprobe *tu, +static void __uprobe_perf_func(struct trace_uprobe *tu, unsigned long func, struct pt_regs *regs) { struct ftrace_event_call *call = &tu->tp.call; @@ -1078,14 +1078,14 @@ static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) return UPROBE_HANDLER_REMOVE; if (!is_ret_probe(tu)) - uprobe_perf_print(tu, 0, regs); + __uprobe_perf_func(tu, 0, regs); return 0; } static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, struct pt_regs *regs) { - 
uprobe_perf_print(tu, func, regs); + __uprobe_perf_func(tu, func, regs); } #endif /* CONFIG_PERF_EVENTS */ -- cgit v1.2.3 From dd9fa555d7bbfcc7dbc63eb744806e9f6cb62e9f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 17 Jan 2014 17:08:37 +0900 Subject: tracing/uprobes: Move argument fetching to uprobe_dispatcher() A single uprobe event might serve different users like ftrace and perf. And this is especially important for upcoming multi buffer support. But in this case it'll fetch (same) data from userspace multiple times. So move it to the beginning of the dispatcher function and reuse it for each users. Link: http://lkml.kernel.org/r/1389946120-19610-3-git-send-email-namhyung@kernel.org Reviewed-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Oleg Nesterov Cc: Srikar Dronamraju Cc: zhangwei(Jovi) Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt --- kernel/trace/trace_uprobe.c | 93 +++++++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 37 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c5d2612bf233..d83155e0da78 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -759,30 +759,25 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) } static void __uprobe_trace_func(struct trace_uprobe *tu, - unsigned long func, struct pt_regs *regs) + unsigned long func, struct pt_regs *regs, + struct uprobe_cpu_buffer *ucb, int dsize) { struct uprobe_trace_entry_head *entry; struct ring_buffer_event *event; struct ring_buffer *buffer; - struct uprobe_cpu_buffer *ucb; void *data; - int size, dsize, esize; + int size, esize; struct ftrace_event_call *call = &tu->tp.call; - dsize = __get_data_size(&tu->tp, regs); - esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); - - if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE)) + if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) return; - ucb = uprobe_buffer_get(); - store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); - + esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); size = esize + tu->tp.size + dsize; event = trace_current_buffer_lock_reserve(&buffer, call->event.type, size, 0, 0); if (!event) - goto out; + return; entry = ring_buffer_event_data(event); if (is_ret_probe(tu)) { @@ -798,23 +793,22 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, if (!call_filter_check_discard(call, entry, buffer, event)) trace_buffer_unlock_commit(buffer, event, 0, 0); - -out: - uprobe_buffer_put(ucb); } /* uprobe handler */ -static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) +static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, + struct uprobe_cpu_buffer *ucb, int dsize) { if (!is_ret_probe(tu)) - __uprobe_trace_func(tu, 0, regs); + __uprobe_trace_func(tu, 0, regs, ucb, dsize); return 0; } static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, - struct pt_regs *regs) + struct pt_regs *regs, + struct uprobe_cpu_buffer *ucb, int dsize) { - __uprobe_trace_func(tu, func, regs); + __uprobe_trace_func(tu, func, regs, ucb, dsize); } /* Event entry printers */ @@ -1015,30 +1009,23 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, } static void __uprobe_perf_func(struct trace_uprobe *tu, - unsigned long func, struct pt_regs *regs) + unsigned long func, struct pt_regs *regs, + struct uprobe_cpu_buffer *ucb, int dsize) { struct ftrace_event_call *call = &tu->tp.call; struct uprobe_trace_entry_head *entry; struct hlist_head 
*head; - struct uprobe_cpu_buffer *ucb; void *data; - int size, dsize, esize; + int size, esize; int rctx; - dsize = __get_data_size(&tu->tp, regs); esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); - if (WARN_ON_ONCE(!uprobe_cpu_buffer)) - return; - size = esize + tu->tp.size + dsize; size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32); if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) return; - ucb = uprobe_buffer_get(); - store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); - preempt_disable(); head = this_cpu_ptr(call->perf_events); if (hlist_empty(head)) @@ -1068,24 +1055,25 @@ static void __uprobe_perf_func(struct trace_uprobe *tu, perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); out: preempt_enable(); - uprobe_buffer_put(ucb); } /* uprobe profile handler */ -static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) +static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs, + struct uprobe_cpu_buffer *ucb, int dsize) { if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) return UPROBE_HANDLER_REMOVE; if (!is_ret_probe(tu)) - __uprobe_perf_func(tu, 0, regs); + __uprobe_perf_func(tu, 0, regs, ucb, dsize); return 0; } static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, - struct pt_regs *regs) + struct pt_regs *regs, + struct uprobe_cpu_buffer *ucb, int dsize) { - __uprobe_perf_func(tu, func, regs); + __uprobe_perf_func(tu, func, regs, ucb, dsize); } #endif /* CONFIG_PERF_EVENTS */ @@ -1127,8 +1115,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) { struct trace_uprobe *tu; struct uprobe_dispatch_data udd; + struct uprobe_cpu_buffer *ucb; + int dsize, esize; int ret = 0; + tu = container_of(con, struct trace_uprobe, consumer); tu->nhit++; @@ -1137,13 +1128,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) current->utask->vaddr = (unsigned long) &udd; +#ifdef CONFIG_PERF_EVENTS + if ((tu->tp.flags & TP_FLAG_TRACE) == 0 && + !uprobe_perf_filter(&tu->consumer, 0, current->mm)) + return UPROBE_HANDLER_REMOVE; +#endif + + if (WARN_ON_ONCE(!uprobe_cpu_buffer)) + return 0; + + dsize = __get_data_size(&tu->tp, regs); + esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); + + ucb = uprobe_buffer_get(); + store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); + if (tu->tp.flags & TP_FLAG_TRACE) - ret |= uprobe_trace_func(tu, regs); + ret |= uprobe_trace_func(tu, regs, ucb, dsize); #ifdef CONFIG_PERF_EVENTS if (tu->tp.flags & TP_FLAG_PROFILE) - ret |= uprobe_perf_func(tu, regs); + ret |= uprobe_perf_func(tu, regs, ucb, dsize); #endif + uprobe_buffer_put(ucb); return ret; } @@ -1152,6 +1159,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, { struct trace_uprobe *tu; struct uprobe_dispatch_data udd; + struct uprobe_cpu_buffer *ucb; + int dsize, esize; tu = container_of(con, struct trace_uprobe, consumer); @@ -1160,13 +1169,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con, current->utask->vaddr = (unsigned long) &udd; + if (WARN_ON_ONCE(!uprobe_cpu_buffer)) + return 0; + + dsize = __get_data_size(&tu->tp, regs); + esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); + + ucb = uprobe_buffer_get(); + store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize); + if (tu->tp.flags & TP_FLAG_TRACE) - uretprobe_trace_func(tu, func, regs); + uretprobe_trace_func(tu, func, regs, ucb, dsize); #ifdef CONFIG_PERF_EVENTS if (tu->tp.flags & TP_FLAG_PROFILE) - uretprobe_perf_func(tu, func, regs); + 
uretprobe_perf_func(tu, func, regs, ucb, dsize); #endif + uprobe_buffer_put(ucb); return 0; } -- cgit v1.2.3 From 70ed91c6ec7f8bf20369634017d887d48ac979d2 Mon Sep 17 00:00:00 2001 From: "zhangwei(Jovi)" Date: Fri, 17 Jan 2014 17:08:38 +0900 Subject: tracing/uprobes: Support ftrace_event_file base multibuffer Support multi-buffer on uprobe-based dynamic events by using ftrace_event_file. This patch is based kprobe-based dynamic events multibuffer support work initially, commited by Masami(commit 41a7dd420c), but revised as below: Oleg changed the kprobe-based multibuffer design from array-pointers of ftrace_event_file into simple list, so this patch also change to the list design. rcu_read_lock/unlock added into uprobe_trace_func/uretprobe_trace_func, to synchronize with ftrace_event_file list add and delete. Even though we allow multi-uprobes instances now, but TP_FLAG_PROFILE/TP_FLAG_TRACE are still mutually exclusive in probe_event_enable currently, this means we cannot allow one user is using uprobe-tracer, and another user is using perf-probe on same uprobe concurrently. (Perhaps this will be fix in future, kprobe don't have this limitation now) Link: http://lkml.kernel.org/r/1389946120-19610-4-git-send-email-namhyung@kernel.org Reviewed-by: Masami Hiramatsu Reviewed-by: Oleg Nesterov Cc: Ingo Molnar Cc: Frederic Weisbecker Cc: Srikar Dronamraju Signed-off-by: zhangwei(Jovi) Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt --- kernel/trace/trace_kprobe.c | 17 ------- kernel/trace/trace_probe.h | 17 +++++++ kernel/trace/trace_uprobe.c | 105 +++++++++++++++++++++++++++++++++++--------- 3 files changed, 101 insertions(+), 38 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index bdbae450c13e..d021d21dd150 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -35,11 +35,6 @@ struct trace_kprobe { struct trace_probe tp; }; -struct event_file_link { - struct ftrace_event_file *file; - struct list_head list; -}; - #define SIZEOF_TRACE_KPROBE(n) \ (offsetof(struct trace_kprobe, tp.args) + \ (sizeof(struct probe_arg) * (n))) @@ -387,18 +382,6 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) return ret; } -static struct event_file_link * -find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) -{ - struct event_file_link *link; - - list_for_each_entry(link, &tp->files, list) - if (link->file == file) - return link; - - return NULL; -} - /* * Disable trace_probe * if the file is NULL, disable "perf" handler, or disable "trace" handler. 
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index b73574a5f429..fb1ab5dfbd42 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -288,6 +288,11 @@ struct trace_probe { struct probe_arg args[]; }; +struct event_file_link { + struct ftrace_event_file *file; + struct list_head list; +}; + static inline bool trace_probe_is_enabled(struct trace_probe *tp) { return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); @@ -316,6 +321,18 @@ static inline int is_good_name(const char *name) return 1; } +static inline struct event_file_link * +find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) +{ + struct event_file_link *link; + + list_for_each_entry(link, &tp->files, list) + if (link->file == file) + return link; + + return NULL; +} + extern int traceprobe_parse_probe_arg(char *arg, ssize_t *size, struct probe_arg *parg, bool is_return, bool is_kprobe); diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d83155e0da78..349c6df9e332 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) goto error; INIT_LIST_HEAD(&tu->list); + INIT_LIST_HEAD(&tu->tp.files); tu->consumer.handler = uprobe_dispatcher; if (is_ret) tu->consumer.ret_handler = uretprobe_dispatcher; @@ -760,7 +761,8 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb) static void __uprobe_trace_func(struct trace_uprobe *tu, unsigned long func, struct pt_regs *regs, - struct uprobe_cpu_buffer *ucb, int dsize) + struct uprobe_cpu_buffer *ucb, int dsize, + struct ftrace_event_file *ftrace_file) { struct uprobe_trace_entry_head *entry; struct ring_buffer_event *event; @@ -769,13 +771,15 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, int size, esize; struct ftrace_event_call *call = &tu->tp.call; + WARN_ON(call != ftrace_file->event_call); + if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) return; esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); size = esize + tu->tp.size + dsize; - event = trace_current_buffer_lock_reserve(&buffer, call->event.type, - size, 0, 0); + event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, + call->event.type, size, 0, 0); if (!event) return; @@ -799,8 +803,16 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs, struct uprobe_cpu_buffer *ucb, int dsize) { - if (!is_ret_probe(tu)) - __uprobe_trace_func(tu, 0, regs, ucb, dsize); + struct event_file_link *link; + + if (is_ret_probe(tu)) + return 0; + + rcu_read_lock(); + list_for_each_entry_rcu(link, &tu->tp.files, list) + __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file); + rcu_read_unlock(); + return 0; } @@ -808,7 +820,12 @@ static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, struct pt_regs *regs, struct uprobe_cpu_buffer *ucb, int dsize) { - __uprobe_trace_func(tu, func, regs, ucb, dsize); + struct event_file_link *link; + + rcu_read_lock(); + list_for_each_entry_rcu(link, &tu->tp.files, list) + __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file); + rcu_read_unlock(); } /* Event entry printers */ @@ -855,12 +872,31 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self, struct mm_struct *mm); static int -probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) +probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, + filter_func_t filter) { - int ret = 
0; + bool enabled = trace_probe_is_enabled(&tu->tp); + struct event_file_link *link = NULL; + int ret; + + if (file) { + if (tu->tp.flags & TP_FLAG_PROFILE) + return -EINTR; - if (trace_probe_is_enabled(&tu->tp)) - return -EINTR; + link = kmalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + link->file = file; + list_add_tail_rcu(&link->list, &tu->tp.files); + + tu->tp.flags |= TP_FLAG_TRACE; + } else { + if (tu->tp.flags & TP_FLAG_TRACE) + return -EINTR; + + tu->tp.flags |= TP_FLAG_PROFILE; + } ret = uprobe_buffer_enable(); if (ret < 0) @@ -868,24 +904,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter) WARN_ON(!uprobe_filter_is_empty(&tu->filter)); - tu->tp.flags |= flag; + if (enabled) + return 0; + tu->consumer.filter = filter; ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); - if (ret) - tu->tp.flags &= ~flag; + if (ret) { + if (file) { + list_del(&link->list); + kfree(link); + tu->tp.flags &= ~TP_FLAG_TRACE; + } else + tu->tp.flags &= ~TP_FLAG_PROFILE; + } return ret; } -static void probe_event_disable(struct trace_uprobe *tu, int flag) +static void +probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file) { if (!trace_probe_is_enabled(&tu->tp)) return; + if (file) { + struct event_file_link *link; + + link = find_event_file_link(&tu->tp, file); + if (!link) + return; + + list_del_rcu(&link->list); + /* synchronize with u{,ret}probe_trace_func */ + synchronize_sched(); + kfree(link); + + if (!list_empty(&tu->tp.files)) + return; + } + WARN_ON(!uprobe_filter_is_empty(&tu->filter)); uprobe_unregister(tu->inode, tu->offset, &tu->consumer); - tu->tp.flags &= ~flag; + tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; uprobe_buffer_disable(); } @@ -1077,25 +1138,27 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, } #endif /* CONFIG_PERF_EVENTS */ -static -int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) +static int +trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, + void *data) { struct trace_uprobe *tu = event->data; + struct ftrace_event_file *file = data; switch (type) { case TRACE_REG_REGISTER: - return probe_event_enable(tu, TP_FLAG_TRACE, NULL); + return probe_event_enable(tu, file, NULL); case TRACE_REG_UNREGISTER: - probe_event_disable(tu, TP_FLAG_TRACE); + probe_event_disable(tu, file); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: - return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter); + return probe_event_enable(tu, NULL, uprobe_perf_filter); case TRACE_REG_PERF_UNREGISTER: - probe_event_disable(tu, TP_FLAG_PROFILE); + probe_event_disable(tu, NULL); return 0; case TRACE_REG_PERF_OPEN: -- cgit v1.2.3 From ca3b162021a421b38a9cd7b66555b9b01568dc9d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 17 Jan 2014 17:08:39 +0900 Subject: tracing/uprobes: Support event triggering Add support for event triggering to uprobes. This is same as kprobes support added by Tom (plus cleanup by Steven). 
Link: http://lkml.kernel.org/r/1389946120-19610-5-git-send-email-namhyung@kernel.org Reviewed-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Oleg Nesterov Cc: Srikar Dronamraju Cc: zhangwei(Jovi) Cc: Tom Zanussi Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt --- kernel/trace/trace_uprobe.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 349c6df9e332..01fcb0db75cb 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -776,6 +776,9 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE)) return; + if (ftrace_trigger_soft_disabled(ftrace_file)) + return; + esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); size = esize + tu->tp.size + dsize; event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, @@ -795,8 +798,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, memcpy(data, ucb->buf, tu->tp.size + dsize); - if (!call_filter_check_discard(call, entry, buffer, event)) - trace_buffer_unlock_commit(buffer, event, 0, 0); + event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0); } /* uprobe handler */ -- cgit v1.2.3 From 43fe98913c9f67e3b523615ee3316f9520a623e0 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 17 Jan 2014 17:08:40 +0900 Subject: tracing/uprobes: Support mix of ftrace and perf It seems there's no reason to prevent mixed used of ftrace and perf for a single uprobe event. At least the kprobes already support it. Link: http://lkml.kernel.org/r/1389946120-19610-6-git-send-email-namhyung@kernel.org Reviewed-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Oleg Nesterov Cc: Srikar Dronamraju Cc: zhangwei(Jovi) Signed-off-by: Namhyung Kim Signed-off-by: Steven Rostedt --- kernel/trace/trace_uprobe.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 01fcb0db75cb..e4473367e7a4 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -882,9 +882,6 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, int ret; if (file) { - if (tu->tp.flags & TP_FLAG_PROFILE) - return -EINTR; - link = kmalloc(sizeof(*link), GFP_KERNEL); if (!link) return -ENOMEM; @@ -893,12 +890,8 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file, list_add_tail_rcu(&link->list, &tu->tp.files); tu->tp.flags |= TP_FLAG_TRACE; - } else { - if (tu->tp.flags & TP_FLAG_TRACE) - return -EINTR; - + } else tu->tp.flags |= TP_FLAG_PROFILE; - } ret = uprobe_buffer_enable(); if (ret < 0) -- cgit v1.2.3 From e1e232ca6b8faa210e5509f17d55519b4392524f Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 10 Feb 2014 23:38:46 -0500 Subject: tracing: Add trace_clock= kernel parameter Being able to change the trace clock at boot can be advantageous if you need a better source of when things happen across CPUs. The default trace clock is the fastest, but it uses local clocks which may not be synced across CPUs and it does not let you know when events took place with respect to events on other CPUs. The global trace clock can help in this case, and if you do not care about timings, the counter "clock" is the best, as that is just a simple atomic counter that is incremented for every event. Usage is to add "trace_clock=counter" on the kernel command line. 
You can replace counter with "global" or any of the clocks listed in /sys/kernel/debug/tracing/trace_clock Suggested-by: Thomas Gleixner Tested-by: Thomas Gleixner Reviewed-by: Thomas Gleixner Appreciated-by: Thomas Gleixner Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 61 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d95ec2876bbb..c90f55d80f86 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -181,6 +181,17 @@ static int __init set_trace_boot_options(char *str) } __setup("trace_options=", set_trace_boot_options); +static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; +static char *trace_boot_clock __initdata; + +static int __init set_trace_boot_clock(char *str) +{ + strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); + trace_boot_clock = trace_boot_clock_buf; + return 0; +} +__setup("trace_clock=", set_trace_boot_clock); + unsigned long long ns2usecs(cycle_t nsec) { @@ -4746,25 +4757,10 @@ static int tracing_clock_show(struct seq_file *m, void *v) return 0; } -static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, - size_t cnt, loff_t *fpos) +static int tracing_set_clock(struct trace_array *tr, const char *clockstr) { - struct seq_file *m = filp->private_data; - struct trace_array *tr = m->private; - char buf[64]; - const char *clockstr; int i; - if (cnt >= sizeof(buf)) - return -EINVAL; - - if (copy_from_user(&buf, ubuf, cnt)) - return -EFAULT; - - buf[cnt] = 0; - - clockstr = strstrip(buf); - for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; @@ -4792,6 +4788,32 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, mutex_unlock(&trace_types_lock); + return 0; +} + +static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *fpos) +{ + struct seq_file *m = filp->private_data; + struct trace_array *tr = m->private; + char buf[64]; + const char *clockstr; + int ret; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + clockstr = strstrip(buf); + + ret = tracing_set_clock(tr, clockstr); + if (ret) + return ret; + *fpos += cnt; return cnt; @@ -6574,6 +6596,13 @@ __init static int tracer_alloc_buffers(void) trace_init_cmdlines(); + if (trace_boot_clock) { + ret = tracing_set_clock(&global_trace, trace_boot_clock); + if (ret < 0) + pr_warning("Trace clock %s not defined, going back to default\n", + trace_boot_clock); + } + /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is -- cgit v1.2.3 From 1fcc155351f183e5044180eeb372a8ff47710855 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Wed, 19 Feb 2014 15:12:18 -0500 Subject: ftrace: Have static function trace clear ENABLED flag on unregister The ENABLED flag needs to be cleared when a ftrace_ops is unregistered otherwise it wont be able to be registered again. This is only for static tracing and does not affect DYNAMIC_FTRACE at all. 
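The fix is the mirror image of the registration side: registration sets the flag only when the register call succeeds, so unregistration must clear it when the unregister call succeeds, otherwise a later register finds the flag still set and refuses to re-arm. A tiny sketch of the pairing (invented names, not the kernel macros themselves):

/* Sketch of the paired set/clear for a static, non-DYNAMIC_FTRACE build. */
static int demo_startup(struct demo_ops *ops)
{
	int ret = demo_register(ops);

	if (!ret)
		ops->flags |= DEMO_FL_ENABLED;
	return ret;
}

static int demo_shutdown(struct demo_ops *ops)
{
	int ret = demo_unregister(ops);

	if (!ret)
		ops->flags &= ~DEMO_FL_ENABLED;	/* allow re-registration later */
	return ret;
}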
Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index dcee546f21bc..5313c1100d30 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4463,7 +4463,13 @@ static inline void ftrace_startup_enable(int command) { } (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ ___ret; \ }) -# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops) +# define ftrace_shutdown(ops, command) \ + ({ \ + int ___ret = __unregister_ftrace_function(ops); \ + if (!___ret) \ + (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ + ___ret; \ + }) # define ftrace_startup_sysctl() do { } while (0) # define ftrace_shutdown_sysctl() do { } while (0) -- cgit v1.2.3 From f5645d3575fe9a2c468889146e152f11c199826e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 21 Feb 2014 14:19:30 -0800 Subject: capability: Use current logging styles Prefix logging output with "capability: " via pr_fmt. Convert printks to pr_. Use pr__once instead of guard flags. Coalesce formats. Signed-off-by: Joe Perches Acked-by: Serge E. Hallyn Signed-off-by: James Morris --- kernel/capability.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/capability.c b/kernel/capability.c index 4e66bf9275b0..d6a6c91863ff 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -7,6 +7,8 @@ * 30 May 2002: Cleanup, Robert M. Love */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -42,15 +44,10 @@ __setup("no_file_caps", file_caps_disable); static void warn_legacy_capability_use(void) { - static int warned; - if (!warned) { - char name[sizeof(current->comm)]; - - printk(KERN_INFO "warning: `%s' uses 32-bit capabilities" - " (legacy support in use)\n", - get_task_comm(name, current)); - warned = 1; - } + char name[sizeof(current->comm)]; + + pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n", + get_task_comm(name, current)); } /* @@ -71,16 +68,10 @@ static void warn_legacy_capability_use(void) static void warn_deprecated_v2(void) { - static int warned; + char name[sizeof(current->comm)]; - if (!warned) { - char name[sizeof(current->comm)]; - - printk(KERN_INFO "warning: `%s' uses deprecated v2" - " capabilities in a way that may be insecure.\n", - get_task_comm(name, current)); - warned = 1; - } + pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n", + get_task_comm(name, current)); } /* @@ -380,7 +371,7 @@ bool has_capability_noaudit(struct task_struct *t, int cap) bool ns_capable(struct user_namespace *ns, int cap) { if (unlikely(!cap_valid(cap))) { - printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); + pr_crit("capable() called with invalid cap=%u\n", cap); BUG(); } -- cgit v1.2.3 From c75611282cf1bf717c1866e7a7eb4d0743815187 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:01 -0500 Subject: cgroup: add css_set->mg_tasks Currently, while migrating tasks from one cgroup to another, cgroup_attach_task() builds a flex array of all target tasks; unfortunately, this has a couple issues. * Flex array has size limit. On 64bit, struct task_and_cgroup is 24bytes making the flex element limit around 87k. It is a high number but not impossible to hit. This means that the current cgroup implementation can't migrate a process with more than 87k threads. 
* Process migration involves memory allocation whose size is dependent on the number of threads the process has. This means that cgroup core can't guarantee success or failure of multi-process migrations as memory allocation failure can happen in the middle. This is in part because cgroup can't grab threadgroup locks of multiple processes at the same time, so when there are multiple processes to migrate, it is imposible to tell how many tasks are to be migrated beforehand. Note that this already affects cgroup_transfer_tasks(). cgroup currently cannot guarantee atomic success or failure of the operation. It may fail in the middle and after such failure cgroup doesn't have enough information to roll back properly. It just aborts with some tasks migrated and others not. To resolve the situation, we're going to use task->cg_list during migration too. Instead of building a separate array, target tasks will be linked into a dedicated migration list_head on the owning css_set. Tasks on the migration list are treated the same as tasks on the usual tasks list; however, being on a separate list allows cgroup migration code path to keep track of the target tasks by simply keeping the list of css_sets with tasks being migrated, making unpredictable dynamic allocation unnecessary. In prepartion of such migration path update, this patch introduces css_set->mg_tasks list and updates css_set task iterations so that they walk both css_set->tasks and ->mg_tasks. Note that ->mg_tasks isn't used yet. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 56 +++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 8ab800c7bac0..b80c611ff836 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -644,6 +644,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, atomic_set(&cset->refcount, 1); INIT_LIST_HEAD(&cset->cgrp_links); INIT_LIST_HEAD(&cset->tasks); + INIT_LIST_HEAD(&cset->mg_tasks); INIT_HLIST_NODE(&cset->hlist); /* Copy the set of subsystem state objects generated in @@ -2590,9 +2591,14 @@ static void css_advance_task_iter(struct css_task_iter *it) } link = list_entry(l, struct cgrp_cset_link, cset_link); cset = link->cset; - } while (list_empty(&cset->tasks)); + } while (list_empty(&cset->tasks) && list_empty(&cset->mg_tasks)); + it->cset_link = l; - it->task = cset->tasks.next; + + if (!list_empty(&cset->tasks)) + it->task = cset->tasks.next; + else + it->task = cset->mg_tasks.next; } /** @@ -2636,24 +2642,29 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) { struct task_struct *res; struct list_head *l = it->task; - struct cgrp_cset_link *link; + struct cgrp_cset_link *link = list_entry(it->cset_link, + struct cgrp_cset_link, cset_link); /* If the iterator cg is NULL, we have no tasks */ if (!it->cset_link) return NULL; res = list_entry(l, struct task_struct, cg_list); - /* Advance iterator to find next entry */ + + /* + * Advance iterator to find next entry. cset->tasks is consumed + * first and then ->mg_tasks. After ->mg_tasks, we move onto the + * next cset. + */ l = l->next; - link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link); - if (l == &link->cset->tasks) { - /* - * We reached the end of this task list - move on to the - * next cgrp_cset_link. 
- */ + + if (l == &link->cset->tasks) + l = link->cset->mg_tasks.next; + + if (l == &link->cset->mg_tasks) css_advance_task_iter(it); - } else { + else it->task = l; - } + return res; } @@ -4502,16 +4513,23 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v) struct css_set *cset = link->cset; struct task_struct *task; int count = 0; + seq_printf(seq, "css_set %p\n", cset); + list_for_each_entry(task, &cset->tasks, cg_list) { - if (count++ > MAX_TASKS_SHOWN_PER_CSS) { - seq_puts(seq, " ...\n"); - break; - } else { - seq_printf(seq, " task %d\n", - task_pid_vnr(task)); - } + if (count++ > MAX_TASKS_SHOWN_PER_CSS) + goto overflow; + seq_printf(seq, " task %d\n", task_pid_vnr(task)); + } + + list_for_each_entry(task, &cset->mg_tasks, cg_list) { + if (count++ > MAX_TASKS_SHOWN_PER_CSS) + goto overflow; + seq_printf(seq, " task %d\n", task_pid_vnr(task)); } + continue; + overflow: + seq_puts(seq, " ...\n"); } up_read(&css_set_rwsem); return 0; -- cgit v1.2.3 From b3dc094e93905ae9c1bc0815402ad8e5b203d068 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:01 -0500 Subject: cgroup: use css_set->mg_tasks to track target tasks during migration Currently, while migrating tasks from one cgroup to another, cgroup_attach_task() builds a flex array of all target tasks; unfortunately, this has a couple issues. * Flex array has size limit. On 64bit, struct task_and_cgroup is 24bytes making the flex element limit around 87k. It is a high number but not impossible to hit. This means that the current cgroup implementation can't migrate a process with more than 87k threads. * Process migration involves memory allocation whose size is dependent on the number of threads the process has. This means that cgroup core can't guarantee success or failure of multi-process migrations as memory allocation failure can happen in the middle. This is in part because cgroup can't grab threadgroup locks of multiple processes at the same time, so when there are multiple processes to migrate, it is imposible to tell how many tasks are to be migrated beforehand. Note that this already affects cgroup_transfer_tasks(). cgroup currently cannot guarantee atomic success or failure of the operation. It may fail in the middle and after such failure cgroup doesn't have enough information to roll back properly. It just aborts with some tasks migrated and others not. To resolve the situation, this patch updates the migration path to use task->cg_list to track target tasks. The previous patch already added css_set->mg_tasks and updated iterations in non-migration paths to include them during task migration. This patch updates migration path to actually make use of it. Instead of putting onto a flex_array, each target task is moved from its css_set->tasks list to css_set->mg_tasks and the migration path keeps trace of all the source css_sets and the associated cgroups. Once all source css_sets are determined, the destination css_set for each is determined, linked to the matching source css_set and put on a separate list. To iterate the target tasks, migration path just needs to iterat through either the source or target css_sets, depending on whether migration has been committed or not, and the tasks on their ->mg_tasks lists. cgroup_taskset is updated to contain the list_heads for source and target css_sets and the iteration cursor. cgroup_taskset_*() are accordingly updated to walk through css_sets and their ->mg_tasks. This resolves the above listed issues with moderate additional complexity. 
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 223 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 115 insertions(+), 108 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b80c611ff836..5def4a800425 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -52,7 +52,6 @@ #include #include #include /* TODO: replace with more sophisticated array */ -#include /* used in cgroup_attach_task */ #include #include @@ -645,6 +644,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, INIT_LIST_HEAD(&cset->cgrp_links); INIT_LIST_HEAD(&cset->tasks); INIT_LIST_HEAD(&cset->mg_tasks); + INIT_LIST_HEAD(&cset->mg_node); INIT_HLIST_NODE(&cset->hlist); /* Copy the set of subsystem state objects generated in @@ -1639,20 +1639,26 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) } EXPORT_SYMBOL_GPL(task_cgroup_path); -/* - * Control Group taskset - */ -struct task_and_cgroup { - struct task_struct *task; - struct cgroup *cgrp; - struct css_set *cset; -}; - +/* used to track tasks and other necessary states during migration */ struct cgroup_taskset { - struct task_and_cgroup single; - struct flex_array *tc_array; - int tc_array_len; - int idx; + /* the src and dst cset list running through cset->mg_node */ + struct list_head src_csets; + struct list_head dst_csets; + + /* + * Fields for cgroup_taskset_*() iteration. + * + * Before migration is committed, the target migration tasks are on + * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of + * the csets on ->dst_csets. ->csets point to either ->src_csets + * or ->dst_csets depending on whether migration is committed. + * + * ->cur_csets and ->cur_task point to the current task position + * during iteration. 
+ */ + struct list_head *csets; + struct css_set *cur_cset; + struct task_struct *cur_task; }; /** @@ -1663,12 +1669,10 @@ struct cgroup_taskset { */ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) { - if (tset->tc_array) { - tset->idx = 0; - return cgroup_taskset_next(tset); - } else { - return tset->single.task; - } + tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); + tset->cur_task = NULL; + + return cgroup_taskset_next(tset); } /** @@ -1680,13 +1684,27 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) */ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) { - struct task_and_cgroup *tc; + struct css_set *cset = tset->cur_cset; + struct task_struct *task = tset->cur_task; - if (!tset->tc_array || tset->idx >= tset->tc_array_len) - return NULL; + while (&cset->mg_node != tset->csets) { + if (!task) + task = list_first_entry(&cset->mg_tasks, + struct task_struct, cg_list); + else + task = list_next_entry(task, cg_list); - tc = flex_array_get(tset->tc_array, tset->idx++); - return tc->task; + if (&task->cg_list != &cset->mg_tasks) { + tset->cur_cset = cset; + tset->cur_task = task; + return task; + } + + cset = list_next_entry(cset, mg_node); + task = NULL; + } + + return NULL; } /** @@ -1714,11 +1732,13 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, WARN_ON_ONCE(tsk->flags & PF_EXITING); old_cset = task_css_set(tsk); + get_css_set(new_cset); + task_lock(tsk); rcu_assign_pointer(tsk->cgroups, new_cset); task_unlock(tsk); - list_move(&tsk->cg_list, &new_cset->tasks); + list_move(&tsk->cg_list, &new_cset->mg_tasks); /* * We just gained a reference on old_cset by taking it from the @@ -1741,80 +1761,58 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, bool threadgroup) { - int ret, i, group_size; - struct cgroupfs_root *root = cgrp->root; + struct cgroup_taskset tset = { + .src_csets = LIST_HEAD_INIT(tset.src_csets), + .dst_csets = LIST_HEAD_INIT(tset.dst_csets), + .csets = &tset.src_csets, + }; struct cgroup_subsys_state *css, *failed_css = NULL; - /* threadgroup list cursor and array */ - struct task_struct *task; - struct task_and_cgroup *tc; - struct flex_array *group; - struct cgroup_taskset tset = { }; - - /* - * step 0: in order to do expensive, possibly blocking operations for - * every thread, we cannot iterate the thread group list, since it needs - * rcu or tasklist locked. instead, build an array of all threads in the - * group - group_rwsem prevents new threads from appearing, and if - * threads exit, this will just be an over-estimate. - */ - if (threadgroup) - group_size = get_nr_threads(leader); - else - group_size = 1; - /* flex_array supports very large thread-groups better than kmalloc. */ - group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL); - if (!group) - return -ENOMEM; - /* pre-allocate to guarantee space while iterating in rcu read-side. */ - ret = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); - if (ret) - goto out_free_group_list; + struct css_set *cset, *tmp_cset; + struct task_struct *task, *tmp_task; + int i, ret; - i = 0; /* * Prevent freeing of tasks while we take a snapshot. Tasks that are * already PF_EXITING could be freed from underneath us unless we * take an rcu_read_lock. 
*/ - down_read(&css_set_rwsem); + down_write(&css_set_rwsem); rcu_read_lock(); task = leader; do { - struct task_and_cgroup ent; + struct cgroup *src_cgrp; /* @task either already exited or can't exit until the end */ if (task->flags & PF_EXITING) goto next; - /* as per above, nr_threads may decrease, but not increase. */ - BUG_ON(i >= group_size); - ent.task = task; - ent.cgrp = task_cgroup_from_root(task, root); + cset = task_css_set(task); + src_cgrp = task_cgroup_from_root(task, cgrp->root); + /* nothing to do if this task is already in the cgroup */ - if (ent.cgrp == cgrp) + if (src_cgrp == cgrp) goto next; - /* - * saying GFP_ATOMIC has no effect here because we did prealloc - * earlier, but it's good form to communicate our expectations. - */ - ret = flex_array_put(group, i, &ent, GFP_ATOMIC); - BUG_ON(ret != 0); - i++; + + if (!cset->mg_src_cgrp) { + WARN_ON(!list_empty(&cset->mg_tasks)); + WARN_ON(!list_empty(&cset->mg_node)); + + cset->mg_src_cgrp = src_cgrp; + list_add(&cset->mg_node, &tset.src_csets); + get_css_set(cset); + } + + list_move(&task->cg_list, &cset->mg_tasks); next: if (!threadgroup) break; } while_each_thread(leader, task); rcu_read_unlock(); - up_read(&css_set_rwsem); - /* remember the number of threads in the array for later. */ - group_size = i; - tset.tc_array = group; - tset.tc_array_len = group_size; + up_write(&css_set_rwsem); /* methods shouldn't be called if no task is actually migrating */ - ret = 0; - if (!group_size) - goto out_free_group_list; + if (list_empty(&tset.src_csets)) + return 0; /* * step 1: check that we can legitimately attach to the cgroup. @@ -1833,16 +1831,21 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, * step 2: make sure css_sets exist for all threads to be migrated. * we use find_css_set, which allocates a new one if necessary. */ - for (i = 0; i < group_size; i++) { - struct css_set *old_cset; + list_for_each_entry(cset, &tset.src_csets, mg_node) { + struct css_set *dst_cset; - tc = flex_array_get(group, i); - old_cset = task_css_set(tc->task); - tc->cset = find_css_set(old_cset, cgrp); - if (!tc->cset) { + dst_cset = find_css_set(cset, cgrp); + if (!dst_cset) { ret = -ENOMEM; - goto out_put_css_set_refs; + goto out_release_tset; } + + if (list_empty(&dst_cset->mg_node)) + list_add(&dst_cset->mg_node, &tset.dst_csets); + else + put_css_set(dst_cset, false); + + cset->mg_dst_cset = dst_cset; } /* @@ -1851,12 +1854,17 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, * failure cases after here, so this is the commit point. */ down_write(&css_set_rwsem); - for (i = 0; i < group_size; i++) { - tc = flex_array_get(group, i); - cgroup_task_migrate(tc->cgrp, tc->task, tc->cset); + list_for_each_entry(cset, &tset.src_csets, mg_node) { + list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) + cgroup_task_migrate(cset->mg_src_cgrp, task, + cset->mg_dst_cset); } up_write(&css_set_rwsem); - /* nothing is sensitive to fork() after this point. */ + + /* migration is committed, all target tasks are now on dst_csets */ + tset.csets = &tset.dst_csets; + + /* nothing is sensitive to fork() after this point */ /* * step 4: do subsystem attach callbacks. @@ -1865,30 +1873,27 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, if (css->ss->attach) css->ss->attach(css, &tset); - /* - * step 5: success! 
and cleanup - */ ret = 0; -out_put_css_set_refs: - if (ret) { - for (i = 0; i < group_size; i++) { - tc = flex_array_get(group, i); - if (!tc->cset) - break; - put_css_set(tc->cset, false); - } - } + goto out_release_tset; + out_cancel_attach: - if (ret) { - for_each_css(css, i, cgrp) { - if (css == failed_css) - break; - if (css->ss->cancel_attach) - css->ss->cancel_attach(css, &tset); - } + for_each_css(css, i, cgrp) { + if (css == failed_css) + break; + if (css->ss->cancel_attach) + css->ss->cancel_attach(css, &tset); } -out_free_group_list: - flex_array_free(group); +out_release_tset: + down_write(&css_set_rwsem); + list_splice_init(&tset.dst_csets, &tset.src_csets); + list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) { + list_splice_init(&cset->mg_tasks, &cset->tasks); + cset->mg_dst_cset = NULL; + cset->mg_src_cgrp = NULL; + list_del_init(&cset->mg_node); + put_css_set_locked(cset, false); + } + up_write(&css_set_rwsem); return ret; } @@ -3895,6 +3900,8 @@ int __init cgroup_init_early(void) atomic_set(&init_css_set.refcount, 1); INIT_LIST_HEAD(&init_css_set.cgrp_links); INIT_LIST_HEAD(&init_css_set.tasks); + INIT_LIST_HEAD(&init_css_set.mg_tasks); + INIT_LIST_HEAD(&init_css_set.mg_node); INIT_HLIST_NODE(&init_css_set.hlist); css_set_count = 1; init_cgroup_root(&cgroup_dummy_root); -- cgit v1.2.3 From ceb6a081f6f52d17ec9e46e271cc26a1eb8a7573 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:02 -0500 Subject: cgroup: separate out cset_group_from_root() from task_cgroup_from_root() This will be used by the planned migration path update. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5def4a800425..23e3a8c74bd4 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -758,25 +758,15 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) cgroup_free_root(root); } -/* - * Return the cgroup for "task" from the given hierarchy. Must be - * called with cgroup_mutex and css_set_rwsem held. - */ -static struct cgroup *task_cgroup_from_root(struct task_struct *task, +/* look up cgroup associated with given css_set on the specified hierarchy */ +static struct cgroup *cset_cgroup_from_root(struct css_set *cset, struct cgroupfs_root *root) { - struct css_set *cset; struct cgroup *res = NULL; lockdep_assert_held(&cgroup_mutex); lockdep_assert_held(&css_set_rwsem); - /* - * No need to lock the task - since we hold cgroup_mutex the - * task can't change groups, so the only thing that can happen - * is that it exits and its css is set back to init_css_set. - */ - cset = task_css_set(task); if (cset == &init_css_set) { res = &root->top_cgroup; } else { @@ -796,6 +786,21 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, return res; } +/* + * Return the cgroup for "task" from the given hierarchy. Must be + * called with cgroup_mutex and css_set_rwsem held. + */ +static struct cgroup *task_cgroup_from_root(struct task_struct *task, + struct cgroupfs_root *root) +{ + /* + * No need to lock the task - since we hold cgroup_mutex the + * task can't change groups, so the only thing that can happen + * is that it exits and its css is set back to init_css_set. + */ + return cset_cgroup_from_root(task_css_set(task), root); +} + /* * There is one global cgroup mutex. We also require taking * task_lock() when dereferencing a task's cgroup subsys pointers. 
-- cgit v1.2.3 From 1958d2d53dadbb1c9aaf0b37741f13a60098b243 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:03 -0500 Subject: cgroup: split process / task migration into four steps Currently, process / task migration is a single operation which may fail depending on memory pressure or the involved controllers' ->can_attach() callbacks. One problem with this approach is migration of multiple targets. It's impossible to tell whether a given target will be successfully migrated beforehand and cgroup core can't keep track of enough states to roll back after intermediate failure. This is already an issue with cgroup_transfer_tasks(). Also, we're gonna need multiple target migration for unified hierarchy. This patch splits migration into four stages - cgroup_migrate_add_src(), cgroup_migrate_prepare_dst(), cgroup_migrate() and cgroup_migrate_finish(), where cgroup_migrate_prepare_dst() performs all the operations which may fail due to allocation failure without actually migrating the target. The four separate stages mean that, disregarding ->can_attach() failures, the success or failure of multi target migration can be determined before performing any actual migration. If preparations of all targets succeed, the whole thing will succeed. If not, the whole operation can fail without any side-effect. Since the previous patch to use css_set->mg_tasks to keep track of migration targets, the only thing which may need memory allocation during migration is the target css_sets. cgroup_migrate_prepare() pins all source and target css_sets and link them up. Note that this can be performed without holding threadgroup_lock even if the target is a process. As long as cgroup_mutex is held, no new css_set can be put into play. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 240 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 181 insertions(+), 59 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 23e3a8c74bd4..a93f6f1ebc69 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -644,6 +644,7 @@ static struct css_set *find_css_set(struct css_set *old_cset, INIT_LIST_HEAD(&cset->cgrp_links); INIT_LIST_HEAD(&cset->tasks); INIT_LIST_HEAD(&cset->mg_tasks); + INIT_LIST_HEAD(&cset->mg_preload_node); INIT_LIST_HEAD(&cset->mg_node); INIT_HLIST_NODE(&cset->hlist); @@ -1755,16 +1756,137 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, } /** - * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup - * @cgrp: the cgroup to attach to - * @leader: the task or the leader of the threadgroup to be attached - * @threadgroup: attach the whole threadgroup? + * cgroup_migrate_finish - cleanup after attach + * @preloaded_csets: list of preloaded css_sets * - * Call holding cgroup_mutex and the group_rwsem of the leader. Will take - * task_lock of @tsk or each thread in the threadgroup individually in turn. + * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See + * those functions for details. 
*/ -static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, - bool threadgroup) +static void cgroup_migrate_finish(struct list_head *preloaded_csets) +{ + struct css_set *cset, *tmp_cset; + + lockdep_assert_held(&cgroup_mutex); + + down_write(&css_set_rwsem); + list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { + cset->mg_src_cgrp = NULL; + cset->mg_dst_cset = NULL; + list_del_init(&cset->mg_preload_node); + put_css_set_locked(cset, false); + } + up_write(&css_set_rwsem); +} + +/** + * cgroup_migrate_add_src - add a migration source css_set + * @src_cset: the source css_set to add + * @dst_cgrp: the destination cgroup + * @preloaded_csets: list of preloaded css_sets + * + * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin + * @src_cset and add it to @preloaded_csets, which should later be cleaned + * up by cgroup_migrate_finish(). + * + * This function may be called without holding threadgroup_lock even if the + * target is a process. Threads may be created and destroyed but as long + * as cgroup_mutex is not dropped, no new css_set can be put into play and + * the preloaded css_sets are guaranteed to cover all migrations. + */ +static void cgroup_migrate_add_src(struct css_set *src_cset, + struct cgroup *dst_cgrp, + struct list_head *preloaded_csets) +{ + struct cgroup *src_cgrp; + + lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&css_set_rwsem); + + src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); + + /* nothing to do if this cset already belongs to the cgroup */ + if (src_cgrp == dst_cgrp) + return; + + if (!list_empty(&src_cset->mg_preload_node)) + return; + + WARN_ON(src_cset->mg_src_cgrp); + WARN_ON(!list_empty(&src_cset->mg_tasks)); + WARN_ON(!list_empty(&src_cset->mg_node)); + + src_cset->mg_src_cgrp = src_cgrp; + get_css_set(src_cset); + list_add(&src_cset->mg_preload_node, preloaded_csets); +} + +/** + * cgroup_migrate_prepare_dst - prepare destination css_sets for migration + * @dst_cgrp: the destination cgroup + * @preloaded_csets: list of preloaded source css_sets + * + * Tasks are about to be moved to @dst_cgrp and all the source css_sets + * have been preloaded to @preloaded_csets. This function looks up and + * pins all destination css_sets, links each to its source, and put them on + * @preloaded_csets. + * + * This function must be called after cgroup_migrate_add_src() has been + * called on each migration source css_set. After migration is performed + * using cgroup_migrate(), cgroup_migrate_finish() must be called on + * @preloaded_csets. 
+ */ +static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp, + struct list_head *preloaded_csets) +{ + LIST_HEAD(csets); + struct css_set *src_cset; + + lockdep_assert_held(&cgroup_mutex); + + /* look up the dst cset for each src cset and link it to src */ + list_for_each_entry(src_cset, preloaded_csets, mg_preload_node) { + struct css_set *dst_cset; + + dst_cset = find_css_set(src_cset, dst_cgrp); + if (!dst_cset) + goto err; + + WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); + src_cset->mg_dst_cset = dst_cset; + + if (list_empty(&dst_cset->mg_preload_node)) + list_add(&dst_cset->mg_preload_node, &csets); + else + put_css_set(dst_cset, false); + } + + list_splice(&csets, preloaded_csets); + return 0; +err: + cgroup_migrate_finish(&csets); + return -ENOMEM; +} + +/** + * cgroup_migrate - migrate a process or task to a cgroup + * @cgrp: the destination cgroup + * @leader: the leader of the process or the task to migrate + * @threadgroup: whether @leader points to the whole process or a single task + * + * Migrate a process or task denoted by @leader to @cgrp. If migrating a + * process, the caller must be holding threadgroup_lock of @leader. The + * caller is also responsible for invoking cgroup_migrate_add_src() and + * cgroup_migrate_prepare_dst() on the targets before invoking this + * function and following up with cgroup_migrate_finish(). + * + * As long as a controller's ->can_attach() doesn't fail, this function is + * guaranteed to succeed. This means that, excluding ->can_attach() + * failure, when migrating multiple targets, the success or failure can be + * decided for all targets by invoking group_migrate_prepare_dst() before + * actually starting migrating. + */ +static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, + bool threadgroup) { struct cgroup_taskset tset = { .src_csets = LIST_HEAD_INIT(tset.src_csets), @@ -1785,29 +1907,17 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, rcu_read_lock(); task = leader; do { - struct cgroup *src_cgrp; - /* @task either already exited or can't exit until the end */ if (task->flags & PF_EXITING) goto next; cset = task_css_set(task); - src_cgrp = task_cgroup_from_root(task, cgrp->root); - - /* nothing to do if this task is already in the cgroup */ - if (src_cgrp == cgrp) + if (!cset->mg_src_cgrp) goto next; - if (!cset->mg_src_cgrp) { - WARN_ON(!list_empty(&cset->mg_tasks)); - WARN_ON(!list_empty(&cset->mg_node)); - - cset->mg_src_cgrp = src_cgrp; - list_add(&cset->mg_node, &tset.src_csets); - get_css_set(cset); - } - list_move(&task->cg_list, &cset->mg_tasks); + list_move(&cset->mg_node, &tset.src_csets); + list_move(&cset->mg_dst_cset->mg_node, &tset.dst_csets); next: if (!threadgroup) break; @@ -1819,9 +1929,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, if (list_empty(&tset.src_csets)) return 0; - /* - * step 1: check that we can legitimately attach to the cgroup. - */ + /* check that we can legitimately attach to the cgroup */ for_each_css(css, i, cgrp) { if (css->ss->can_attach) { ret = css->ss->can_attach(css, &tset); @@ -1833,30 +1941,9 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, } /* - * step 2: make sure css_sets exist for all threads to be migrated. - * we use find_css_set, which allocates a new one if necessary. 
- */ - list_for_each_entry(cset, &tset.src_csets, mg_node) { - struct css_set *dst_cset; - - dst_cset = find_css_set(cset, cgrp); - if (!dst_cset) { - ret = -ENOMEM; - goto out_release_tset; - } - - if (list_empty(&dst_cset->mg_node)) - list_add(&dst_cset->mg_node, &tset.dst_csets); - else - put_css_set(dst_cset, false); - - cset->mg_dst_cset = dst_cset; - } - - /* - * step 3: now that we're guaranteed success wrt the css_sets, - * proceed to move all tasks to the new cgroup. There are no - * failure cases after here, so this is the commit point. + * Now that we're guaranteed success, proceed to move all tasks to + * the new cgroup. There are no failure cases after here, so this + * is the commit point. */ down_write(&css_set_rwsem); list_for_each_entry(cset, &tset.src_csets, mg_node) { @@ -1866,14 +1953,13 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *leader, } up_write(&css_set_rwsem); - /* migration is committed, all target tasks are now on dst_csets */ - tset.csets = &tset.dst_csets; - - /* nothing is sensitive to fork() after this point */ - /* - * step 4: do subsystem attach callbacks. + * Migration is committed, all target tasks are now on dst_csets. + * Nothing is sensitive to fork() after this point. Notify + * controllers that migration is complete. */ + tset.csets = &tset.dst_csets; + for_each_css(css, i, cgrp) if (css->ss->attach) css->ss->attach(css, &tset); @@ -1893,15 +1979,50 @@ out_release_tset: list_splice_init(&tset.dst_csets, &tset.src_csets); list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) { list_splice_init(&cset->mg_tasks, &cset->tasks); - cset->mg_dst_cset = NULL; - cset->mg_src_cgrp = NULL; list_del_init(&cset->mg_node); - put_css_set_locked(cset, false); } up_write(&css_set_rwsem); return ret; } +/** + * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup + * @dst_cgrp: the cgroup to attach to + * @leader: the task or the leader of the threadgroup to be attached + * @threadgroup: attach the whole threadgroup? + * + * Call holding cgroup_mutex and the group_rwsem of the leader. Will take + * task_lock of @tsk or each thread in the threadgroup individually in turn. + */ +static int cgroup_attach_task(struct cgroup *dst_cgrp, + struct task_struct *leader, bool threadgroup) +{ + LIST_HEAD(preloaded_csets); + struct task_struct *task; + int ret; + + /* look up all src csets */ + down_read(&css_set_rwsem); + rcu_read_lock(); + task = leader; + do { + cgroup_migrate_add_src(task_css_set(task), dst_cgrp, + &preloaded_csets); + if (!threadgroup) + break; + } while_each_thread(leader, task); + rcu_read_unlock(); + up_read(&css_set_rwsem); + + /* prepare dst csets and commit */ + ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets); + if (!ret) + ret = cgroup_migrate(dst_cgrp, leader, threadgroup); + + cgroup_migrate_finish(&preloaded_csets); + return ret; +} + /* * Find the task_struct of the task to attach by vpid and pass it along to the * function to attach either it or all tasks in its threadgroup. 
Will lock @@ -3906,6 +4027,7 @@ int __init cgroup_init_early(void) INIT_LIST_HEAD(&init_css_set.cgrp_links); INIT_LIST_HEAD(&init_css_set.tasks); INIT_LIST_HEAD(&init_css_set.mg_tasks); + INIT_LIST_HEAD(&init_css_set.mg_preload_node); INIT_LIST_HEAD(&init_css_set.mg_node); INIT_HLIST_NODE(&init_css_set.hlist); css_set_count = 1; -- cgit v1.2.3 From eaf797abc53b0ab3f0a02d4ef873a565fcce6daa Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:03 -0500 Subject: cgroup: update how a newly forked task gets associated with css_set When a new process is forked, cgroup_fork() associates it with the css_set of its parent but doesn't link it into it. After the new process is linked to tasklist, cgroup_post_fork() does the linking. This is problematic for cgroup_transfer_tasks() as there's no way to tell whether there are tasks which are pointing to a css_set but not linked yet. It is impossible to implement an operation which transfer all tasks of a cgroup to another and the current cgroup_transfer_tasks() can easily be tricked into leaving a newly forked process behind if it gets called between cgroup_fork() and cgroup_post_fork(). Let's make association with a css_set and linking atomic by moving it to cgroup_post_fork(). cgroup_fork() sets child->cgroups to init_css_set as a placeholder and cgroup_post_fork() is updated to perform both the association with the parent's cgroup and linking there. This means that a newly created task will point to init_css_set without holding a ref to it much like what it does on the exit path. Empty cg_list is used to indicate that the task isn't holding a ref to the associated css_set. This fixes an actual bug with cgroup_transfer_tasks(); however, I'm not marking it for -stable. The whole thing is broken in multiple other ways which require invasive updates to fix and I don't think it's worthwhile to bother with backporting this particular one. Fortunately, the only user is cpuset and these bugs don't crash the machine. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 86 ++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 55 insertions(+), 31 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index a93f6f1ebc69..fa0567f4eedd 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1342,8 +1342,12 @@ static void cgroup_enable_task_cg_lists(void) * racing against cgroup_exit(). 
*/ spin_lock_irq(&p->sighand->siglock); - if (!(p->flags & PF_EXITING)) - list_add(&p->cg_list, &task_css_set(p)->tasks); + if (!(p->flags & PF_EXITING)) { + struct css_set *cset = task_css_set(p); + + list_add(&p->cg_list, &cset->tasks); + get_css_set(cset); + } spin_unlock_irq(&p->sighand->siglock); task_unlock(p); @@ -1911,6 +1915,10 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, if (task->flags & PF_EXITING) goto next; + /* leave @task alone if post_fork() hasn't linked it yet */ + if (list_empty(&task->cg_list)) + goto next; + cset = task_css_set(task); if (!cset->mg_src_cgrp) goto next; @@ -2815,6 +2823,12 @@ void css_task_iter_end(struct css_task_iter *it) * cgroup_trasnsfer_tasks - move tasks from one cgroup to another * @to: cgroup to which the tasks will be moved * @from: cgroup in which the tasks currently reside + * + * Locking rules between cgroup_post_fork() and the migration path + * guarantee that, if a task is forking while being migrated, the new child + * is guaranteed to be either visible in the source cgroup after the + * parent's migration is complete or put into the target cgroup. No task + * can slip out of migration through forking. */ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) { @@ -4243,27 +4257,16 @@ static const struct file_operations proc_cgroupstats_operations = { }; /** - * cgroup_fork - attach newly forked task to its parents cgroup. + * cgroup_fork - initialize cgroup related fields during copy_process() * @child: pointer to task_struct of forking parent process. * - * Description: A task inherits its parent's cgroup at fork(). - * - * A pointer to the shared css_set was automatically copied in - * fork.c by dup_task_struct(). However, we ignore that copy, since - * it was not made under the protection of RCU or cgroup_mutex, so - * might no longer be a valid cgroup pointer. cgroup_attach_task() might - * have already changed current->cgroups, allowing the previously - * referenced cgroup group to be removed and freed. - * - * At the point that cgroup_fork() is called, 'current' is the parent - * task, and the passed argument 'child' points to the child task. + * A task is associated with the init_css_set until cgroup_post_fork() + * attaches it to the parent's css_set. Empty cg_list indicates that + * @child isn't holding reference to its css_set. */ void cgroup_fork(struct task_struct *child) { - task_lock(current); - get_css_set(task_css_set(current)); - child->cgroups = current->cgroups; - task_unlock(current); + RCU_INIT_POINTER(child->cgroups, &init_css_set); INIT_LIST_HEAD(&child->cg_list); } @@ -4283,21 +4286,38 @@ void cgroup_post_fork(struct task_struct *child) int i; /* - * use_task_css_set_links is set to 1 before we walk the tasklist - * under the tasklist_lock and we read it here after we added the child - * to the tasklist under the tasklist_lock as well. If the child wasn't - * yet in the tasklist when we walked through it from - * cgroup_enable_task_cg_lists(), then use_task_css_set_links value - * should be visible now due to the paired locking and barriers implied - * by LOCK/UNLOCK: it is written before the tasklist_lock unlock - * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock - * lock on fork. + * This may race against cgroup_enable_task_cg_links(). 
As that + * function sets use_task_css_set_links before grabbing + * tasklist_lock and we just went through tasklist_lock to add + * @child, it's guaranteed that either we see the set + * use_task_css_set_links or cgroup_enable_task_cg_lists() sees + * @child during its iteration. + * + * If we won the race, @child is associated with %current's + * css_set. Grabbing css_set_rwsem guarantees both that the + * association is stable, and, on completion of the parent's + * migration, @child is visible in the source of migration or + * already in the destination cgroup. This guarantee is necessary + * when implementing operations which need to migrate all tasks of + * a cgroup to another. + * + * Note that if we lose to cgroup_enable_task_cg_links(), @child + * will remain in init_css_set. This is safe because all tasks are + * in the init_css_set before cg_links is enabled and there's no + * operation which transfers all tasks out of init_css_set. */ if (use_task_css_set_links) { + struct css_set *cset; + down_write(&css_set_rwsem); + cset = task_css_set_check(current, + lockdep_is_held(&css_set_rwsem)); task_lock(child); - if (list_empty(&child->cg_list)) - list_add(&child->cg_list, &task_css_set(child)->tasks); + if (list_empty(&child->cg_list)) { + rcu_assign_pointer(child->cgroups, cset); + list_add(&child->cg_list, &cset->tasks); + get_css_set(cset); + } task_unlock(child); up_write(&css_set_rwsem); } @@ -4353,6 +4373,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) { struct cgroup_subsys *ss; struct css_set *cset; + bool put_cset = false; int i; /* @@ -4361,8 +4382,10 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) */ if (!list_empty(&tsk->cg_list)) { down_write(&css_set_rwsem); - if (!list_empty(&tsk->cg_list)) + if (!list_empty(&tsk->cg_list)) { list_del_init(&tsk->cg_list); + put_cset = true; + } up_write(&css_set_rwsem); } @@ -4384,7 +4407,8 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) } task_unlock(tsk); - put_css_set(cset, true); + if (put_cset) + put_css_set(cset, true); } static void check_for_release(struct cgroup *cgrp) -- cgit v1.2.3 From 0e1d768f1b1873272ec4e8dc1482bb5281855017 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:03 -0500 Subject: cgroup: drop task_lock() protection around task->cgroups For optimization, task_lock() is additionally used to protect task->cgroups. The optimization is pretty dubious as either css_set_rwsem is grabbed anyway or PF_EXITING already protects task->cgroups. It adds only overhead and confusion at this point. Let's drop task_[un]lock() and update comments accordingly. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 97 ++++++++++++++------------------------------------------- 1 file changed, 24 insertions(+), 73 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fa0567f4eedd..f783af900208 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -80,12 +80,21 @@ static DEFINE_MUTEX(cgroup_tree_mutex); /* * cgroup_mutex is the master lock. Any modification to cgroup or its * hierarchy must be performed while holding it. + * + * css_set_rwsem protects task->cgroups pointer, the list of css_set + * objects, and the chain of tasks off each css_set. + * + * These locks are exported if CONFIG_PROVE_RCU so that accessors in + * cgroup.h can use them for lockdep annotations. 
*/ #ifdef CONFIG_PROVE_RCU DEFINE_MUTEX(cgroup_mutex); -EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for lockdep */ +DECLARE_RWSEM(css_set_rwsem); +EXPORT_SYMBOL_GPL(cgroup_mutex); +EXPORT_SYMBOL_GPL(css_set_rwsem); #else static DEFINE_MUTEX(cgroup_mutex); +static DECLARE_RWSEM(css_set_rwsem); #endif /* @@ -338,12 +347,6 @@ struct cgrp_cset_link { static struct css_set init_css_set; static struct cgrp_cset_link init_cgrp_cset_link; - -/* - * css_set_rwsem protects the list of css_set objects, and the chain of - * tasks off each css_set. - */ -static DECLARE_RWSEM(css_set_rwsem); static int css_set_count; /* @@ -803,10 +806,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, } /* - * There is one global cgroup mutex. We also require taking - * task_lock() when dereferencing a task's cgroup subsys pointers. - * See "The task_lock() exception", at the end of this comment. - * * A task must hold cgroup_mutex to modify cgroups. * * Any task can increment and decrement the count field without lock. @@ -836,18 +835,6 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, * always has either children cgroups and/or using tasks. So we don't * need a special hack to ensure that top_cgroup cannot be deleted. * - * The task_lock() exception - * - * The need for this exception arises from the action of - * cgroup_attach_task(), which overwrites one task's cgroup pointer with - * another. It does so using cgroup_mutex, however there are - * several performance critical places that need to reference - * task->cgroup without the expense of grabbing a system global - * mutex. Therefore except as noted below, when dereferencing or, as - * in cgroup_attach_task(), modifying a task's cgroup pointer we use - * task_lock(), which acts on a spinlock (task->alloc_lock) already in - * the task_struct routinely used for such matters. - * * P.S. One more locking exception. RCU is used to guard the * update of a tasks cgroup pointer by cgroup_attach_task() */ @@ -1329,8 +1316,6 @@ static void cgroup_enable_task_cg_lists(void) */ read_lock(&tasklist_lock); do_each_thread(g, p) { - task_lock(p); - WARN_ON_ONCE(!list_empty(&p->cg_list) || task_css_set(p) != &init_css_set); @@ -1349,8 +1334,6 @@ static void cgroup_enable_task_cg_lists(void) get_css_set(cset); } spin_unlock_irq(&p->sighand->siglock); - - task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); out_unlock: @@ -1743,11 +1726,7 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, old_cset = task_css_set(tsk); get_css_set(new_cset); - - task_lock(tsk); rcu_assign_pointer(tsk->cgroups, new_cset); - task_unlock(tsk); - list_move(&tsk->cg_list, &new_cset->mg_tasks); /* @@ -1999,8 +1978,7 @@ out_release_tset: * @leader: the task or the leader of the threadgroup to be attached * @threadgroup: attach the whole threadgroup? * - * Call holding cgroup_mutex and the group_rwsem of the leader. Will take - * task_lock of @tsk or each thread in the threadgroup individually in turn. + * Call holding cgroup_mutex and threadgroup_lock of @leader. */ static int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader, bool threadgroup) @@ -2034,7 +2012,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp, /* * Find the task_struct of the task to attach by vpid and pass it along to the * function to attach either it or all tasks in its threadgroup. Will lock - * cgroup_mutex and threadgroup; may take task_lock of task. + * cgroup_mutex and threadgroup. 
*/ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) { @@ -4155,12 +4133,6 @@ core_initcall(cgroup_wq_init); * proc_cgroup_show() * - Print task's cgroup paths into seq_file, one line for each hierarchy * - Used for /proc//cgroup. - * - No need to task_lock(tsk) on this tsk->cgroup reference, as it - * doesn't really matter if tsk->cgroup changes after we read it, - * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it - * anyway. No need to check that tsk->cgroup != NULL, thanks to - * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks - * cgroup to top_cgroup. */ /* TODO: Use a proper seq_file iterator */ @@ -4310,15 +4282,12 @@ void cgroup_post_fork(struct task_struct *child) struct css_set *cset; down_write(&css_set_rwsem); - cset = task_css_set_check(current, - lockdep_is_held(&css_set_rwsem)); - task_lock(child); + cset = task_css_set(current); if (list_empty(&child->cg_list)) { rcu_assign_pointer(child->cgroups, cset); list_add(&child->cg_list, &cset->tasks); get_css_set(cset); } - task_unlock(child); up_write(&css_set_rwsem); } @@ -4347,27 +4316,13 @@ void cgroup_post_fork(struct task_struct *child) * use notify_on_release cgroups where very high task exit scaling * is required on large systems. * - * the_top_cgroup_hack: - * - * Set the exiting tasks cgroup to the root cgroup (top_cgroup). - * - * We call cgroup_exit() while the task is still competent to - * handle notify_on_release(), then leave the task attached to the - * root cgroup in each hierarchy for the remainder of its exit. - * - * To do this properly, we would increment the reference count on - * top_cgroup, and near the very end of the kernel/exit.c do_exit() - * code we would add a second cgroup function call, to drop that - * reference. This would just create an unnecessary hot spot on - * the top_cgroup reference count, to no avail. - * - * Normally, holding a reference to a cgroup without bumping its - * count is unsafe. The cgroup could go away, or someone could - * attach us to a different cgroup, decrementing the count on - * the first cgroup that we never incremented. But in this case, - * top_cgroup isn't going away, and either task has PF_EXITING set, - * which wards off any cgroup_attach_task() attempts, or task is a failed - * fork, never visible to cgroup_attach_task. + * We set the exiting tasks cgroup to the root cgroup (top_cgroup). We + * call cgroup_exit() while the task is still competent to handle + * notify_on_release(), then leave the task attached to the root cgroup in + * each hierarchy for the remainder of its exit. No need to bother with + * init_css_set refcnting. init_css_set never goes away and we can't race + * with migration path - either PF_EXITING is visible to migration path or + * @tsk never got on the tasklist. */ void cgroup_exit(struct task_struct *tsk, int run_callbacks) { @@ -4377,20 +4332,17 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) int i; /* - * Unlink from the css_set task list if necessary. Optimistically - * check cg_list before taking css_set_rwsem. + * Unlink from @tsk from its css_set. As migration path can't race + * with us, we can check cg_list without grabbing css_set_rwsem. */ if (!list_empty(&tsk->cg_list)) { down_write(&css_set_rwsem); - if (!list_empty(&tsk->cg_list)) { - list_del_init(&tsk->cg_list); - put_cset = true; - } + list_del_init(&tsk->cg_list); up_write(&css_set_rwsem); + put_cset = true; } /* Reassign the task to the init_css_set. 
*/ - task_lock(tsk); cset = task_css_set(tsk); RCU_INIT_POINTER(tsk->cgroups, &init_css_set); @@ -4405,7 +4357,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) } } } - task_unlock(tsk); if (put_cset) put_css_set(cset, true); -- cgit v1.2.3 From 952aaa125428fae883670a2c2e40ea8044ca1eaa Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Tue, 25 Feb 2014 10:04:03 -0500 Subject: cgroup: update cgroup_transfer_tasks() to either succeed or fail cgroup_transfer_tasks() can currently fail in the middle due to memory allocation failure. When that happens, the function just aborts and returns error code and there's no way to tell how many actually got migrated at the point of failure and or to revert the partial migration. Update it to use cgroup_migrate{_add_src|prepare_dst|migrate|finish}() so that the function either succeeds or fails as a whole as long as ->can_attach() doesn't fail. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index f783af900208..306ad0ed19ef 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2810,10 +2810,28 @@ void css_task_iter_end(struct css_task_iter *it) */ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) { + LIST_HEAD(preloaded_csets); + struct cgrp_cset_link *link; struct css_task_iter it; struct task_struct *task; - int ret = 0; + int ret; + + mutex_lock(&cgroup_mutex); + + /* all tasks in @from are being moved, all csets are source */ + down_read(&css_set_rwsem); + list_for_each_entry(link, &from->cset_links, cset_link) + cgroup_migrate_add_src(link->cset, to, &preloaded_csets); + up_read(&css_set_rwsem); + ret = cgroup_migrate_prepare_dst(to, &preloaded_csets); + if (ret) + goto out_err; + + /* + * Migrate tasks one-by-one until @form is empty. This fails iff + * ->can_attach() fails. + */ do { css_task_iter_start(&from->dummy_css, &it); task = css_task_iter_next(&it); @@ -2822,13 +2840,13 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) css_task_iter_end(&it); if (task) { - mutex_lock(&cgroup_mutex); - ret = cgroup_attach_task(to, task, false); - mutex_unlock(&cgroup_mutex); + ret = cgroup_migrate(to, task, false); put_task_struct(task); } } while (task && !ret); - +out_err: + cgroup_migrate_finish(&preloaded_csets); + mutex_unlock(&cgroup_mutex); return ret; } -- cgit v1.2.3 From a60bed296ac67b9e2765646dec8e36e3b4d7c395 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 12 Feb 2014 16:07:59 -0500 Subject: cgroup_freezer: document freezer_fork() subtleties cgroup_subsys->fork() callback is special in that it's called outside the usual cgroup locking and may race with on-going migration. freezer_fork() currently doesn't consider such race condition; however, it is still correct thanks to the fact that freeze_task() may be called spuriously. This is quite subtle. Let's explain what's going on and add test to detect racing and losing to task migration and skip freeze_task() in such cases for documentation. This doesn't make any behavior difference meaningful to userland. Signed-off-by: Tejun Heo Cc: Li Zefan Cc: "Rafael J. 
Wysocki" --- kernel/cgroup_freezer.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 7201a637c405..2ea98b216bff 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -214,6 +214,16 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, } } +/** + * freezer_fork - cgroup post fork callback + * @task: a task which has just been forked + * + * @task has just been created and should conform to the current state of + * the cgroup_freezer it belongs to. This function may race against + * freezer_attach(). Losing to freezer_attach() means that we don't have + * to do anything as freezer_attach() will put @task into the appropriate + * state. + */ static void freezer_fork(struct task_struct *task) { struct freezer *freezer; @@ -222,14 +232,26 @@ static void freezer_fork(struct task_struct *task) freezer = task_freezer(task); /* - * The root cgroup is non-freezable, so we can skip the - * following check. + * The root cgroup is non-freezable, so we can skip locking the + * freezer. This is safe regardless of race with task migration. + * If we didn't race or won, skipping is obviously the right thing + * to do. If we lost and root is the new cgroup, noop is still the + * right thing to do. */ if (!parent_freezer(freezer)) goto out; + /* + * Grab @freezer->lock and freeze @task after verifying @task still + * belongs to @freezer and it's freezing. The former is for the + * case where we have raced against task migration and lost and + * @task is already in a different cgroup which may not be frozen. + * This isn't strictly necessary as freeze_task() is allowed to be + * called spuriously but let's do it anyway for, if nothing else, + * documentation. + */ spin_lock_irq(&freezer->lock); - if (freezer->state & CGROUP_FREEZING) + if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING)) freeze_task(task); spin_unlock_irq(&freezer->lock); out: -- cgit v1.2.3 From 864f32a52b53341c54a25953afb5b66ed79a7f76 Mon Sep 17 00:00:00 2001 From: Rashika Kheria Date: Thu, 27 Feb 2014 17:50:19 +0530 Subject: kernel: Mark function as static in kernel/seccomp.c Mark function as static in kernel/seccomp.c because it is not used outside this file. This eliminates the following warning in kernel/seccomp.c: kernel/seccomp.c:296:6: warning: no previous prototype for ?seccomp_attach_user_filter? [-Wmissing-prototypes] Signed-off-by: Rashika Kheria Reviewed-by: Josh Triplett Acked-by: Kees Cook Acked-by: Will Drewry Signed-off-by: James Morris --- kernel/seccomp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b7a10048a32c..0e004a70f63a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -293,7 +293,7 @@ fail: * * Returns 0 on success and non-zero otherwise. */ -long seccomp_attach_user_filter(char __user *user_filter) +static long seccomp_attach_user_filter(char __user *user_filter) { struct sock_fprog fprog; long ret = -EFAULT; -- cgit v1.2.3 From b8dadcb58d542ecbf1d3dae5fefcd3fd8cb26539 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 3 Mar 2014 17:28:36 -0500 Subject: cpuset: use rcu_read_lock() to protect task_cs() We no longer use task_lock() to protect tsk->cgroups. 
Reported-by: Fengguang Wu Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cpuset.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index d8bec21d7a11..8d5324583aa4 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -2239,10 +2239,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) struct cpuset *cpus_cs; mutex_lock(&callback_mutex); - task_lock(tsk); + rcu_read_lock(); cpus_cs = effective_cpumask_cpuset(task_cs(tsk)); guarantee_online_cpus(cpus_cs, pmask); - task_unlock(tsk); + rcu_read_unlock(); mutex_unlock(&callback_mutex); } @@ -2295,10 +2295,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) nodemask_t mask; mutex_lock(&callback_mutex); - task_lock(tsk); + rcu_read_lock(); mems_cs = effective_nodemask_cpuset(task_cs(tsk)); guarantee_online_mems(mems_cs, &mask); - task_unlock(tsk); + rcu_read_unlock(); mutex_unlock(&callback_mutex); return mask; @@ -2414,9 +2414,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) /* Not hardwall and node outside mems_allowed: scan up cpusets */ mutex_lock(&callback_mutex); - task_lock(current); + rcu_read_lock(); cs = nearest_hardwall_ancestor(task_cs(current)); - task_unlock(current); + rcu_read_unlock(); allowed = node_isset(node, cs->mems_allowed); mutex_unlock(&callback_mutex); @@ -2543,24 +2543,26 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, * @task: pointer to task_struct of some task. * * Description: Prints @task's name, cpuset name, and cached copy of its - * mems_allowed to the kernel log. Must hold task_lock(task) to allow - * dereferencing task_cs(task). + * mems_allowed to the kernel log. */ void cpuset_print_task_mems_allowed(struct task_struct *tsk) { /* Statically allocated to prevent using excess stack. */ static char cpuset_nodelist[CPUSET_NODELIST_LEN]; static DEFINE_SPINLOCK(cpuset_buffer_lock); - struct cgroup *cgrp = task_cs(tsk)->css.cgroup; + struct cgroup *cgrp; spin_lock(&cpuset_buffer_lock); + rcu_read_lock(); + cgrp = task_cs(tsk)->css.cgroup; nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, tsk->mems_allowed); printk(KERN_INFO "%s cpuset=", tsk->comm); pr_cont_cgroup_name(cgrp); pr_cont(" mems_allowed=%s\n", cpuset_nodelist); + rcu_read_unlock(); spin_unlock(&cpuset_buffer_lock); } @@ -2592,9 +2594,9 @@ int cpuset_memory_pressure_enabled __read_mostly; void __cpuset_memory_pressure_bump(void) { - task_lock(current); + rcu_read_lock(); fmeter_markevent(&task_cs(current)->fmeter); - task_unlock(current); + rcu_read_unlock(); } #ifdef CONFIG_PROC_PID_CPUSET -- cgit v1.2.3 From 7dec935a3aa04412cba2cebe1524ae0d34a30c24 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Wed, 26 Feb 2014 10:54:36 -0500 Subject: tracepoint: Do not waste memory on mods with no tracepoints No reason to allocate tp_module structures for modules that have no tracepoints. This just wastes memory. 
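
The essence of the change, sketched with the body elided for brevity; tracepoint_module_coming() and mod->num_tracepoints are the real names from the diff below, everything else is abbreviated:

	static int tracepoint_module_coming(struct module *mod)
	{
		/* Modules without tracepoints need no tp_module bookkeeping. */
		if (!mod->num_tracepoints)
			return 0;

		/* ... otherwise allocate and register a struct tp_module ... */
		return 0;
	}

tracepoint_module_going() gets the same early return, so the coming/going notifications stay balanced for such modules.
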
Fixes: b75ef8b44b1c "Tracepoint: Dissociate from module mutex" Acked-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 29f26540e9c9..0d4ef26574ff 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -636,6 +636,9 @@ static int tracepoint_module_coming(struct module *mod) struct tp_module *tp_mod, *iter; int ret = 0; + if (!mod->num_tracepoints) + return 0; + /* * We skip modules that taint the kernel, especially those with different * module headers (for forced load), to make sure we don't cause a crash. @@ -679,6 +682,9 @@ static int tracepoint_module_going(struct module *mod) { struct tp_module *pos; + if (!mod->num_tracepoints) + return 0; + mutex_lock(&tracepoints_mutex); tracepoint_update_probe_range(mod->tracepoints_ptrs, mod->tracepoints_ptrs + mod->num_tracepoints); -- cgit v1.2.3 From 1d6bae966e90134bcfd7807b8f9488d55198de91 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 9 Aug 2012 19:16:14 -0400 Subject: tracing: Move raw output code from macro to standalone function The code for trace events to format the raw recorded event data into human readable format in the 'trace' file is repeated for every event in the system. When you have over 500 events, this can add up quite a bit. By making helper functions in the core kernel to do the work instead, we can shrink the size of the kernel down a bit. With a kernel configured with 502 events, the change in size was: text data bss dec hex filename 12991007 1913568 9785344 24689919 178bcff /tmp/vmlinux.orig 12990946 1913568 9785344 24689858 178bcc2 /tmp/vmlinux.patched Note, this version does not save as much as the version of this patch I had a few years ago. That is because in the mean time, commit f71130de5c7f ("tracing: Add a helper function for event print functions") did a lot of the work my original patch did. But this change helps slightly, and is part of a larger clean up to reduce the size much further. Link: http://lkml.kernel.org/r/20120810034707.378538034@goodmis.org Cc: Li Zefan Signed-off-by: Steven Rostedt --- kernel/trace/trace_output.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ed32284fbe32..ca0e79e2abaa 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -439,6 +439,37 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, } EXPORT_SYMBOL(ftrace_raw_output_prep); +static int ftrace_output_raw(struct trace_iterator *iter, char *name, + char *fmt, va_list ap) +{ + struct trace_seq *s = &iter->seq; + int ret; + + ret = trace_seq_printf(s, "%s: ", name); + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + + ret = trace_seq_vprintf(s, fmt, ap); + + if (!ret) + return TRACE_TYPE_PARTIAL_LINE; + + return TRACE_TYPE_HANDLED; +} + +int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) 
+{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = ftrace_output_raw(iter, name, fmt, ap); + va_end(ap); + + return ret; +} +EXPORT_SYMBOL_GPL(ftrace_output_call); + #ifdef CONFIG_KRETPROBES static inline const char *kretprobed(const char *name) { -- cgit v1.2.3 From 35bb4399bd0ef16b8a57fccea0047d98b6b0e7fb Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 9 Aug 2012 22:26:46 -0400 Subject: tracing: Move event storage for array from macro to standalone function The code that shows array fields for events is defined for all events. This can add up quite a bit when you have over 500 events. By making helper functions in the core kernel to do the work instead, we can shrink the size of the kernel down a bit. With a kernel configured with 502 events, the change in size was: text data bss dec hex filename 12990946 1913568 9785344 24689858 178bcc2 /tmp/vmlinux 12987390 1913504 9785344 24686238 178ae9e /tmp/vmlinux.patched That's a total of 3556 bytes, which comes down to 7 bytes per event. Although it's not much, this code is just called at initialization of the events. Link: http://lkml.kernel.org/r/20120810034708.084036335@goodmis.org Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 6 ------ kernel/trace/trace_export.c | 12 ++++-------- kernel/trace/trace_output.c | 21 +++++++++++++++++++++ 3 files changed, 25 insertions(+), 14 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e71ffd4eccb5..22826c73a9da 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -27,12 +27,6 @@ DEFINE_MUTEX(event_mutex); -DEFINE_MUTEX(event_storage_mutex); -EXPORT_SYMBOL_GPL(event_storage_mutex); - -char event_storage[EVENT_STORAGE_SIZE]; -EXPORT_SYMBOL_GPL(event_storage); - LIST_HEAD(ftrace_events); static LIST_HEAD(ftrace_common_fields); diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 7c3e3e72e2b6..39c746c5ae73 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -96,14 +96,10 @@ static void __always_unused ____ftrace_check_##name(void) \ #define __array(type, item, len) \ do { \ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ - mutex_lock(&event_storage_mutex); \ - snprintf(event_storage, sizeof(event_storage), \ - "%s[%d]", #type, len); \ - ret = trace_define_field(event_call, event_storage, #item, \ - offsetof(typeof(field), item), \ - sizeof(field.item), \ - is_signed_type(type), filter_type); \ - mutex_unlock(&event_storage_mutex); \ + ret = ftrace_event_define_field(event_call, #type, len, \ + #item, offsetof(typeof(field), item), \ + sizeof(field.item), \ + is_signed_type(type), filter_type); \ if (ret) \ return ret; \ } while (0); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ca0e79e2abaa..ee8d74840b88 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -20,6 +20,10 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; static int next_event_type = __TRACE_LAST_TYPE + 1; +#define EVENT_STORAGE_SIZE 128 +static DEFINE_MUTEX(event_storage_mutex); +static char event_storage[EVENT_STORAGE_SIZE]; + int trace_print_seq(struct seq_file *m, struct trace_seq *s) { int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; @@ -470,6 +474,23 @@ int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) 
} EXPORT_SYMBOL_GPL(ftrace_output_call); +int ftrace_event_define_field(struct ftrace_event_call *call, + char *type, int len, char *item, int offset, + int field_size, int sign, int filter) +{ + int ret; + + mutex_lock(&event_storage_mutex); + snprintf(event_storage, sizeof(event_storage), + "%s[%d]", type, len); + ret = trace_define_field(call, event_storage, item, offset, + field_size, sign, filter); + mutex_unlock(&event_storage_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(ftrace_event_define_field); + #ifdef CONFIG_KRETPROBES static inline const char *kretprobed(const char *name) { -- cgit v1.2.3 From 3fd40d1ee6a317523172ab95b6f7ea41ba8fcee3 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 9 Aug 2012 22:42:57 -0400 Subject: tracing: Use helper functions in event assignment to shrink macro size The functions that assign the contents for the ftrace events are defined by the TRACE_EVENT() macros. Each event has its own unique way to assign data to its buffer. When you have over 500 events, that means there's 500 functions assigning data uniquely for each event (not really that many, as DECLARE_EVENT_CLASS() and multiple DEFINE_EVENT()s will only need a single function). By making helper functions in the core kernel to do some of the work instead, we can shrink the size of the kernel down a bit. With a kernel configured with 502 events, the change in size was: text data bss dec hex filename 12987390 1913504 9785344 24686238 178ae9e /tmp/vmlinux 12959102 1913504 9785344 24657950 178401e /tmp/vmlinux.patched That's a total of 28288 bytes, which comes down to 56 bytes per event. Link: http://lkml.kernel.org/r/20120810034708.370808175@goodmis.org Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 22826c73a9da..b8f73b333a3c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -188,6 +188,36 @@ int trace_event_raw_init(struct ftrace_event_call *call) } EXPORT_SYMBOL_GPL(trace_event_raw_init); +void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, + struct ftrace_event_file *ftrace_file, + unsigned long len) +{ + struct ftrace_event_call *event_call = ftrace_file->event_call; + + local_save_flags(fbuffer->flags); + fbuffer->pc = preempt_count(); + fbuffer->ftrace_file = ftrace_file; + + fbuffer->event = + trace_event_buffer_lock_reserve(&fbuffer->buffer, ftrace_file, + event_call->event.type, len, + fbuffer->flags, fbuffer->pc); + if (!fbuffer->event) + return NULL; + + fbuffer->entry = ring_buffer_event_data(fbuffer->event); + return fbuffer->entry; +} +EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); + +void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) +{ + event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, + fbuffer->event, fbuffer->entry, + fbuffer->flags, fbuffer->pc); +} +EXPORT_SYMBOL_GPL(ftrace_event_buffer_commit); + int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type, void *data) { -- cgit v1.2.3 From b196e2b9e262be01737dc2bbf9e3c7c87340fa4d Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 13 Feb 2014 15:45:07 -0500 Subject: tracing: Warn if a tracepoint is not set via debugfs Tracepoints were made to allow enabling a tracepoint in a module before that module was loaded. When a tracepoint is enabled and it does not exist, the name is stored and will be enabled when the tracepoint is created. 
The problem with this approach is that when a tracepoint is enabled when it expects to be there, it gives no warning that it does not exist. To add salt to the wound, if a module is added and sets the FORCED flag, which can happen if it isn't signed properly, the tracepoint code will not enabled the tracepoints, but they will be created in the debugfs system! When a user goes to enable the tracepoint, the tracepoint code will not see it existing and will think it is to be enabled later AND WILL NOT GIVE A WARNING. The tracing will look like it succeeded but will actually be doing nothing. This will cause lots of confusion and headaches for developers trying to figure out why they are not seeing their tracepoints. Link: http://lkml.kernel.org/r/20140213154507.4040fb06@gandalf.local.home Reported-by: Mathieu Desnoyers Reported-by: Johannes Berg Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 0d4ef26574ff..0058f33d05c1 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -62,6 +62,7 @@ struct tracepoint_entry { struct hlist_node hlist; struct tracepoint_func *funcs; int refcount; /* Number of times armed. 0 if disarmed. */ + int enabled; /* Tracepoint enabled */ char name[0]; }; @@ -237,6 +238,7 @@ static struct tracepoint_entry *add_tracepoint(const char *name) memcpy(&e->name[0], name, name_len); e->funcs = NULL; e->refcount = 0; + e->enabled = 0; hlist_add_head(&e->hlist, head); return e; } @@ -316,6 +318,7 @@ static void tracepoint_update_probe_range(struct tracepoint * const *begin, if (mark_entry) { set_tracepoint(&mark_entry, *iter, !!mark_entry->refcount); + mark_entry->enabled = !!mark_entry->refcount; } else { disable_tracepoint(*iter); } @@ -380,6 +383,8 @@ tracepoint_add_probe(const char *name, void *probe, void *data) int tracepoint_probe_register(const char *name, void *probe, void *data) { struct tracepoint_func *old; + struct tracepoint_entry *entry; + int ret = 0; mutex_lock(&tracepoints_mutex); old = tracepoint_add_probe(name, probe, data); @@ -388,9 +393,13 @@ int tracepoint_probe_register(const char *name, void *probe, void *data) return PTR_ERR(old); } tracepoint_update_probes(); /* may update entry */ + entry = get_tracepoint(name); + /* Make sure the entry was enabled */ + if (!entry || !entry->enabled) + ret = -ENODEV; mutex_unlock(&tracepoints_mutex); release_probes(old); - return 0; + return ret; } EXPORT_SYMBOL_GPL(tracepoint_probe_register); -- cgit v1.2.3 From 1dc43cf0be9a94a6a7273db284152db15c526106 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 24 Feb 2014 19:59:56 +0100 Subject: ftrace: Cleanup of global variables ftrace_new_pgs and ftrace_update_cnt Some of them can be local to functions, so make them local and pass them as parameters where needed: * __start_mcount_loc+__stop_mcount_loc are local to ftrace_init * ftrace_new_pgs -> new_pgs/start_pg * ftrace_update_cnt -> local update_cnt in ftrace_update_code Link: http://lkml.kernel.org/r/1393268401-24379-1-git-send-email-jslaby@suse.cz Signed-off-by: Jiri Slaby Cc: Frederic Weisbecker Cc: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5313c1100d30..3f95bbeb8e8d 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1174,8 +1174,6 @@ struct 
ftrace_page { int size; }; -static struct ftrace_page *ftrace_new_pgs; - #define ENTRY_SIZE sizeof(struct dyn_ftrace) #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) @@ -2246,7 +2244,6 @@ static void ftrace_shutdown_sysctl(void) } static cycle_t ftrace_update_time; -static unsigned long ftrace_update_cnt; unsigned long ftrace_update_tot_cnt; static inline int ops_traces_mod(struct ftrace_ops *ops) @@ -2302,11 +2299,12 @@ static int referenced_filters(struct dyn_ftrace *rec) return cnt; } -static int ftrace_update_code(struct module *mod) +static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) { struct ftrace_page *pg; struct dyn_ftrace *p; cycle_t start, stop; + unsigned long update_cnt = 0; unsigned long ref = 0; bool test = false; int i; @@ -2332,9 +2330,8 @@ static int ftrace_update_code(struct module *mod) } start = ftrace_now(raw_smp_processor_id()); - ftrace_update_cnt = 0; - for (pg = ftrace_new_pgs; pg; pg = pg->next) { + for (pg = new_pgs; pg; pg = pg->next) { for (i = 0; i < pg->index; i++) { int cnt = ref; @@ -2355,7 +2352,7 @@ static int ftrace_update_code(struct module *mod) if (!ftrace_code_disable(mod, p)) break; - ftrace_update_cnt++; + update_cnt++; /* * If the tracing is enabled, go ahead and enable the record. @@ -2374,11 +2371,9 @@ static int ftrace_update_code(struct module *mod) } } - ftrace_new_pgs = NULL; - stop = ftrace_now(raw_smp_processor_id()); ftrace_update_time = stop - start; - ftrace_update_tot_cnt += ftrace_update_cnt; + ftrace_update_tot_cnt += update_cnt; return 0; } @@ -4270,9 +4265,6 @@ static int ftrace_process_locs(struct module *mod, /* Assign the last page to ftrace_pages */ ftrace_pages = pg; - /* These new locations need to be initialized */ - ftrace_new_pgs = start_pg; - /* * We only need to disable interrupts on start up * because we are modifying code that an interrupt @@ -4283,7 +4275,7 @@ static int ftrace_process_locs(struct module *mod, */ if (!mod) local_irq_save(flags); - ftrace_update_code(mod); + ftrace_update_code(mod, start_pg); if (!mod) local_irq_restore(flags); ret = 0; @@ -4392,11 +4384,10 @@ struct notifier_block ftrace_module_exit_nb = { .priority = INT_MIN, /* Run after anything that can remove kprobes */ }; -extern unsigned long __start_mcount_loc[]; -extern unsigned long __stop_mcount_loc[]; - void __init ftrace_init(void) { + extern unsigned long __start_mcount_loc[]; + extern unsigned long __stop_mcount_loc[]; unsigned long count, addr, flags; int ret; -- cgit v1.2.3 From c867ccd8388d1c1a31bef9c54544b2ef32f0ebca Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 24 Feb 2014 19:59:57 +0100 Subject: ftrace: Inline the code from ftrace_dyn_table_alloc() The function used to do allocations some time ago. This no longer happens and it only checks the count and prints some info. This patch inlines the body to the only caller. 
There are two reasons: * the name of the function was misleading * it's clear what is going on in ftrace_init now Link: http://lkml.kernel.org/r/1393268401-24379-2-git-send-email-jslaby@suse.cz Signed-off-by: Jiri Slaby Cc: Frederic Weisbecker Cc: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 3f95bbeb8e8d..76b6ed29d856 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2465,22 +2465,6 @@ ftrace_allocate_pages(unsigned long num_to_init) return NULL; } -static int __init ftrace_dyn_table_alloc(unsigned long num_to_init) -{ - int cnt; - - if (!num_to_init) { - pr_info("ftrace: No functions to be traced?\n"); - return -1; - } - - cnt = num_to_init / ENTRIES_PER_PAGE; - pr_info("ftrace: allocating %ld entries in %d pages\n", - num_to_init, cnt + 1); - - return 0; -} - #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ struct ftrace_iterator { @@ -4403,10 +4387,13 @@ void __init ftrace_init(void) goto failed; count = __stop_mcount_loc - __start_mcount_loc; - - ret = ftrace_dyn_table_alloc(count); - if (ret) + if (!count) { + pr_info("ftrace: No functions to be traced?\n"); goto failed; + } + + pr_info("ftrace: allocating %ld entries in %ld pages\n", + count, count / ENTRIES_PER_PAGE + 1); last_ftrace_enabled = ftrace_enabled = 1; -- cgit v1.2.3 From af64a7cb09db77344c596a0bf3d57d77257e8bf5 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 24 Feb 2014 19:59:58 +0100 Subject: ftrace: Pass retval through return in ftrace_dyn_arch_init() No architecture uses the "data" parameter in ftrace_dyn_arch_init() in any way, it just sets the value to 0. And this is used as a return value in the caller -- ftrace_init, which just checks the retval against zero. Note there is also "return 0" in every ftrace_dyn_arch_init. So it is enough to check the retval and remove all the indirect sets of data on all archs. Link: http://lkml.kernel.org/r/1393268401-24379-3-git-send-email-jslaby@suse.cz Cc: linux-arch@vger.kernel.org Signed-off-by: Jiri Slaby Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 76b6ed29d856..083c6d5fce25 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4379,11 +4379,9 @@ void __init ftrace_init(void) addr = (unsigned long)ftrace_stub; local_irq_save(flags); - ftrace_dyn_arch_init(&addr); + ret = ftrace_dyn_arch_init(&addr); local_irq_restore(flags); - - /* ftrace_dyn_arch_init places the return code in addr */ - if (addr) + if (ret) goto failed; count = __stop_mcount_loc - __start_mcount_loc; -- cgit v1.2.3 From 3a36cb11ca65cd6804972eaf1000378ba4384ea7 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 24 Feb 2014 19:59:59 +0100 Subject: ftrace: Do not pass data to ftrace_dyn_arch_init As the data parameter is not really used by any ftrace_dyn_arch_init, remove that from ftrace_dyn_arch_init. This also removes the addr local variable from ftrace_init which is now unused. Note the documentation was imprecise as it did not suggest to set (*data) to 0. 
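For illustration only (not part of these patches), an architecture implementation under the new prototype reduces to something like the sketch below; the body is a hypothetical minimal stub, real arch code performs its mcount/fentry setup there:

	int __init ftrace_dyn_arch_init(void)
	{
		/* arch-specific initialization, if any, would go here */
		return 0;	/* any non-zero value makes ftrace_init() bail out */
	}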
Link: http://lkml.kernel.org/r/1393268401-24379-4-git-send-email-jslaby@suse.cz Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: linux-arch@vger.kernel.org Signed-off-by: Jiri Slaby Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 083c6d5fce25..5bd70e8b09b0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -4372,14 +4372,11 @@ void __init ftrace_init(void) { extern unsigned long __start_mcount_loc[]; extern unsigned long __stop_mcount_loc[]; - unsigned long count, addr, flags; + unsigned long count, flags; int ret; - /* Keep the ftrace pointer to the stub */ - addr = (unsigned long)ftrace_stub; - local_irq_save(flags); - ret = ftrace_dyn_arch_init(&addr); + ret = ftrace_dyn_arch_init(); local_irq_restore(flags); if (ret) goto failed; -- cgit v1.2.3 From cd21067f69240041d36e491ff5597e0217615465 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Mon, 24 Feb 2014 17:12:21 +0100 Subject: ftrace: Warn on error when modifying ftrace function We should print some warning and kill ftrace functionality when the ftrace function is not set correctly. Otherwise, ftrace might do crazy things without an explanation. The error value has been ignored so far. Note that an error that happens during updating all the traced calls is handled in ftrace_replace_code(). We print more details about the particular failing address via ftrace_bug() there. Link: http://lkml.kernel.org/r/1393258342-29978-3-git-send-email-pmladek@suse.cz Signed-off-by: Petr Mladek Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 5bd70e8b09b0..0e48ff4cefa5 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1994,6 +1994,7 @@ int __weak ftrace_arch_code_modify_post_process(void) void ftrace_modify_all_code(int command) { int update = command & FTRACE_UPDATE_TRACE_FUNC; + int err = 0; /* * If the ftrace_caller calls a ftrace_ops func directly, @@ -2005,8 +2006,11 @@ void ftrace_modify_all_code(int command) * to make sure the ops are having the right functions * traced. 
*/ - if (update) - ftrace_update_ftrace_func(ftrace_ops_list_func); + if (update) { + err = ftrace_update_ftrace_func(ftrace_ops_list_func); + if (FTRACE_WARN_ON(err)) + return; + } if (command & FTRACE_UPDATE_CALLS) ftrace_replace_code(1); @@ -2019,13 +2023,16 @@ void ftrace_modify_all_code(int command) /* If irqs are disabled, we are in stop machine */ if (!irqs_disabled()) smp_call_function(ftrace_sync_ipi, NULL, 1); - ftrace_update_ftrace_func(ftrace_trace_function); + err = ftrace_update_ftrace_func(ftrace_trace_function); + if (FTRACE_WARN_ON(err)) + return; } if (command & FTRACE_START_FUNC_RET) - ftrace_enable_ftrace_graph_caller(); + err = ftrace_enable_ftrace_graph_caller(); else if (command & FTRACE_STOP_FUNC_RET) - ftrace_disable_ftrace_graph_caller(); + err = ftrace_disable_ftrace_graph_caller(); + FTRACE_WARN_ON(err); } static int __ftrace_modify_code(void *data) -- cgit v1.2.3 From f952d10ff40b436a8ef156a74ec327abe303823d Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Mon, 27 Jan 2014 17:38:42 -0500 Subject: audit: Use more current logging style again Add pr_fmt to prefix "audit: " to output Convert printk(KERN_ to pr_ Coalesce formats Signed-off-by: Richard Guy Briggs --- kernel/auditfilter.c | 12 +++++++----- kernel/auditsc.c | 31 +++++++++++++++---------------- 2 files changed, 22 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 14a78cca384e..3152d1aea164 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -19,6 +19,8 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -247,7 +249,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) ; } if (unlikely(rule->action == AUDIT_POSSIBLE)) { - printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n"); + pr_err("AUDIT_POSSIBLE is deprecated\n"); goto exit_err; } if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) @@ -477,8 +479,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, /* Keep currently invalid fields around in case they * become valid after a policy reload. */ if (err == -EINVAL) { - printk(KERN_WARNING "audit rule for LSM " - "\'%s\' is invalid\n", str); + pr_warn("audit rule for LSM \'%s\' is invalid\n", + str); err = 0; } if (err) { @@ -707,8 +709,8 @@ static inline int audit_dupe_lsm_field(struct audit_field *df, /* Keep currently invalid fields around in case they * become valid after a policy reload. */ if (ret == -EINVAL) { - printk(KERN_WARNING "audit rule for LSM \'%s\' is " - "invalid\n", df->lsm_str); + pr_warn("audit rule for LSM \'%s\' is invalid\n", + df->lsm_str); ret = 0; } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 10176cd5956a..6874c1fd453d 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -42,6 +42,8 @@ * and for LSPP certification compliance. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -850,16 +852,15 @@ static inline void audit_free_names(struct audit_context *context) if (context->put_count + context->ino_count != context->name_count) { int i = 0; - printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d" - " name_count=%d put_count=%d" - " ino_count=%d [NOT freeing]\n", - __FILE__, __LINE__, + pr_err("%s:%d(:%d): major=%d in_syscall=%d" + " name_count=%d put_count=%d ino_count=%d" + " [NOT freeing]\n", __FILE__, __LINE__, context->serial, context->major, context->in_syscall, context->name_count, context->put_count, context->ino_count); list_for_each_entry(n, &context->names_list, list) { - printk(KERN_ERR "names[%d] = %p = %s\n", i++, - n->name, n->name->name ?: "(null)"); + pr_err("names[%d] = %p = %s\n", i++, n->name, + n->name->name ?: "(null)"); } dump_stack(); return; @@ -1550,7 +1551,7 @@ static inline void handle_one(const struct inode *inode) if (likely(put_tree_ref(context, chunk))) return; if (unlikely(!grow_tree_refs(context))) { - printk(KERN_WARNING "out of memory, audit has lost a tree reference\n"); + pr_warn("out of memory, audit has lost a tree reference\n"); audit_set_auditable(context); audit_put_chunk(chunk); unroll_tree_refs(context, p, count); @@ -1609,8 +1610,7 @@ retry: goto retry; } /* too bad */ - printk(KERN_WARNING - "out of memory, audit has lost a tree reference\n"); + pr_warn("out of memory, audit has lost a tree reference\n"); unroll_tree_refs(context, p, count); audit_set_auditable(context); return; @@ -1682,7 +1682,7 @@ void __audit_getname(struct filename *name) if (!context->in_syscall) { #if AUDIT_DEBUG == 2 - printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n", + pr_err("%s:%d(:%d): ignoring getname(%p)\n", __FILE__, __LINE__, context->serial, name); dump_stack(); #endif @@ -1721,15 +1721,15 @@ void audit_putname(struct filename *name) BUG_ON(!context); if (!context->in_syscall) { #if AUDIT_DEBUG == 2 - printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n", + pr_err("%s:%d(:%d): final_putname(%p)\n", __FILE__, __LINE__, context->serial, name); if (context->name_count) { struct audit_names *n; int i = 0; list_for_each_entry(n, &context->names_list, list) - printk(KERN_ERR "name[%d] = %p = %s\n", i++, - n->name, n->name->name ?: "(null)"); + pr_err("name[%d] = %p = %s\n", i++, n->name, + n->name->name ?: "(null)"); } #endif final_putname(name); @@ -1738,9 +1738,8 @@ void audit_putname(struct filename *name) else { ++context->put_count; if (context->put_count > context->name_count) { - printk(KERN_ERR "%s:%d(:%d): major=%d" - " in_syscall=%d putname(%p) name_count=%d" - " put_count=%d\n", + pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)" + " name_count=%d put_count=%d\n", __FILE__, __LINE__, context->serial, context->major, context->in_syscall, name->name, -- cgit v1.2.3 From 30cdd69e2a266505ca8229c944d361ff350a6959 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 3 Mar 2014 08:48:51 +0100 Subject: cpuidle/idle: Move the cpuidle_idle_call function to idle.c The cpuidle_idle_call does nothing more than calling the three individuals function and is no longer used by any arch specific code but only in the cpuidle framework code. We can move this function into the idle task code to ensure better proximity to the scheduler code. 
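For context, a simplified sketch of how the generic idle loop currently invokes this function (condensed from the caller that the follow-up patches below reshuffle):

	stop_critical_timings();
	rcu_idle_enter();
	if (cpuidle_idle_call())	/* non-zero: cpuidle not usable */
		arch_cpu_idle();	/* fall back to the arch default idle */
	rcu_idle_exit();
	start_critical_timings();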
Signed-off-by: Daniel Lezcano Acked-by: Nicolas Pitre Signed-off-by: Peter Zijlstra Cc: rjw@rjwysocki.net Cc: preeti@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1393832934-11625-2-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/idle.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index b7976a127178..d5aaf5eb4531 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -63,6 +63,62 @@ void __weak arch_cpu_idle(void) local_irq_enable(); } +#ifdef CONFIG_CPU_IDLE +/** + * cpuidle_idle_call - the main idle function + * + * NOTE: no locks or semaphores should be used here + * return non-zero on failure + */ +static int cpuidle_idle_call(void) +{ + struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + int next_state, entered_state, ret; + bool broadcast; + + ret = cpuidle_enabled(drv, dev); + if (ret < 0) + return ret; + + /* ask the governor for the next state */ + next_state = cpuidle_select(drv, dev); + + if (need_resched()) { + dev->last_residency = 0; + /* give the governor an opportunity to reflect on the outcome */ + cpuidle_reflect(dev, next_state); + local_irq_enable(); + return 0; + } + + broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); + + if (broadcast && + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu)) + return -EBUSY; + + trace_cpu_idle_rcuidle(next_state, dev->cpu); + + entered_state = cpuidle_enter(drv, dev, next_state); + + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + + if (broadcast) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + + /* give the governor an opportunity to reflect on the outcome */ + cpuidle_reflect(dev, entered_state); + + return 0; +} +#else +static inline int cpuidle_idle_call(void) +{ + return -ENODEV; +} +#endif + /* * Generic idle loop implementation */ -- cgit v1.2.3 From c8cc7d4de7a4f2fb1f8774ec2de5b49c46c42e64 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 3 Mar 2014 08:48:52 +0100 Subject: sched/idle: Reorganize the idle loop Now that we have the main cpuidle function in idle.c, move some code from the idle mainloop to this function for the sake of clarity. That removes if then else indentation difficult to follow when looking at the code. This patch does not change the current behavior. 
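Illustrative sketch of the resulting call site in cpu_idle_loop(), condensed from the hunk below; the RCU and critical-timings bracketing now lives inside cpuidle_idle_call() itself:

	if (!current_clr_polling_and_test()) {
		cpuidle_idle_call();	/* handles the arch_cpu_idle() fallback */
	} else {
		local_irq_enable();
	}
	__current_set_polling();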
Signed-off-by: Daniel Lezcano Acked-by: Nicolas Pitre Signed-off-by: Peter Zijlstra Cc: tglx@linutronix.de Cc: rjw@rjwysocki.net Cc: preeti@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1393832934-11625-3-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/idle.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index d5aaf5eb4531..dc8a2466418f 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -63,7 +63,6 @@ void __weak arch_cpu_idle(void) local_irq_enable(); } -#ifdef CONFIG_CPU_IDLE /** * cpuidle_idle_call - the main idle function * @@ -77,9 +76,14 @@ static int cpuidle_idle_call(void) int next_state, entered_state, ret; bool broadcast; + stop_critical_timings(); + rcu_idle_enter(); + ret = cpuidle_enabled(drv, dev); - if (ret < 0) - return ret; + if (ret < 0) { + arch_cpu_idle(); + goto out; + } /* ask the governor for the next state */ next_state = cpuidle_select(drv, dev); @@ -89,7 +93,7 @@ static int cpuidle_idle_call(void) /* give the governor an opportunity to reflect on the outcome */ cpuidle_reflect(dev, next_state); local_irq_enable(); - return 0; + goto out; } broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); @@ -109,15 +113,15 @@ static int cpuidle_idle_call(void) /* give the governor an opportunity to reflect on the outcome */ cpuidle_reflect(dev, entered_state); +out: + if (WARN_ON_ONCE(irqs_disabled())) + local_irq_enable(); + + rcu_idle_exit(); + start_critical_timings(); return 0; } -#else -static inline int cpuidle_idle_call(void) -{ - return -ENODEV; -} -#endif /* * Generic idle loop implementation @@ -150,14 +154,7 @@ static void cpu_idle_loop(void) cpu_idle_poll(); } else { if (!current_clr_polling_and_test()) { - stop_critical_timings(); - rcu_idle_enter(); - if (cpuidle_idle_call()) - arch_cpu_idle(); - if (WARN_ON_ONCE(irqs_disabled())) - local_irq_enable(); - rcu_idle_exit(); - start_critical_timings(); + cpuidle_idle_call(); } else { local_irq_enable(); } -- cgit v1.2.3 From 8ca3c6424f4988fc19ed1067b121fbaf2e884d77 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 3 Mar 2014 08:48:53 +0100 Subject: sched/idle: Move idle conditions in cpuidle_idle main function This patch moves the condition before entering idle into the cpuidle main function located in idle.c. That simplify the idle mainloop functions and increase the readibility of the conditions to enter truly idle. This patch is code reorganization and does not change the behavior of the function. 
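Sketch of the early-exit test that now sits at the top of cpuidle_idle_call(), shown here in simplified form (taken from the hunk below):

	if (current_clr_polling_and_test()) {
		local_irq_enable();
		__current_set_polling();
		return 0;	/* a reschedule is pending, skip idle entry */
	}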
Signed-off-by: Daniel Lezcano Signed-off-by: Peter Zijlstra Cc: tglx@linutronix.de Cc: rjw@rjwysocki.net Cc: nicolas.pitre@linaro.org Cc: preeti@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1393832934-11625-4-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/idle.c | 78 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 44 insertions(+), 34 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index dc8a2466418f..cc7a6f3801ff 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -76,44 +76,59 @@ static int cpuidle_idle_call(void) int next_state, entered_state, ret; bool broadcast; + if (current_clr_polling_and_test()) { + local_irq_enable(); + __current_set_polling(); + return 0; + } + stop_critical_timings(); rcu_idle_enter(); ret = cpuidle_enabled(drv, dev); - if (ret < 0) { - arch_cpu_idle(); - goto out; - } - /* ask the governor for the next state */ - next_state = cpuidle_select(drv, dev); + if (!ret) { + /* ask the governor for the next state */ + next_state = cpuidle_select(drv, dev); - if (need_resched()) { - dev->last_residency = 0; - /* give the governor an opportunity to reflect on the outcome */ - cpuidle_reflect(dev, next_state); - local_irq_enable(); - goto out; - } + if (current_clr_polling_and_test()) { + dev->last_residency = 0; + entered_state = next_state; + local_irq_enable(); + } else { + broadcast = !!(drv->states[next_state].flags & + CPUIDLE_FLAG_TIMER_STOP); + + if (broadcast) + ret = clockevents_notify( + CLOCK_EVT_NOTIFY_BROADCAST_ENTER, + &dev->cpu); - broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP); + if (!ret) { + trace_cpu_idle_rcuidle(next_state, dev->cpu); - if (broadcast && - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu)) - return -EBUSY; + entered_state = cpuidle_enter(drv, dev, + next_state); - trace_cpu_idle_rcuidle(next_state, dev->cpu); + trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, + dev->cpu); - entered_state = cpuidle_enter(drv, dev, next_state); + if (broadcast) + clockevents_notify( + CLOCK_EVT_NOTIFY_BROADCAST_EXIT, + &dev->cpu); - trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); + /* give the governor an opportunity to reflect on the outcome */ + cpuidle_reflect(dev, entered_state); + } + } + } + + if (ret) + arch_cpu_idle(); - if (broadcast) - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); + __current_set_polling(); - /* give the governor an opportunity to reflect on the outcome */ - cpuidle_reflect(dev, entered_state); -out: if (WARN_ON_ONCE(irqs_disabled())) local_irq_enable(); @@ -150,16 +165,11 @@ static void cpu_idle_loop(void) * know that the IPI is going to arrive right * away */ - if (cpu_idle_force_poll || tick_check_broadcast_expired()) { + if (cpu_idle_force_poll || tick_check_broadcast_expired()) cpu_idle_poll(); - } else { - if (!current_clr_polling_and_test()) { - cpuidle_idle_call(); - } else { - local_irq_enable(); - } - __current_set_polling(); - } + else + cpuidle_idle_call(); + arch_cpu_idle_exit(); } -- cgit v1.2.3 From a1d028bd6d2b7789d15eddfd07c5bea2aaf36040 Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 3 Mar 2014 08:48:54 +0100 Subject: sched/idle: Add more comments to the code The idle main function is a complex and a critical function. Added more comments to the code. 
Signed-off-by: Daniel Lezcano Acked-by: Nicolas Pitre Signed-off-by: Peter Zijlstra Cc: tglx@linutronix.de Cc: rjw@rjwysocki.net Cc: preeti@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1393832934-11625-5-git-send-email-daniel.lezcano@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/idle.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index cc7a6f3801ff..8f4390a079c7 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -76,21 +76,49 @@ static int cpuidle_idle_call(void) int next_state, entered_state, ret; bool broadcast; + /* + * Check if the idle task must be rescheduled. If it is the + * case, exit the function after re-enabling the local irq and + * set again the polling flag + */ if (current_clr_polling_and_test()) { local_irq_enable(); __current_set_polling(); return 0; } + /* + * During the idle period, stop measuring the disabled irqs + * critical sections latencies + */ stop_critical_timings(); + + /* + * Tell the RCU framework we are entering an idle section, + * so no more rcu read side critical sections and one more + * step to the grace period + */ rcu_idle_enter(); + /* + * Check if the cpuidle framework is ready, otherwise fallback + * to the default arch specific idle method + */ ret = cpuidle_enabled(drv, dev); if (!ret) { - /* ask the governor for the next state */ + /* + * Ask the governor to choose an idle state it thinks + * it is convenient to go to. There is *always* a + * convenient idle state + */ next_state = cpuidle_select(drv, dev); + /* + * The idle task must be scheduled, it is pointless to + * go to idle, just update no idle residency and get + * out of this function + */ if (current_clr_polling_and_test()) { dev->last_residency = 0; entered_state = next_state; @@ -100,6 +128,14 @@ static int cpuidle_idle_call(void) CPUIDLE_FLAG_TIMER_STOP); if (broadcast) + /* + * Tell the time framework to switch + * to a broadcast timer because our + * local timer will be shutdown. If a + * local timer is used from another + * cpu as a broadcast timer, this call + * may fail if it is not available + */ ret = clockevents_notify( CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu); @@ -107,6 +143,14 @@ static int cpuidle_idle_call(void) if (!ret) { trace_cpu_idle_rcuidle(next_state, dev->cpu); + /* + * Enter the idle state previously + * returned by the governor + * decision. 
This function will block + * until an interrupt occurs and will + * take care of re-enabling the local + * interrupts + */ entered_state = cpuidle_enter(drv, dev, next_state); @@ -118,17 +162,28 @@ static int cpuidle_idle_call(void) CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu); - /* give the governor an opportunity to reflect on the outcome */ + /* + * Give the governor an opportunity to reflect on the + * outcome + */ cpuidle_reflect(dev, entered_state); } } } + /* + * We can't use the cpuidle framework, let's use the default + * idle routine + */ if (ret) arch_cpu_idle(); __current_set_polling(); + /* + * It is up to the idle functions to enable back the local + * interrupt + */ if (WARN_ON_ONCE(irqs_disabled())) local_irq_enable(); -- cgit v1.2.3 From db0fbadcbd0c288525ea9f76488b324642a78c7f Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 10 Mar 2014 21:42:11 +0100 Subject: ftrace: Fix compilation warning about control_ops_free With CONFIG_DYNAMIC_FTRACE=n, I see a warning: kernel/trace/ftrace.c:240:13: warning: 'control_ops_free' defined but not used static void control_ops_free(struct ftrace_ops *ops) ^ Move that function around to an already existing #ifdef CONFIG_DYNAMIC_FTRACE block as the function is used solely from the dynamic function tracing functions. Link: http://lkml.kernel.org/r/1394484131-5107-1-git-send-email-jslaby@suse.cz Signed-off-by: Jiri Slaby Cc: Frederic Weisbecker Cc: Ingo Molnar Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 0e48ff4cefa5..b4531b228180 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -237,11 +237,6 @@ static int control_ops_alloc(struct ftrace_ops *ops) return 0; } -static void control_ops_free(struct ftrace_ops *ops) -{ - free_percpu(ops->disabled); -} - static void update_global_ops(void) { ftrace_func_t func = ftrace_global_list_func; @@ -2100,6 +2095,11 @@ static ftrace_func_t saved_ftrace_func; static int ftrace_start_up; static int global_start_up; +static void control_ops_free(struct ftrace_ops *ops) +{ + free_percpu(ops->disabled); +} + static void ftrace_startup_enable(int command) { if (saved_ftrace_func != ftrace_trace_function) { -- cgit v1.2.3 From 4c11628a16506a8a8e030515f601771df07bba97 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 11 Mar 2014 21:32:28 -0400 Subject: tracepoints: API doc update to data argument Describe the @data argument (probe private data). Link: http://lkml.kernel.org/r/1394587948-27878-1-git-send-email-mathieu.desnoyers@efficios.com Fixes: 38516ab59fbc "tracing: Let tracepoints have data passed to tracepoint callbacks" CC: Ingo Molnar CC: Frederic Weisbecker CC: Andrew Morton Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 0058f33d05c1..a4f629da3011 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -376,6 +376,7 @@ tracepoint_add_probe(const char *name, void *probe, void *data) * tracepoint_probe_register - Connect a probe to a tracepoint * @name: tracepoint name * @probe: probe handler + * @data: probe private data * * Returns 0 if ok, error value on error. * The probe address must at least be aligned on the architecture pointer size. 
@@ -424,6 +425,7 @@ tracepoint_remove_probe(const char *name, void *probe, void *data) * tracepoint_probe_unregister - Disconnect a probe from a tracepoint * @name: tracepoint name * @probe: probe function pointer + * @data: probe private data * * We do not need to call a synchronize_sched to make sure the probes have * finished running before doing a module unload, because the module unload @@ -464,6 +466,7 @@ static void tracepoint_add_old_probes(void *old) * tracepoint_probe_register_noupdate - register a probe but not connect * @name: tracepoint name * @probe: probe handler + * @data: probe private data * * caller must call tracepoint_probe_update_all() */ @@ -488,6 +491,7 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect * @name: tracepoint name * @probe: probe function pointer + * @data: probe private data * * caller must call tracepoint_probe_update_all() */ -- cgit v1.2.3 From 3bbc8db341773eb6aa5576eaabca4e95170fbe34 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Mon, 10 Mar 2014 21:04:58 -0400 Subject: tracepoints: API doc update to tracepoint_probe_register() return value Describe the return values of tracepoint_probe_register(), including -ENODEV added by commit: Author: Steven Rostedt tracing: Warn if a tracepoint is not set via debugfs Link: http://lkml.kernel.org/r/1394499898-1537-2-git-send-email-mathieu.desnoyers@efficios.com CC: Ingo Molnar CC: Frederic Weisbecker CC: Andrew Morton Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index a4f629da3011..e2a58a22b0f4 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -378,7 +378,17 @@ tracepoint_add_probe(const char *name, void *probe, void *data) * @probe: probe handler * @data: probe private data * - * Returns 0 if ok, error value on error. + * Returns: + * - 0 if the probe was successfully registered, and tracepoint + * callsites are currently loaded for that probe, + * - -ENODEV if the probe was successfully registered, but no tracepoint + * callsite is currently loaded for that probe, + * - other negative error value on error. + * + * When tracepoint_probe_register() returns either 0 or -ENODEV, + * parameters @name, @probe, and @data may be used by the tracepoint + * infrastructure until the probe is unregistered. + * * The probe address must at least be aligned on the architecture pointer size. */ int tracepoint_probe_register(const char *name, void *probe, void *data) -- cgit v1.2.3 From d88471cb8b17a72b1edf5ab62e1704d78373c066 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Wed, 9 Jan 2013 18:09:20 -0500 Subject: ftrace: Constify ftrace_text_reserved Link: http://lkml.kernel.org/r/1357772960-4436-5-git-send-email-sasha.levin@oracle.com Signed-off-by: Sasha Levin Signed-off-by: Steven Rostedt --- kernel/trace/ftrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b4531b228180..1fd4b9479210 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1555,7 +1555,7 @@ unsigned long ftrace_location(unsigned long ip) * the function tracer. It checks the ftrace internal tables to * determine if the address belongs or not. 
*/ -int ftrace_text_reserved(void *start, void *end) +int ftrace_text_reserved(const void *start, const void *end) { unsigned long ret; -- cgit v1.2.3 From 27bba4d6bb3779a6678b31f9c9b9c1553c63fa95 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Mon, 3 Feb 2014 11:13:13 +1030 Subject: module: use pr_cont When dumping loaded modules, we print them one by one in separate printks. Let's use pr_cont as they are continuation prints. Signed-off-by: Jiri Slaby Cc: Rusty Russell Signed-off-by: Rusty Russell --- kernel/module.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index d24fcf29cb64..efa1e6031950 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -3809,12 +3809,12 @@ void print_modules(void) list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; - printk(" %s%s", mod->name, module_flags(mod, buf)); + pr_cont(" %s%s", mod->name, module_flags(mod, buf)); } preempt_enable(); if (last_unloaded_module[0]) - printk(" [last unloaded: %s]", last_unloaded_module); - printk("\n"); + pr_cont(" [last unloaded: %s]", last_unloaded_module); + pr_cont("\n"); } #ifdef CONFIG_MODVERSIONS -- cgit v1.2.3 From 66cc69e34e86a231fbe68d8918c6119e3b7549a3 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Thu, 13 Mar 2014 12:11:30 +1030 Subject: Fix: module signature vs tracepoints: add new TAINT_UNSIGNED_MODULE Users have reported being unable to trace non-signed modules loaded within a kernel supporting module signature. This is caused by tracepoint.c:tracepoint_module_coming() refusing to take into account tracepoints sitting within force-loaded modules (TAINT_FORCED_MODULE). The reason for this check, in the first place, is that a force-loaded module may have a struct module incompatible with the layout expected by the kernel, and can thus cause a kernel crash upon forced load of that module on a kernel with CONFIG_TRACEPOINTS=y. Tracepoints, however, specifically accept TAINT_OOT_MODULE and TAINT_CRAP, since those modules do not lead to the "very likely system crash" issue cited above for force-loaded modules. With kernels having CONFIG_MODULE_SIG=y (signed modules), a non-signed module is tainted re-using the TAINT_FORCED_MODULE taint flag. Unfortunately, this means that Tracepoints treat that module as a force-loaded module, and thus silently refuse to consider any tracepoint within this module. Since an unsigned module does not fit within the "very likely system crash" category of tainting, add a new TAINT_UNSIGNED_MODULE taint flag to specifically address this taint behavior, and accept those modules within Tracepoints. We use the letter 'X' as a taint flag character for a module being loaded that doesn't know how to sign its name (proposed by Steven Rostedt). Also add the missing 'O' entry to trace event show_module_flags() list for the sake of completeness. 
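A condensed sketch of the resulting check on the tracepoint side; the TP_OK_TAINTS macro is only an illustrative name, the actual patch below writes the mask inline:

	/* taints that do not prevent tracepoints in a module (sketch) */
	#define TP_OK_TAINTS	((1 << TAINT_OOT_MODULE) |	\
				 (1 << TAINT_CRAP) |		\
				 (1 << TAINT_UNSIGNED_MODULE))

	bool trace_module_has_bad_taint(struct module *mod)
	{
		return mod->taints & ~TP_OK_TAINTS;
	}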
Signed-off-by: Mathieu Desnoyers Acked-by: Steven Rostedt NAKed-by: Ingo Molnar CC: Thomas Gleixner CC: David Howells CC: Greg Kroah-Hartman Signed-off-by: Rusty Russell --- kernel/module.c | 4 +++- kernel/panic.c | 2 ++ kernel/tracepoint.c | 5 +++-- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index efa1e6031950..c1acb0c5b637 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1013,6 +1013,8 @@ static size_t module_flags_taint(struct module *mod, char *buf) buf[l++] = 'F'; if (mod->taints & (1 << TAINT_CRAP)) buf[l++] = 'C'; + if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) + buf[l++] = 'X'; /* * TAINT_FORCED_RMMOD: could be added. * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't @@ -3214,7 +3216,7 @@ static int load_module(struct load_info *info, const char __user *uargs, pr_notice_once("%s: module verification failed: signature " "and/or required key missing - tainting " "kernel\n", mod->name); - add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK); + add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK); } #endif diff --git a/kernel/panic.c b/kernel/panic.c index 6d6300375090..0e25fe10871e 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -210,6 +210,7 @@ static const struct tnt tnts[] = { { TAINT_CRAP, 'C', ' ' }, { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, { TAINT_OOT_MODULE, 'O', ' ' }, + { TAINT_UNSIGNED_MODULE, 'X', ' ' }, }; /** @@ -228,6 +229,7 @@ static const struct tnt tnts[] = { * 'C' - modules from drivers/staging are loaded. * 'I' - Working around severe firmware bug. * 'O' - Out-of-tree module has been loaded. + * 'X' - Unsigned module has been loaded. * * The string is overwritten by the next call to print_tainted(). */ diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 031cc5655a51..3cdbed1fbdc7 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -633,7 +633,8 @@ EXPORT_SYMBOL_GPL(tracepoint_iter_reset); #ifdef CONFIG_MODULES bool trace_module_has_bad_taint(struct module *mod) { - return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); + return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) | + (1 << TAINT_UNSIGNED_MODULE)); } static int tracepoint_module_coming(struct module *mod) @@ -644,7 +645,7 @@ static int tracepoint_module_coming(struct module *mod) /* * We skip modules that taint the kernel, especially those with different * module headers (for forced load), to make sure we don't cause a crash. - * Staging and out-of-tree GPL modules are fine. + * Staging, out-of-tree, and unsigned GPL modules are fine. */ if (trace_module_has_bad_taint(mod)) return 0; -- cgit v1.2.3 From 09294e31b1779dda22f420c195994a0db54c9a92 Mon Sep 17 00:00:00 2001 From: "David A. Long" Date: Fri, 7 Mar 2014 10:32:22 -0500 Subject: uprobes: Kconfig dependency fix Suggested change from Oleg Nesterov. Fixes incomplete dependencies for uprobes feature. Signed-off-by: David A. Long Acked-by: Oleg Nesterov --- kernel/trace/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 015f85aaca08..8639819f6cef 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -424,6 +424,7 @@ config UPROBE_EVENT bool "Enable uprobes-based dynamic events" depends on ARCH_SUPPORTS_UPROBES depends on MMU + depends on PERF_EVENTS select UPROBES select PROBE_EVENTS select TRACING -- cgit v1.2.3 From 6fe50a28ba6e5fafb4a549dea666dd15297dd8bd Mon Sep 17 00:00:00 2001 From: "David A. 
Long" Date: Mon, 3 Feb 2014 14:25:49 -0500 Subject: uprobes: allow ignoring of probe hits Allow arches to decided to ignore a probe hit. ARM will use this to only call handlers if the conditions to execute a conditionally executed instruction are satisfied. Signed-off-by: David A. Long Acked-by: Oleg Nesterov --- kernel/events/uprobes.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'kernel') diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 307d87c0991a..04709b66369d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1804,6 +1804,11 @@ static bool handle_trampoline(struct pt_regs *regs) return true; } +bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs) +{ + return false; +} + /* * Run handler and ask thread to singlestep. * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. @@ -1858,7 +1863,11 @@ static void handle_swbp(struct pt_regs *regs) if (!get_utask()) goto out; + if (arch_uprobe_ignore(&uprobe->arch, regs)) + goto out; + handler_chain(uprobe, regs); + if (can_skip_sstep(uprobe, regs)) goto out; -- cgit v1.2.3 From 5d77381fd8aa631a8fda718c395da1319afb5d2d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:53 -0400 Subject: cgroup: relocate setting of CGRP_DEAD In cgroup_destroy_locked(), move setting of CGRP_DEAD above invocations of kill_css(). This doesn't make any visible behavior difference now but will be used to inhibit manipulating controller enable states of a dying cgroup on the unified hierarchy. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 306ad0ed19ef..b604c7e0cfc6 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3867,6 +3867,15 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) if (!empty) return -EBUSY; + /* + * Mark @cgrp dead. This prevents further task migration and child + * creation by disabling cgroup_lock_live_group(). Note that + * CGRP_DEAD assertion is depended upon by css_next_child() to + * resume iteration after dropping RCU read lock. See + * css_next_child() for details. + */ + set_bit(CGRP_DEAD, &cgrp->flags); + /* * Initiate massacre of all css's. cgroup_destroy_css_killed() * will be invoked to perform the rest of destruction once the @@ -3878,15 +3887,6 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) kill_css(css); mutex_lock(&cgroup_mutex); - /* - * Mark @cgrp dead. This prevents further task migration and child - * creation by disabling cgroup_lock_live_group(). Note that - * CGRP_DEAD assertion is depended upon by css_next_child() to - * resume iteration after dropping RCU read lock. See - * css_next_child() for details. - */ - set_bit(CGRP_DEAD, &cgrp->flags); - /* CGRP_DEAD is set, remove from ->release_list for the last time */ raw_spin_lock(&release_list_lock); if (!list_empty(&cgrp->release_list)) -- cgit v1.2.3 From 172a2c0685ff3bc0b7a611b308aac0694de34594 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:53 -0400 Subject: cgroup: reorganize cgroup bootstrapping * Fields of init_css_set and css_set_count are now set using initializer instead of programmatically from cgroup_init_early(). * init_cgroup_root() now also takes @opts and performs the optional part of initialization too. The leftover part of cgroup_root_from_opts() is collapsed into its only caller - cgroup_mount(). 
* Initialization of cgroup_root_count and linking of init_css_set are moved from cgroup_init_early() to to cgroup_init(). None of the early_init users depends on init_css_set being linked. * Subsystem initializations are moved after dummy hierarchy init and init_css_set linking. These changes reorganize the bootstrap logic so that the dummy hierarchy can share the usual hierarchy init path and be made more normal. These changes don't make noticeable behavior changes. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 100 +++++++++++++++++++++++++++----------------------------- 1 file changed, 49 insertions(+), 51 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b604c7e0cfc6..e66b9ee5ecc1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -338,16 +338,24 @@ struct cgrp_cset_link { struct list_head cgrp_link; }; -/* The default css_set - used by init and its children prior to any +/* + * The default css_set - used by init and its children prior to any * hierarchies being mounted. It contains a pointer to the root state * for each subsystem. Also used to anchor the list of css_sets. Not * reference-counted, to improve performance when child cgroups * haven't been created. */ +static struct css_set init_css_set = { + .refcount = ATOMIC_INIT(1), + .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), + .tasks = LIST_HEAD_INIT(init_css_set.tasks), + .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), + .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), + .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), +}; -static struct css_set init_css_set; static struct cgrp_cset_link init_cgrp_cset_link; -static int css_set_count; +static int css_set_count = 1; /* 1 for init_css_set */ /* * hash table for cgroup groups. This improves the performance to find @@ -1352,7 +1360,8 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) cgrp->dummy_css.cgroup = cgrp; } -static void init_cgroup_root(struct cgroupfs_root *root) +static void init_cgroup_root(struct cgroupfs_root *root, + struct cgroup_sb_opts *opts) { struct cgroup *cgrp = &root->top_cgroup; @@ -1361,20 +1370,6 @@ static void init_cgroup_root(struct cgroupfs_root *root) cgrp->root = root; init_cgroup_housekeeping(cgrp); idr_init(&root->cgroup_idr); -} - -static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) -{ - struct cgroupfs_root *root; - - if (!opts->subsys_mask && !opts->none) - return ERR_PTR(-EINVAL); - - root = kzalloc(sizeof(*root), GFP_KERNEL); - if (!root) - return ERR_PTR(-ENOMEM); - - init_cgroup_root(root); root->flags = opts->flags; if (opts->release_agent) @@ -1383,7 +1378,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) strcpy(root->name, opts->name); if (opts->cpuset_clone_children) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags); - return root; } static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) @@ -1548,13 +1542,24 @@ retry: goto out_unlock; } - /* no such thing, create a new one */ - root = cgroup_root_from_opts(&opts); - if (IS_ERR(root)) { - ret = PTR_ERR(root); + /* + * No such thing, create a new one. name= matching without subsys + * specification is allowed for already existing hierarchies but we + * can't create new one without subsys specification. 
+ */ + if (!opts.subsys_mask && !opts.none) { + ret = -EINVAL; + goto out_unlock; + } + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) { + ret = -ENOMEM; goto out_unlock; } + init_cgroup_root(root, &opts); + ret = cgroup_setup_root(root, opts.subsys_mask); if (ret) cgroup_free_root(root); @@ -4030,26 +4035,13 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) */ int __init cgroup_init_early(void) { + static struct cgroup_sb_opts __initdata opts = { }; struct cgroup_subsys *ss; int i; - atomic_set(&init_css_set.refcount, 1); - INIT_LIST_HEAD(&init_css_set.cgrp_links); - INIT_LIST_HEAD(&init_css_set.tasks); - INIT_LIST_HEAD(&init_css_set.mg_tasks); - INIT_LIST_HEAD(&init_css_set.mg_preload_node); - INIT_LIST_HEAD(&init_css_set.mg_node); - INIT_HLIST_NODE(&init_css_set.hlist); - css_set_count = 1; - init_cgroup_root(&cgroup_dummy_root); - cgroup_root_count = 1; + init_cgroup_root(&cgroup_dummy_root, &opts); RCU_INIT_POINTER(init_task.cgroups, &init_css_set); - init_cgrp_cset_link.cset = &init_css_set; - init_cgrp_cset_link.cgrp = cgroup_dummy_top; - list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); - list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); - for_each_subsys(ss, i) { WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id, "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n", @@ -4077,22 +4069,10 @@ int __init cgroup_init(void) { struct cgroup_subsys *ss; unsigned long key; - int i, err; + int ssid, err; BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); - for_each_subsys(ss, i) { - if (!ss->early_init) - cgroup_init_subsys(ss); - - /* - * cftype registration needs kmalloc and can't be done - * during early_init. Register base cftypes separately. - */ - if (ss->base_cftypes) - WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes)); - } - /* allocate id for the dummy hierarchy */ mutex_lock(&cgroup_mutex); @@ -4106,8 +4086,26 @@ int __init cgroup_init(void) 0, 1, GFP_KERNEL); BUG_ON(err < 0); + cgroup_root_count = 1; + init_cgrp_cset_link.cset = &init_css_set; + init_cgrp_cset_link.cgrp = cgroup_dummy_top; + list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); + list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); + mutex_unlock(&cgroup_mutex); + for_each_subsys(ss, ssid) { + if (!ss->early_init) + cgroup_init_subsys(ss); + + /* + * cftype registration needs kmalloc and can't be done + * during early_init. Register base cftypes separately. + */ + if (ss->base_cftypes) + WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes)); + } + cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj); if (!cgroup_kobj) return -ENOMEM; -- cgit v1.2.3 From 985ed670144c25058f235276f69d687de1b7c7ba Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:53 -0400 Subject: cgroup: use cgroup_setup_root() to initialize cgroup_dummy_root cgroup_dummy_root is used to host controllers which aren't attached to any other hierarchy. The root is minimally set up during kernfs bootstrap and didn't go through full hierarchy initialization. We're planning to use cgroup_dummy_root for the default unified hierarchy and thus want it to be fully functional. Replace the special initialization, which was collected into cgroup_init() by the previous patch, with an invocation of cgroup_setup_root(). This simplifies the init path and makes cgroup_dummy_root a full hierarchy with its own kernfs_root and all. 
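Concretely, boot-time setup of the dummy hierarchy collapses to a single call through the regular path (sketch, matching the hunk below):

	/* the dummy root now goes through the normal setup path, with no
	 * controllers bound to it at this point */
	BUG_ON(cgroup_setup_root(&cgroup_dummy_root, 0));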
As this puts the dummy hierarchy on the cgroup_roots list, rename for_each_active_root() to for_each_root() and update its users to skip the dummy root for now. This patch doesn't cause any userland visible behavior changes at this point. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 43 ++++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 23 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e66b9ee5ecc1..78017f52c69b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -289,8 +289,8 @@ static int notify_on_release(const struct cgroup *cgrp) for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) -/* iterate across the active hierarchies */ -#define for_each_active_root(root) \ +/* iterate across the hierarchies */ +#define for_each_root(root) \ list_for_each_entry((root), &cgroup_roots, root_list) /** @@ -354,7 +354,6 @@ static struct css_set init_css_set = { .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), }; -static struct cgrp_cset_link init_cgrp_cset_link; static int css_set_count = 1; /* 1 for init_css_set */ /* @@ -693,14 +692,13 @@ static struct cgroupfs_root *cgroup_root_from_kf(struct kernfs_root *kf_root) return top_cgrp->root; } -static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) +static int cgroup_init_root_id(struct cgroupfs_root *root) { int id; lockdep_assert_held(&cgroup_mutex); - id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, - GFP_KERNEL); + id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL); if (id < 0) return id; @@ -1405,8 +1403,7 @@ static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) if (ret) goto out; - /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ - ret = cgroup_init_root_id(root, 2, 0); + ret = cgroup_init_root_id(root); if (ret) goto out; @@ -1486,9 +1483,12 @@ retry: goto out_unlock; /* look for a matching existing root */ - for_each_active_root(root) { + for_each_root(root) { bool name_match = false; + if (root == &cgroup_dummy_root) + continue; + /* * If we asked for a name then it must match. Also, if * name matches but sybsys_mask doesn't, we should fail. 
@@ -2106,9 +2106,12 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) int retval = 0; mutex_lock(&cgroup_mutex); - for_each_active_root(root) { + for_each_root(root) { struct cgroup *from_cgrp; + if (root == &cgroup_dummy_root) + continue; + down_read(&css_set_rwsem); from_cgrp = task_cgroup_from_root(from, root); up_read(&css_set_rwsem); @@ -4073,26 +4076,17 @@ int __init cgroup_init(void) BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files)); - /* allocate id for the dummy hierarchy */ + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); /* Add init_css_set to the hash table */ key = css_set_hash(init_css_set.subsys); hash_add(css_set_table, &init_css_set.hlist, key); - BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1)); - - err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top, - 0, 1, GFP_KERNEL); - BUG_ON(err < 0); - - cgroup_root_count = 1; - init_cgrp_cset_link.cset = &init_css_set; - init_cgrp_cset_link.cgrp = cgroup_dummy_top; - list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links); - list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links); + BUG_ON(cgroup_setup_root(&cgroup_dummy_root, 0)); mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgroup_tree_mutex); for_each_subsys(ss, ssid) { if (!ss->early_init) @@ -4176,11 +4170,14 @@ int proc_cgroup_show(struct seq_file *m, void *v) mutex_lock(&cgroup_mutex); down_read(&css_set_rwsem); - for_each_active_root(root) { + for_each_root(root) { struct cgroup_subsys *ss; struct cgroup *cgrp; int ssid, count = 0; + if (root == &cgroup_dummy_root) + continue; + seq_printf(m, "%d:", root->hierarchy_id); for_each_subsys(ss, ssid) if (root->subsys_mask & (1 << ssid)) -- cgit v1.2.3 From 5df3603229e520442502ff7fc5715c77bbf61912 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:54 -0400 Subject: cgroup: treat cgroup_dummy_root as an equivalent hierarchy during rebinding Currently, while rebinding, cgroup_dummy_root serves as the anchor point. In addition to the target root, rebind_subsystems() takes @added_mask and @removed_mask. The subsystems specified in the former are expected to be on the dummy root and then moved to the target root. The ones in the latter are moved from non-dummy root to dummy. Now that the dummy root is a fully functional one and we're planning to use it for the default unified hierarchy, this level of distinction between dummy and non-dummy roots is quite awkward. This patch updates rebind_subsystems() to take the target root and one subsystem mask and move the specified subsystmes to the target root which may or may not be the dummy root. IOW, unbinding now becomes moving the subsystems to the dummy root and binding to non-dummy root. This makes the dummy root mostly equivalent to other hierarchies in terms of the mechanism of moving subsystems around; however, we still retain all the semantical restrictions so that this patch doesn't introduce any visible behavior differences. Another noteworthy detail is that rebind_subsystems() guarantees that moving a subsystem to the dummy root never fails so that valid unmounting attempts always succeed. This unifies binding and unbinding of subsystems. The invocation points of ->bind() were inconsistent between the two and now moved after whole rebinding is complete. This doesn't break the current users and generally makes more sense. All rebind_subsystems() users are converted accordingly. 
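Sketch of the resulting idiom (taken in simplified form from the hunks below): unbinding a hierarchy's controllers is now expressed as a rebind whose destination is the dummy root, and that direction is guaranteed not to fail:

	/* e.g. in cgroup_destroy_root() */
	rebind_subsystems(&cgroup_dummy_root, root->subsys_mask);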
Note that cgroup_remount() now makes two calls to rebind_subsystems() to bind and then unbind the requested subsystems. This will allow repurposing of the dummy hierarchy as the default unified hierarchy and shouldn't make any userland visible behavior difference. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 100 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 56 insertions(+), 44 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 78017f52c69b..b632981bd9c7 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -175,8 +175,8 @@ static int need_forkexit_callback __read_mostly; static struct cftype cgroup_base_files[]; static void cgroup_put(struct cgroup *cgrp); -static int rebind_subsystems(struct cgroupfs_root *root, - unsigned long added_mask, unsigned removed_mask); +static int rebind_subsystems(struct cgroupfs_root *dst_root, + unsigned long ss_mask); static void cgroup_destroy_css_killed(struct cgroup *cgrp); static int cgroup_destroy_locked(struct cgroup *cgrp); static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], @@ -739,7 +739,7 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) BUG_ON(!list_empty(&cgrp->children)); /* Rebind all subsystems back to the default hierarchy */ - WARN_ON(rebind_subsystems(root, 0, root->subsys_mask)); + rebind_subsystems(&cgroup_dummy_root, root->subsys_mask); /* * Release all the links from cset_links to this hierarchy's @@ -976,69 +976,77 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) } } -static int rebind_subsystems(struct cgroupfs_root *root, - unsigned long added_mask, unsigned removed_mask) +static int rebind_subsystems(struct cgroupfs_root *dst_root, + unsigned long ss_mask) { - struct cgroup *cgrp = &root->top_cgroup; + struct cgroup *dst_top = &dst_root->top_cgroup; struct cgroup_subsys *ss; - int i, ret; + int ssid, ret; lockdep_assert_held(&cgroup_tree_mutex); lockdep_assert_held(&cgroup_mutex); - /* Check that any added subsystems are currently free */ - for_each_subsys(ss, i) - if ((added_mask & (1 << i)) && ss->root != &cgroup_dummy_root) + for_each_subsys(ss, ssid) { + if (!(ss_mask & (1 << ssid))) + continue; + + /* if @ss is on the dummy_root, we can always move it */ + if (ss->root == &cgroup_dummy_root) + continue; + + /* if @ss has non-root cgroups attached to it, can't move */ + if (!list_empty(&ss->root->top_cgroup.children)) return -EBUSY; - ret = cgroup_populate_dir(cgrp, added_mask); - if (ret) - return ret; + /* can't move between two non-dummy roots either */ + if (dst_root != &cgroup_dummy_root) + return -EBUSY; + } + + if (dst_root != &cgroup_dummy_root) { + ret = cgroup_populate_dir(dst_top, ss_mask); + if (ret) + return ret; + } /* * Nothing can fail from this point on. Remove files for the * removed subsystems and rebind each subsystem. 
*/ mutex_unlock(&cgroup_mutex); - cgroup_clear_dir(cgrp, removed_mask); + for_each_subsys(ss, ssid) + if ((ss_mask & (1 << ssid)) && ss->root != &cgroup_dummy_root) + cgroup_clear_dir(&ss->root->top_cgroup, 1 << ssid); mutex_lock(&cgroup_mutex); - for_each_subsys(ss, i) { - unsigned long bit = 1UL << i; - - if (bit & added_mask) { - /* We're binding this subsystem to this hierarchy */ - BUG_ON(cgroup_css(cgrp, ss)); - BUG_ON(!cgroup_css(cgroup_dummy_top, ss)); - BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top); + for_each_subsys(ss, ssid) { + struct cgroupfs_root *src_root; + struct cgroup *src_top; + struct cgroup_subsys_state *css; - rcu_assign_pointer(cgrp->subsys[i], - cgroup_css(cgroup_dummy_top, ss)); - cgroup_css(cgrp, ss)->cgroup = cgrp; + if (!(ss_mask & (1 << ssid))) + continue; - ss->root = root; - if (ss->bind) - ss->bind(cgroup_css(cgrp, ss)); + src_root = ss->root; + src_top = &src_root->top_cgroup; + css = cgroup_css(src_top, ss); - /* refcount was already taken, and we're keeping it */ - root->subsys_mask |= bit; - } else if (bit & removed_mask) { - /* We're removing this subsystem */ - BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss)); - BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp); + WARN_ON(!css || cgroup_css(dst_top, ss)); - if (ss->bind) - ss->bind(cgroup_css(cgroup_dummy_top, ss)); + RCU_INIT_POINTER(src_top->subsys[ssid], NULL); + rcu_assign_pointer(dst_top->subsys[ssid], css); + ss->root = dst_root; + css->cgroup = dst_top; - cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top; - RCU_INIT_POINTER(cgrp->subsys[i], NULL); + src_root->subsys_mask &= ~(1 << ssid); + dst_root->subsys_mask |= 1 << ssid; - cgroup_subsys[i]->root = &cgroup_dummy_root; - root->subsys_mask &= ~bit; - } + if (ss->bind) + ss->bind(css); } - kernfs_activate(cgrp->kn); + if (dst_root != &cgroup_dummy_root) + kernfs_activate(dst_top->kn); return 0; } @@ -1277,10 +1285,12 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) goto out_unlock; } - ret = rebind_subsystems(root, added_mask, removed_mask); + ret = rebind_subsystems(root, added_mask); if (ret) goto out_unlock; + rebind_subsystems(&cgroup_dummy_root, removed_mask); + if (opts.release_agent) { spin_lock(&release_agent_path_lock); strcpy(root->release_agent_path, opts.release_agent); @@ -1420,7 +1430,7 @@ static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) if (ret) goto destroy_root; - ret = rebind_subsystems(root, ss_mask, 0); + ret = rebind_subsystems(root, ss_mask); if (ret) goto destroy_root; @@ -4026,6 +4036,8 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(online_css(css)); + cgroup_dummy_root.subsys_mask |= 1 << ss->id; + mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); } -- cgit v1.2.3 From 944196278d3dea0cece1636de417b56897d9a108 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:54 -0400 Subject: cgroup: move ->subsys_mask from cgroupfs_root to cgroup cgroupfs_root->subsys_mask represents the controllers attached to the hierarchy. This patch moves the field to cgroup. Subsystem initialization and rebinding updates the top cgroup's subsys_mask. For !root cgroups, the subsys_mask bits are set from create_css() and cleared from kill_css(), which effectively means that all cgroups will have the same subsys_mask as the top cgroup. 
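For illustration, a per-cgroup controller test now reads the mask from the cgroup rather than from the root; the helper below is hypothetical and not part of the patch:

	static bool cgroup_has_subsys(const struct cgroup *cgrp, int ssid)
	{
		return cgrp->subsys_mask & (1 << ssid);
	}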
While this doesn't make any difference now, this will help implementation of the default unified hierarchy where !root cgroups may have subsets of the top_cgroup's subsys_mask. While at it, __kill_css() is split out of kill_css(). The former doesn't care about the subsys_mask while the latter becomes noop if the controller is already killed and clears the matching bit if not before proceeding to killing the css. This will be used later by the default unified hierarchy implementation. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 61 ++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 22 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b632981bd9c7..807f88dbda51 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -526,7 +526,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, * won't change, so no need for locking. */ for_each_subsys(ss, i) { - if (root->subsys_mask & (1UL << i)) { + if (root->top_cgroup.subsys_mask & (1UL << i)) { /* Subsystem is in this hierarchy. So we want * the subsystem state from the new * cgroup */ @@ -739,7 +739,7 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) BUG_ON(!list_empty(&cgrp->children)); /* Rebind all subsystems back to the default hierarchy */ - rebind_subsystems(&cgroup_dummy_root, root->subsys_mask); + rebind_subsystems(&cgroup_dummy_root, cgrp->subsys_mask); /* * Release all the links from cset_links to this hierarchy's @@ -1038,8 +1038,8 @@ static int rebind_subsystems(struct cgroupfs_root *dst_root, ss->root = dst_root; css->cgroup = dst_top; - src_root->subsys_mask &= ~(1 << ssid); - dst_root->subsys_mask |= 1 << ssid; + src_top->subsys_mask &= ~(1 << ssid); + dst_top->subsys_mask |= 1 << ssid; if (ss->bind) ss->bind(css); @@ -1058,7 +1058,7 @@ static int cgroup_show_options(struct seq_file *seq, int ssid; for_each_subsys(ss, ssid) - if (root->subsys_mask & (1 << ssid)) + if (root->top_cgroup.subsys_mask & (1 << ssid)) seq_printf(seq, ",%s", ss->name); if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) seq_puts(seq, ",sane_behavior"); @@ -1262,12 +1262,12 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) if (ret) goto out_unlock; - if (opts.subsys_mask != root->subsys_mask || opts.release_agent) + if (opts.subsys_mask != root->top_cgroup.subsys_mask || opts.release_agent) pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", task_tgid_nr(current), current->comm); - added_mask = opts.subsys_mask & ~root->subsys_mask; - removed_mask = root->subsys_mask & ~opts.subsys_mask; + added_mask = opts.subsys_mask & ~root->top_cgroup.subsys_mask; + removed_mask = root->top_cgroup.subsys_mask & ~opts.subsys_mask; /* Don't allow flags or name to change at remount */ if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || @@ -1515,7 +1515,7 @@ retry: * subsystems) then they must match. */ if ((opts.subsys_mask || opts.none) && - (opts.subsys_mask != root->subsys_mask)) { + (opts.subsys_mask != root->top_cgroup.subsys_mask)) { if (!name_match) continue; ret = -EBUSY; @@ -3594,6 +3594,8 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) cgroup_get(cgrp); css_get(css->parent); + cgrp->subsys_mask |= 1 << ss->id; + if (ss->broken_hierarchy && !ss->warned_broken_hierarchy && parent->parent) { pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. 
Nested cgroups may change behavior in the future.\n", @@ -3700,7 +3702,7 @@ static long cgroup_create(struct cgroup *parent, const char *name, /* let's create and online css's */ for_each_subsys(ss, ssid) { - if (root->subsys_mask & (1 << ssid)) { + if (root->top_cgroup.subsys_mask & (1 << ssid)) { err = create_css(cgrp, ss); if (err) goto err_destroy; @@ -3788,17 +3790,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref) queue_work(cgroup_destroy_wq, &css->destroy_work); } -/** - * kill_css - destroy a css - * @css: css to destroy - * - * This function initiates destruction of @css by removing cgroup interface - * files and putting its base reference. ->css_offline() will be invoked - * asynchronously once css_tryget() is guaranteed to fail and when the - * reference count reaches zero, @css will be released. - */ -static void kill_css(struct cgroup_subsys_state *css) +static void __kill_css(struct cgroup_subsys_state *css) { + lockdep_assert_held(&cgroup_tree_mutex); + /* * This must happen before css is disassociated with its cgroup. * See seq_css() for details. @@ -3824,6 +3819,28 @@ static void kill_css(struct cgroup_subsys_state *css) percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn); } +/** + * kill_css - destroy a css + * @css: css to destroy + * + * This function initiates destruction of @css by removing cgroup interface + * files and putting its base reference. ->css_offline() will be invoked + * asynchronously once css_tryget() is guaranteed to fail and when the + * reference count reaches zero, @css will be released. + */ +static void kill_css(struct cgroup_subsys_state *css) +{ + struct cgroup *cgrp = css->cgroup; + + lockdep_assert_held(&cgroup_tree_mutex); + + /* if already killed, noop */ + if (cgrp->subsys_mask & (1 << css->ss->id)) { + cgrp->subsys_mask &= ~(1 << css->ss->id); + __kill_css(css); + } +} + /** * cgroup_destroy_locked - the first stage of cgroup destruction * @cgrp: cgroup to be destroyed @@ -4036,7 +4053,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(online_css(css)); - cgroup_dummy_root.subsys_mask |= 1 << ss->id; + cgroup_dummy_root.top_cgroup.subsys_mask |= 1 << ss->id; mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); @@ -4192,7 +4209,7 @@ int proc_cgroup_show(struct seq_file *m, void *v) seq_printf(m, "%d:", root->hierarchy_id); for_each_subsys(ss, ssid) - if (root->subsys_mask & (1 << ssid)) + if (root->top_cgroup.subsys_mask & (1 << ssid)) seq_printf(m, "%s%s", count++ ? "," : "", ss->name); if (strlen(root->name)) seq_printf(m, "%sname=%s", count ? "," : "", -- cgit v1.2.3 From 3dd06ffa9df99aa88f4e01eaa0c9d3cb362430b0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:54 -0400 Subject: cgroup: rename cgroup_dummy_root and related names The dummy root will be repurposed to serve as the default unified hierarchy. Let's rename things in preparation. * s/cgroup_dummy_root/cgrp_dfl_root/ * s/cgroupfs_root/cgroup_root/ as we don't do fs part directly anymore * s/cgroup_root->top_cgroup/cgroup_root->cgrp/ for brevity This is pure rename. 
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 168 +++++++++++++++++++++++++++----------------------------- 1 file changed, 81 insertions(+), 87 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 807f88dbda51..60ea16058c42 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -138,14 +138,11 @@ static const char *cgroup_subsys_name[] = { #undef SUBSYS /* - * The dummy hierarchy, reserved for the subsystems that are otherwise + * The default hierarchy, reserved for the subsystems that are otherwise * unattached - it never has more than a single cgroup, and all tasks are * part of that cgroup. */ -static struct cgroupfs_root cgroup_dummy_root; - -/* dummy_top is a shorthand for the dummy hierarchy's top cgroup */ -static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup; +static struct cgroup_root cgrp_dfl_root; /* The list of hierarchy roots */ @@ -175,7 +172,7 @@ static int need_forkexit_callback __read_mostly; static struct cftype cgroup_base_files[]; static void cgroup_put(struct cgroup *cgrp); -static int rebind_subsystems(struct cgroupfs_root *dst_root, +static int rebind_subsystems(struct cgroup_root *dst_root, unsigned long ss_mask); static void cgroup_destroy_css_killed(struct cgroup *cgrp); static int cgroup_destroy_locked(struct cgroup *cgrp); @@ -514,7 +511,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, struct cgroup *cgrp, struct cgroup_subsys_state *template[]) { - struct cgroupfs_root *root = cgrp->root; + struct cgroup_root *root = cgrp->root; struct cgroup_subsys *ss; struct css_set *cset; unsigned long key; @@ -526,7 +523,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset, * won't change, so no need for locking. */ for_each_subsys(ss, i) { - if (root->top_cgroup.subsys_mask & (1UL << i)) { + if (root->cgrp.subsys_mask & (1UL << i)) { /* Subsystem is in this hierarchy. 
So we want * the subsystem state from the new * cgroup */ @@ -685,14 +682,14 @@ static struct css_set *find_css_set(struct css_set *old_cset, return cset; } -static struct cgroupfs_root *cgroup_root_from_kf(struct kernfs_root *kf_root) +static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) { - struct cgroup *top_cgrp = kf_root->kn->priv; + struct cgroup *root_cgrp = kf_root->kn->priv; - return top_cgrp->root; + return root_cgrp->root; } -static int cgroup_init_root_id(struct cgroupfs_root *root) +static int cgroup_init_root_id(struct cgroup_root *root) { int id; @@ -706,7 +703,7 @@ static int cgroup_init_root_id(struct cgroupfs_root *root) return 0; } -static void cgroup_exit_root_id(struct cgroupfs_root *root) +static void cgroup_exit_root_id(struct cgroup_root *root) { lockdep_assert_held(&cgroup_mutex); @@ -716,7 +713,7 @@ static void cgroup_exit_root_id(struct cgroupfs_root *root) } } -static void cgroup_free_root(struct cgroupfs_root *root) +static void cgroup_free_root(struct cgroup_root *root) { if (root) { /* hierarhcy ID shoulid already have been released */ @@ -727,9 +724,9 @@ static void cgroup_free_root(struct cgroupfs_root *root) } } -static void cgroup_destroy_root(struct cgroupfs_root *root) +static void cgroup_destroy_root(struct cgroup_root *root) { - struct cgroup *cgrp = &root->top_cgroup; + struct cgroup *cgrp = &root->cgrp; struct cgrp_cset_link *link, *tmp_link; mutex_lock(&cgroup_tree_mutex); @@ -739,7 +736,7 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) BUG_ON(!list_empty(&cgrp->children)); /* Rebind all subsystems back to the default hierarchy */ - rebind_subsystems(&cgroup_dummy_root, cgrp->subsys_mask); + rebind_subsystems(&cgrp_dfl_root, cgrp->subsys_mask); /* * Release all the links from cset_links to this hierarchy's @@ -770,7 +767,7 @@ static void cgroup_destroy_root(struct cgroupfs_root *root) /* look up cgroup associated with given css_set on the specified hierarchy */ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, - struct cgroupfs_root *root) + struct cgroup_root *root) { struct cgroup *res = NULL; @@ -778,7 +775,7 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, lockdep_assert_held(&css_set_rwsem); if (cset == &init_css_set) { - res = &root->top_cgroup; + res = &root->cgrp; } else { struct cgrp_cset_link *link; @@ -801,7 +798,7 @@ static struct cgroup *cset_cgroup_from_root(struct css_set *cset, * called with cgroup_mutex and css_set_rwsem held. */ static struct cgroup *task_cgroup_from_root(struct task_struct *task, - struct cgroupfs_root *root) + struct cgroup_root *root) { /* * No need to lock the task - since we hold cgroup_mutex the @@ -837,9 +834,9 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, * A cgroup can only be deleted if both its 'count' of using tasks * is zero, and its list of 'children' cgroups is empty. Since all * tasks in the system use _some_ cgroup, and since there is always at - * least one task in the system (init, pid == 1), therefore, top_cgroup + * least one task in the system (init, pid == 1), therefore, root cgroup * always has either children cgroups and/or using tasks. So we don't - * need a special hack to ensure that top_cgroup cannot be deleted. + * need a special hack to ensure that root cgroup cannot be deleted. * * P.S. One more locking exception. 
RCU is used to guard the * update of a tasks cgroup pointer by cgroup_attach_task() @@ -905,7 +902,7 @@ static void cgroup_free_fn(struct work_struct *work) kfree(cgrp); } else { /* - * This is top cgroup's refcnt reaching zero, which + * This is root cgroup's refcnt reaching zero, which * indicates that the root should be released. */ cgroup_destroy_root(cgrp->root); @@ -976,10 +973,9 @@ static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) } } -static int rebind_subsystems(struct cgroupfs_root *dst_root, +static int rebind_subsystems(struct cgroup_root *dst_root, unsigned long ss_mask) { - struct cgroup *dst_top = &dst_root->top_cgroup; struct cgroup_subsys *ss; int ssid, ret; @@ -991,20 +987,20 @@ static int rebind_subsystems(struct cgroupfs_root *dst_root, continue; /* if @ss is on the dummy_root, we can always move it */ - if (ss->root == &cgroup_dummy_root) + if (ss->root == &cgrp_dfl_root) continue; /* if @ss has non-root cgroups attached to it, can't move */ - if (!list_empty(&ss->root->top_cgroup.children)) + if (!list_empty(&ss->root->cgrp.children)) return -EBUSY; /* can't move between two non-dummy roots either */ - if (dst_root != &cgroup_dummy_root) + if (dst_root != &cgrp_dfl_root) return -EBUSY; } - if (dst_root != &cgroup_dummy_root) { - ret = cgroup_populate_dir(dst_top, ss_mask); + if (dst_root != &cgrp_dfl_root) { + ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask); if (ret) return ret; } @@ -1015,50 +1011,48 @@ static int rebind_subsystems(struct cgroupfs_root *dst_root, */ mutex_unlock(&cgroup_mutex); for_each_subsys(ss, ssid) - if ((ss_mask & (1 << ssid)) && ss->root != &cgroup_dummy_root) - cgroup_clear_dir(&ss->root->top_cgroup, 1 << ssid); + if ((ss_mask & (1 << ssid)) && ss->root != &cgrp_dfl_root) + cgroup_clear_dir(&ss->root->cgrp, 1 << ssid); mutex_lock(&cgroup_mutex); for_each_subsys(ss, ssid) { - struct cgroupfs_root *src_root; - struct cgroup *src_top; + struct cgroup_root *src_root; struct cgroup_subsys_state *css; if (!(ss_mask & (1 << ssid))) continue; src_root = ss->root; - src_top = &src_root->top_cgroup; - css = cgroup_css(src_top, ss); + css = cgroup_css(&src_root->cgrp, ss); - WARN_ON(!css || cgroup_css(dst_top, ss)); + WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss)); - RCU_INIT_POINTER(src_top->subsys[ssid], NULL); - rcu_assign_pointer(dst_top->subsys[ssid], css); + RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL); + rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css); ss->root = dst_root; - css->cgroup = dst_top; + css->cgroup = &dst_root->cgrp; - src_top->subsys_mask &= ~(1 << ssid); - dst_top->subsys_mask |= 1 << ssid; + src_root->cgrp.subsys_mask &= ~(1 << ssid); + dst_root->cgrp.subsys_mask |= 1 << ssid; if (ss->bind) ss->bind(css); } - if (dst_root != &cgroup_dummy_root) - kernfs_activate(dst_top->kn); + if (dst_root != &cgrp_dfl_root) + kernfs_activate(dst_root->cgrp.kn); return 0; } static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root) { - struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); + struct cgroup_root *root = cgroup_root_from_kf(kf_root); struct cgroup_subsys *ss; int ssid; for_each_subsys(ss, ssid) - if (root->top_cgroup.subsys_mask & (1 << ssid)) + if (root->cgrp.subsys_mask & (1 << ssid)) seq_printf(seq, ",%s", ss->name); if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) seq_puts(seq, ",sane_behavior"); @@ -1072,7 +1066,7 @@ static int cgroup_show_options(struct seq_file *seq, seq_printf(seq, ",release_agent=%s", root->release_agent_path); 
spin_unlock(&release_agent_path_lock); - if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags)) + if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) seq_puts(seq, ",clone_children"); if (strlen(root->name)) seq_printf(seq, ",name=%s", root->name); @@ -1245,7 +1239,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) { int ret = 0; - struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); + struct cgroup_root *root = cgroup_root_from_kf(kf_root); struct cgroup_sb_opts opts; unsigned long added_mask, removed_mask; @@ -1262,12 +1256,12 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) if (ret) goto out_unlock; - if (opts.subsys_mask != root->top_cgroup.subsys_mask || opts.release_agent) + if (opts.subsys_mask != root->cgrp.subsys_mask || opts.release_agent) pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", task_tgid_nr(current), current->comm); - added_mask = opts.subsys_mask & ~root->top_cgroup.subsys_mask; - removed_mask = root->top_cgroup.subsys_mask & ~opts.subsys_mask; + added_mask = opts.subsys_mask & ~root->cgrp.subsys_mask; + removed_mask = root->cgrp.subsys_mask & ~opts.subsys_mask; /* Don't allow flags or name to change at remount */ if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || @@ -1280,7 +1274,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) } /* remounting is not allowed for populated hierarchies */ - if (!list_empty(&root->top_cgroup.children)) { + if (!list_empty(&root->cgrp.children)) { ret = -EBUSY; goto out_unlock; } @@ -1289,7 +1283,7 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) if (ret) goto out_unlock; - rebind_subsystems(&cgroup_dummy_root, removed_mask); + rebind_subsystems(&cgrp_dfl_root, removed_mask); if (opts.release_agent) { spin_lock(&release_agent_path_lock); @@ -1368,10 +1362,10 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp) cgrp->dummy_css.cgroup = cgrp; } -static void init_cgroup_root(struct cgroupfs_root *root, +static void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) { - struct cgroup *cgrp = &root->top_cgroup; + struct cgroup *cgrp = &root->cgrp; INIT_LIST_HEAD(&root->root_list); atomic_set(&root->nr_cgrps, 1); @@ -1385,13 +1379,13 @@ static void init_cgroup_root(struct cgroupfs_root *root, if (opts->name) strcpy(root->name, opts->name); if (opts->cpuset_clone_children) - set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags); + set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); } -static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) +static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask) { LIST_HEAD(tmp_links); - struct cgroup *root_cgrp = &root->top_cgroup; + struct cgroup *root_cgrp = &root->cgrp; struct css_set *cset; int i, ret; @@ -1443,7 +1437,7 @@ static int cgroup_setup_root(struct cgroupfs_root *root, unsigned long ss_mask) cgroup_root_count++; /* - * Link the top cgroup in this hierarchy into all the css_set + * Link the root cgroup in this hierarchy into all the css_set * objects. 
*/ down_write(&css_set_rwsem); @@ -1472,7 +1466,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data) { - struct cgroupfs_root *root; + struct cgroup_root *root; struct cgroup_sb_opts opts; struct dentry *dentry; int ret; @@ -1496,7 +1490,7 @@ retry: for_each_root(root) { bool name_match = false; - if (root == &cgroup_dummy_root) + if (root == &cgrp_dfl_root) continue; /* @@ -1515,7 +1509,7 @@ retry: * subsystems) then they must match. */ if ((opts.subsys_mask || opts.none) && - (opts.subsys_mask != root->top_cgroup.subsys_mask)) { + (opts.subsys_mask != root->cgrp.subsys_mask)) { if (!name_match) continue; ret = -EBUSY; @@ -1533,13 +1527,13 @@ retry: } /* - * A root's lifetime is governed by its top cgroup. Zero + * A root's lifetime is governed by its root cgroup. Zero * ref indicate that the root is being destroyed. Wait for * destruction to complete so that the subsystems are free. * We can use wait_queue for the wait but this path is * super cold. Let's just sleep for a bit and retry. */ - if (!atomic_inc_not_zero(&root->top_cgroup.refcnt)) { + if (!atomic_inc_not_zero(&root->cgrp.refcnt)) { mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); kfree(opts.release_agent); @@ -1586,16 +1580,16 @@ out_unlock: dentry = kernfs_mount(fs_type, flags, root->kf_root); if (IS_ERR(dentry)) - cgroup_put(&root->top_cgroup); + cgroup_put(&root->cgrp); return dentry; } static void cgroup_kill_sb(struct super_block *sb) { struct kernfs_root *kf_root = kernfs_root_from_sb(sb); - struct cgroupfs_root *root = cgroup_root_from_kf(kf_root); + struct cgroup_root *root = cgroup_root_from_kf(kf_root); - cgroup_put(&root->top_cgroup); + cgroup_put(&root->cgrp); kernfs_kill_sb(sb); } @@ -1622,7 +1616,7 @@ static struct kobject *cgroup_kobj; */ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) { - struct cgroupfs_root *root; + struct cgroup_root *root; struct cgroup *cgrp; int hierarchy_id = 1; char *path = NULL; @@ -2112,14 +2106,14 @@ out_unlock_cgroup: */ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) { - struct cgroupfs_root *root; + struct cgroup_root *root; int retval = 0; mutex_lock(&cgroup_mutex); for_each_root(root) { struct cgroup *from_cgrp; - if (root == &cgroup_dummy_root) + if (root == &cgrp_dfl_root) continue; down_read(&css_set_rwsem); @@ -2151,7 +2145,7 @@ static int cgroup_procs_write(struct cgroup_subsys_state *css, static int cgroup_release_agent_write(struct cgroup_subsys_state *css, struct cftype *cft, const char *buffer) { - struct cgroupfs_root *root = css->cgroup->root; + struct cgroup_root *root = css->cgroup->root; BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX); if (!cgroup_lock_live_group(css->cgroup)) @@ -2362,14 +2356,14 @@ static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) { LIST_HEAD(pending); struct cgroup_subsys *ss = cfts[0].ss; - struct cgroup *root = &ss->root->top_cgroup; + struct cgroup *root = &ss->root->cgrp; struct cgroup_subsys_state *css; int ret = 0; lockdep_assert_held(&cgroup_tree_mutex); /* don't bother if @ss isn't attached */ - if (ss->root == &cgroup_dummy_root) + if (ss->root == &cgrp_dfl_root) return 0; /* add/rm files for all cgroups created before */ @@ -3623,7 +3617,7 @@ static long cgroup_create(struct cgroup *parent, const char *name, umode_t mode) { struct cgroup *cgrp; - struct cgroupfs_root *root = parent->root; + struct cgroup_root *root = parent->root; int ssid, err; struct cgroup_subsys 
*ss; struct kernfs_node *kn; @@ -3702,7 +3696,7 @@ static long cgroup_create(struct cgroup *parent, const char *name, /* let's create and online css's */ for_each_subsys(ss, ssid) { - if (root->top_cgroup.subsys_mask & (1 << ssid)) { + if (root->cgrp.subsys_mask & (1 << ssid)) { err = create_css(cgrp, ss); if (err) goto err_destroy; @@ -4031,17 +4025,17 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) INIT_LIST_HEAD(&ss->cfts); - /* Create the top cgroup state for this subsystem */ - ss->root = &cgroup_dummy_root; - css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss)); + /* Create the root cgroup state for this subsystem */ + ss->root = &cgrp_dfl_root; + css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss)); /* We don't handle early failures gracefully */ BUG_ON(IS_ERR(css)); - init_css(css, ss, cgroup_dummy_top); + init_css(css, ss, &cgrp_dfl_root.cgrp); /* Update the init_css_set to contain a subsys * pointer to this state - since the subsystem is * newly registered, all tasks and hence the - * init_css_set is in the subsystem's top cgroup. */ + * init_css_set is in the subsystem's root cgroup. */ init_css_set.subsys[ss->id] = css; need_forkexit_callback |= ss->fork || ss->exit; @@ -4053,7 +4047,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) BUG_ON(online_css(css)); - cgroup_dummy_root.top_cgroup.subsys_mask |= 1 << ss->id; + cgrp_dfl_root.cgrp.subsys_mask |= 1 << ss->id; mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); @@ -4071,7 +4065,7 @@ int __init cgroup_init_early(void) struct cgroup_subsys *ss; int i; - init_cgroup_root(&cgroup_dummy_root, &opts); + init_cgroup_root(&cgrp_dfl_root, &opts); RCU_INIT_POINTER(init_task.cgroups, &init_css_set); for_each_subsys(ss, i) { @@ -4112,7 +4106,7 @@ int __init cgroup_init(void) key = css_set_hash(init_css_set.subsys); hash_add(css_set_table, &init_css_set.hlist, key); - BUG_ON(cgroup_setup_root(&cgroup_dummy_root, 0)); + BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0)); mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); @@ -4181,7 +4175,7 @@ int proc_cgroup_show(struct seq_file *m, void *v) struct task_struct *tsk; char *buf, *path; int retval; - struct cgroupfs_root *root; + struct cgroup_root *root; retval = -ENOMEM; buf = kmalloc(PATH_MAX, GFP_KERNEL); @@ -4204,12 +4198,12 @@ int proc_cgroup_show(struct seq_file *m, void *v) struct cgroup *cgrp; int ssid, count = 0; - if (root == &cgroup_dummy_root) + if (root == &cgrp_dfl_root) continue; seq_printf(m, "%d:", root->hierarchy_id); for_each_subsys(ss, ssid) - if (root->top_cgroup.subsys_mask & (1 << ssid)) + if (root->cgrp.subsys_mask & (1 << ssid)) seq_printf(m, "%s%s", count++ ? "," : "", ss->name); if (strlen(root->name)) seq_printf(m, "%sname=%s", count ? "," : "", @@ -4639,7 +4633,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) struct cgroup *c = link->cgrp; const char *name = "?"; - if (c != cgroup_dummy_top) { + if (c != &cgrp_dfl_root.cgrp) { cgroup_name(c, name_buf, NAME_MAX + 1); name = name_buf; } -- cgit v1.2.3 From 4d3bb511b5f9980fc3e9ae5939ebc475b231d3fc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:54 -0400 Subject: cgroup: drop const from @buffer of cftype->write_string() cftype->write_string() just passes on the writeable buffer from kernfs and there's no reason to add const restriction on the buffer. The only thing const achieves is unnecessarily complicating parsing of the buffer. Drop const from @buffer. 
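As a rough illustration (not part of this patch), a write_string() handler can now tokenize its buffer in place with the usual in-kernel string helpers; the handler name and the meaning of the tokens below are made up for the example:

    static int foo_write_string(struct cgroup_subsys_state *css,
                                struct cftype *cft, char *buffer)
    {
            char *tok;

            /* strsep()/strstrip() modify the buffer, which is legal now
             * that @buffer is no longer const */
            while ((tok = strsep(&buffer, " ")) != NULL) {
                    tok = strstrip(tok);
                    if (!*tok)
                            continue;
                    /* interpret @tok for this hypothetical control file */
            }
            return 0;
    }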
Signed-off-by: Tejun Heo Acked-by: Li Zefan Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Daniel Borkmann Cc: Michal Hocko Cc: Johannes Weiner Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki --- kernel/cgroup.c | 2 +- kernel/cgroup_freezer.c | 2 +- kernel/cpuset.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 60ea16058c42..f5754910e80b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2143,7 +2143,7 @@ static int cgroup_procs_write(struct cgroup_subsys_state *css, } static int cgroup_release_agent_write(struct cgroup_subsys_state *css, - struct cftype *cft, const char *buffer) + struct cftype *cft, char *buffer) { struct cgroup_root *root = css->cgroup->root; diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 2ea98b216bff..2bc4a2256444 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -442,7 +442,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze) } static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, - const char *buffer) + char *buffer) { bool freeze; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 8d5324583aa4..efbf9baf77ec 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1610,7 +1610,7 @@ out_unlock: * Common handling for a write to a "cpus" or "mems" file. */ static int cpuset_write_resmask(struct cgroup_subsys_state *css, - struct cftype *cft, const char *buf) + struct cftype *cft, char *buf) { struct cpuset *cs = css_cs(css); struct cpuset *trialcs; -- cgit v1.2.3 From a2dd424750807f83632a2a70293961086fd8f870 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:55 -0400 Subject: cgroup: make cgrp_dfl_root mountable cgrp_dfl_root will be used as the default unified hierarchy. This patch makes cgrp_dfl_root mountable by making the following changes. * cgroup_init_early() now initializes cgrp_dfl_root w/ CGRP_ROOT_SANE_BEHAVIOR. The default hierarchy is always sane. * parse_cgroupfs_options() and cgroup_mount() are updated such that cgrp_dfl_root is mounted if sane_behavior is specified w/o any subsystems. * rebind_subsystems() now populates the root directory of cgrp_dfl_root. Note that the function still guarantees success of rebinding subsystems to cgrp_dfl_root. If populating fails while rebinding to cgrp_dfl_root, it whines but ignores the error. * For backward compatibility, the default hierarchy shows up in /proc/$PID/cgroup only after it's explicitly mounted so that userland which doesn't make use of it doesn't see any change. * "current_css_set_cg_links" file of debug cgroup now treats the default hierarchy the same as other hierarchies. This is visible to userland. Given that it's for debug controller, this should be fine. * While at it, implement cgroup_on_dfl() which tests whether a give cgroup is on the default hierarchy or not. The above changes make cgrp_dfl_root mostly equivalent to other controllers but the actual unified hierarchy behaviors are not implemented yet. Let's plug child cgroup creation in cgrp_dfl_root from create_cgroup() for now. 
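cgroup_on_dfl() itself appears to be added outside kernel/cgroup.c and is therefore not visible in the diff below (which is limited to 'kernel'); presumably it boils down to a one-line comparison against cgrp_dfl_root, roughly:

    static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
    {
            /* the default hierarchy is the one rooted at cgrp_dfl_root */
            return cgrp->root == &cgrp_dfl_root;
    }

This would also be consistent with cgrp_dfl_root losing its static qualifier in this patch.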
Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 94 +++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 61 insertions(+), 33 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index f5754910e80b..69b4939e9f6d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -142,7 +142,13 @@ static const char *cgroup_subsys_name[] = { * unattached - it never has more than a single cgroup, and all tasks are * part of that cgroup. */ -static struct cgroup_root cgrp_dfl_root; +struct cgroup_root cgrp_dfl_root; + +/* + * The default hierarchy always exists but is hidden until mounted for the + * first time. This is for backward compatibility. + */ +static bool cgrp_dfl_root_visible; /* The list of hierarchy roots */ @@ -999,10 +1005,22 @@ static int rebind_subsystems(struct cgroup_root *dst_root, return -EBUSY; } - if (dst_root != &cgrp_dfl_root) { - ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask); - if (ret) + ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask); + if (ret) { + if (dst_root != &cgrp_dfl_root) return ret; + + /* + * Rebinding back to the default root is not allowed to + * fail. Using both default and non-default roots should + * be rare. Moving subsystems back and forth even more so. + * Just warn about it and continue. + */ + if (cgrp_dfl_root_visible) { + pr_warning("cgroup: failed to create files (%d) while rebinding 0x%lx to default root\n", + ret, ss_mask); + pr_warning("cgroup: you may retry by moving them to a different hierarchy and unbinding\n"); + } } /* @@ -1011,7 +1029,7 @@ static int rebind_subsystems(struct cgroup_root *dst_root, */ mutex_unlock(&cgroup_mutex); for_each_subsys(ss, ssid) - if ((ss_mask & (1 << ssid)) && ss->root != &cgrp_dfl_root) + if (ss_mask & (1 << ssid)) cgroup_clear_dir(&ss->root->cgrp, 1 << ssid); mutex_lock(&cgroup_mutex); @@ -1039,8 +1057,7 @@ static int rebind_subsystems(struct cgroup_root *dst_root, ss->bind(css); } - if (dst_root != &cgrp_dfl_root) - kernfs_activate(dst_root->cgrp.kn); + kernfs_activate(dst_root->cgrp.kn); return 0; } @@ -1190,16 +1207,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) return -ENOENT; } - /* - * If the 'all' option was specified select all the subsystems, - * otherwise if 'none', 'name=' and a subsystem name options - * were not specified, let's default to 'all' - */ - if (all_ss || (!one_ss && !opts->none && !opts->name)) - for_each_subsys(ss, i) - if (!ss->disabled) - set_bit(i, &opts->subsys_mask); - /* Consistency checks */ if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { @@ -1211,6 +1218,23 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) pr_err("cgroup: sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n"); return -EINVAL; } + } else { + /* + * If the 'all' option was specified select all the + * subsystems, otherwise if 'none', 'name=' and a subsystem + * name options were not specified, let's default to 'all' + */ + if (all_ss || (!one_ss && !opts->none && !opts->name)) + for_each_subsys(ss, i) + if (!ss->disabled) + set_bit(i, &opts->subsys_mask); + + /* + * We either have to specify by name or by subsystems. (So + * all empty hierarchies must have a name). + */ + if (!opts->subsys_mask && !opts->name) + return -EINVAL; } /* @@ -1226,13 +1250,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) if (opts->subsys_mask && opts->none) return -EINVAL; - /* - * We either have to specify by name or by subsystems. 
(So all - * empty hierarchies must have a name). - */ - if (!opts->subsys_mask && !opts->name) - return -EINVAL; - return 0; } @@ -1487,6 +1504,14 @@ retry: goto out_unlock; /* look for a matching existing root */ + if (!opts.subsys_mask && !opts.none && !opts.name) { + cgrp_dfl_root_visible = true; + root = &cgrp_dfl_root; + cgroup_get(&root->cgrp); + ret = 0; + goto out_unlock; + } + for_each_root(root) { bool name_match = false; @@ -3622,6 +3647,13 @@ static long cgroup_create(struct cgroup *parent, const char *name, struct cgroup_subsys *ss; struct kernfs_node *kn; + /* + * XXX: The default hierarchy isn't fully implemented yet. Block + * !root cgroup creation on it for now. + */ + if (root == &cgrp_dfl_root) + return -EINVAL; + /* allocate the cgroup and its ID, 0 is reserved for the root */ cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); if (!cgrp) @@ -4061,7 +4093,8 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss) */ int __init cgroup_init_early(void) { - static struct cgroup_sb_opts __initdata opts = { }; + static struct cgroup_sb_opts __initdata opts = + { .flags = CGRP_ROOT_SANE_BEHAVIOR }; struct cgroup_subsys *ss; int i; @@ -4198,7 +4231,7 @@ int proc_cgroup_show(struct seq_file *m, void *v) struct cgroup *cgrp; int ssid, count = 0; - if (root == &cgrp_dfl_root) + if (root == &cgrp_dfl_root && !cgrp_dfl_root_visible) continue; seq_printf(m, "%d:", root->hierarchy_id); @@ -4631,15 +4664,10 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) cset = rcu_dereference(current->cgroups); list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { struct cgroup *c = link->cgrp; - const char *name = "?"; - - if (c != &cgrp_dfl_root.cgrp) { - cgroup_name(c, name_buf, NAME_MAX + 1); - name = name_buf; - } + cgroup_name(c, name_buf, NAME_MAX + 1); seq_printf(seq, "Root %d group %s\n", - c->root->hierarchy_id, name); + c->root->hierarchy_id, name_buf); } rcu_read_unlock(); up_read(&css_set_rwsem); -- cgit v1.2.3 From 8cbbf2c972c4444cad36f61cd571714c39b8cf04 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 10:23:55 -0400 Subject: cgroup: implement CFTYPE_ONLY_ON_DFL This cftype flag makes the file only appear on the default hierarchy. This will later be used for cgroup.controllers file. Signed-off-by: Tejun Heo Acked-by: Li Zefan --- kernel/cgroup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 69b4939e9f6d..37b6d534b0ca 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2356,6 +2356,8 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], for (cft = cfts; cft->name[0] != '\0'; cft++) { /* does cft->flags tell us to skip this file on @cgrp? */ + if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) + continue; if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp)) continue; if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) -- cgit v1.2.3 From 1b9aba49eab5e85b0d3de8ba630cda6d68546297 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 19 Mar 2014 17:43:21 -0400 Subject: cgroup: fix cgroup_taskset walking order cgroup_taskset is used to track and iterate target tasks while migrating a task or process and should guarantee that the first task iterated is the task group leader if a process is being migrated. 
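As a minimal sketch of why this ordering matters, a controller's ->attach() callback may key per-process setup off the first task returned; the "foo" controller and its debug message below are hypothetical:

    static void foo_attach(struct cgroup_subsys_state *css,
                           struct cgroup_taskset *tset)
    {
            struct task_struct *leader = cgroup_taskset_first(tset);
            struct task_struct *task;

            /* when a whole process is migrated, @leader is expected to be
             * its thread group leader */
            cgroup_taskset_for_each(task, tset)
                    pr_debug("foo: attaching pid %d (leader %d)\n",
                             task->pid, leader->pid);
    }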
b3dc094e9390 ("cgroup: use css_set->mg_tasks to track target tasks during migration") replaced flex array cgroup_taskset->tc_array with css_set->mg_tasks list to remove process size limit and dynamic allocation during migration; unfortunately, it incorrectly used list operations which don't preserve order breaking the guarantee that cgroup_taskset_first() returns the leader for a process target. Fix it by using order preserving list operations. Note that as multiple src_csets may map to a single dst_cset, the iteration order may change across cgroup_task_migrate(); however, the leader is still guaranteed to be the first entry. The switch to list_splice_tail_init() at the end of cgroup_migrate() isn't strictly necessary. Let's still do it for consistency. Signed-off-by: Tejun Heo --- kernel/cgroup.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 37b6d534b0ca..98a8045e2149 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1761,7 +1761,14 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp, get_css_set(new_cset); rcu_assign_pointer(tsk->cgroups, new_cset); - list_move(&tsk->cg_list, &new_cset->mg_tasks); + + /* + * Use move_tail so that cgroup_taskset_first() still returns the + * leader after migration. This works because cgroup_migrate() + * ensures that the dst_cset of the leader is the first on the + * tset's dst_csets list. + */ + list_move_tail(&tsk->cg_list, &new_cset->mg_tasks); /* * We just gained a reference on old_cset by taking it from the @@ -1936,9 +1943,16 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, if (!cset->mg_src_cgrp) goto next; - list_move(&task->cg_list, &cset->mg_tasks); - list_move(&cset->mg_node, &tset.src_csets); - list_move(&cset->mg_dst_cset->mg_node, &tset.dst_csets); + /* + * cgroup_taskset_first() must always return the leader. + * Take care to avoid disturbing the ordering. + */ + list_move_tail(&task->cg_list, &cset->mg_tasks); + if (list_empty(&cset->mg_node)) + list_add_tail(&cset->mg_node, &tset.src_csets); + if (list_empty(&cset->mg_dst_cset->mg_node)) + list_move_tail(&cset->mg_dst_cset->mg_node, + &tset.dst_csets); next: if (!threadgroup) break; @@ -1999,7 +2013,7 @@ out_release_tset: down_write(&css_set_rwsem); list_splice_init(&tset.dst_csets, &tset.src_csets); list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) { - list_splice_init(&cset->mg_tasks, &cset->tasks); + list_splice_tail_init(&cset->mg_tasks, &cset->tasks); list_del_init(&cset->mg_node); } up_write(&css_set_rwsem); -- cgit v1.2.3 From a19423b98704aa85e84097be6d1d44a8615c2340 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Tue, 11 Mar 2014 02:04:03 +0530 Subject: CPU hotplug: Add lockdep annotations to get/put_online_cpus() Add lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin()/cpu_hotplug_end(). Cc: Ingo Molnar Reviewed-by: Oleg Nesterov Signed-off-by: Gautham R. Shenoy Signed-off-by: Srivatsa S. Bhat Signed-off-by: Rafael J. Wysocki --- kernel/cpu.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index deff2e693766..33caf5e97701 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "smpboot.h" @@ -57,17 +58,30 @@ static struct { * an ongoing cpu hotplug operation. 
*/ int refcount; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif } cpu_hotplug = { .active_writer = NULL, .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), .refcount = 0, +#ifdef CONFIG_DEBUG_LOCK_ALLOC + .dep_map = {.name = "cpu_hotplug.lock" }, +#endif }; +/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */ +#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map) +#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) +#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) + void get_online_cpus(void) { might_sleep(); if (cpu_hotplug.active_writer == current) return; + cpuhp_lock_acquire_read(); mutex_lock(&cpu_hotplug.lock); cpu_hotplug.refcount++; mutex_unlock(&cpu_hotplug.lock); @@ -87,6 +101,7 @@ void put_online_cpus(void) if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) wake_up_process(cpu_hotplug.active_writer); mutex_unlock(&cpu_hotplug.lock); + cpuhp_lock_release(); } EXPORT_SYMBOL_GPL(put_online_cpus); @@ -117,6 +132,7 @@ void cpu_hotplug_begin(void) { cpu_hotplug.active_writer = current; + cpuhp_lock_acquire(); for (;;) { mutex_lock(&cpu_hotplug.lock); if (likely(!cpu_hotplug.refcount)) @@ -131,6 +147,7 @@ void cpu_hotplug_done(void) { cpu_hotplug.active_writer = NULL; mutex_unlock(&cpu_hotplug.lock); + cpuhp_lock_release(); } /* -- cgit v1.2.3 From 93ae4f978ca7f26d17df915ac7afc919c1dd0353 Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Tue, 11 Mar 2014 02:04:14 +0530 Subject: CPU hotplug: Provide lockless versions of callback registration functions The following method of CPU hotplug callback registration is not safe due to the possibility of an ABBA deadlock involving the cpu_add_remove_lock and the cpu_hotplug.lock. get_online_cpus(); for_each_online_cpu(cpu) init_cpu(cpu); register_cpu_notifier(&foobar_cpu_notifier); put_online_cpus(); The deadlock is shown below: CPU 0 CPU 1 ----- ----- Acquire cpu_hotplug.lock [via get_online_cpus()] CPU online/offline operation takes cpu_add_remove_lock [via cpu_maps_update_begin()] Try to acquire cpu_add_remove_lock [via register_cpu_notifier()] CPU online/offline operation tries to acquire cpu_hotplug.lock [via cpu_hotplug_begin()] *** DEADLOCK! *** The problem here is that callback registration takes the locks in one order whereas the CPU hotplug operations take the same locks in the opposite order. To avoid this issue and to provide a race-free method to register CPU hotplug callbacks (along with initialization of already online CPUs), introduce new variants of the callback registration APIs that simply register the callbacks without holding the cpu_add_remove_lock during the registration. That way, we can avoid the ABBA scenario. However, we will need to hold the cpu_add_remove_lock throughout the entire critical section, to protect updates to the callback/notifier chain. 
This can be achieved by writing the callback registration code as follows: cpu_maps_update_begin(); [ or cpu_notifier_register_begin(); see below ] for_each_online_cpu(cpu) init_cpu(cpu); /* This doesn't take the cpu_add_remove_lock */ __register_cpu_notifier(&foobar_cpu_notifier); cpu_maps_update_done(); [ or cpu_notifier_register_done(); see below ] Note that we can't use get_online_cpus() here instead of cpu_maps_update_begin() because the cpu_hotplug.lock is dropped during the invocation of CPU_POST_DEAD notifiers, and hence get_online_cpus() cannot provide the necessary synchronization to protect the callback/notifier chains against concurrent reads and writes. On the other hand, since the cpu_add_remove_lock protects the entire hotplug operation (including CPU_POST_DEAD), we can use cpu_maps_update_begin/done() to guarantee proper synchronization. Also, since cpu_maps_update_begin/done() is like a super-set of get/put_online_cpus(), the former naturally protects the critical sections from concurrent hotplug operations. Since the names cpu_maps_update_begin/done() don't make much sense in CPU hotplug callback registration scenarios, we'll introduce new APIs named cpu_notifier_register_begin/done() and map them to cpu_maps_update_begin/done(). In summary, introduce the lockless variants of un/register_cpu_notifier() and also export the cpu_notifier_register_begin/done() APIs for use by modules. This way, we provide a race-free way to register hotplug callbacks as well as perform initialization for the CPUs that are already online. Cc: Thomas Gleixner Cc: Andrew Morton Cc: Peter Zijlstra Cc: Ingo Molnar Acked-by: Oleg Nesterov Acked-by: Toshi Kani Reviewed-by: Gautham R. Shenoy Signed-off-by: Srivatsa S. Bhat Signed-off-by: Rafael J. Wysocki --- kernel/cpu.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 33caf5e97701..a9e710eef0e2 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -28,18 +28,23 @@ static DEFINE_MUTEX(cpu_add_remove_lock); /* - * The following two API's must be used when attempting - * to serialize the updates to cpu_online_mask, cpu_present_mask. + * The following two APIs (cpu_maps_update_begin/done) must be used when + * attempting to serialize the updates to cpu_online_mask & cpu_present_mask. + * The APIs cpu_notifier_register_begin/done() must be used to protect CPU + * hotplug callback (un)registration performed using __register_cpu_notifier() + * or __unregister_cpu_notifier(). 
*/ void cpu_maps_update_begin(void) { mutex_lock(&cpu_add_remove_lock); } +EXPORT_SYMBOL(cpu_notifier_register_begin); void cpu_maps_update_done(void) { mutex_unlock(&cpu_add_remove_lock); } +EXPORT_SYMBOL(cpu_notifier_register_done); static RAW_NOTIFIER_HEAD(cpu_chain); @@ -183,6 +188,11 @@ int __ref register_cpu_notifier(struct notifier_block *nb) return ret; } +int __ref __register_cpu_notifier(struct notifier_block *nb) +{ + return raw_notifier_chain_register(&cpu_chain, nb); +} + static int __cpu_notify(unsigned long val, void *v, int nr_to_call, int *nr_calls) { @@ -206,6 +216,7 @@ static void cpu_notify_nofail(unsigned long val, void *v) BUG_ON(cpu_notify(val, v)); } EXPORT_SYMBOL(register_cpu_notifier); +EXPORT_SYMBOL(__register_cpu_notifier); void __ref unregister_cpu_notifier(struct notifier_block *nb) { @@ -215,6 +226,12 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb) } EXPORT_SYMBOL(unregister_cpu_notifier); +void __ref __unregister_cpu_notifier(struct notifier_block *nb) +{ + raw_notifier_chain_unregister(&cpu_chain, nb); +} +EXPORT_SYMBOL(__unregister_cpu_notifier); + /** * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU * @cpu: a CPU id -- cgit v1.2.3 From d39ad278a3001c860da4d7c13e51259b1904bec5 Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Tue, 11 Mar 2014 02:11:56 +0530 Subject: trace, ring-buffer: Fix CPU hotplug callback registration Subsystems that want to register CPU hotplug callbacks, as well as perform initialization for the CPUs that are already online, often do it as shown below: get_online_cpus(); for_each_online_cpu(cpu) init_cpu(cpu); register_cpu_notifier(&foobar_cpu_notifier); put_online_cpus(); This is wrong, since it is prone to ABBA deadlocks involving the cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently with CPU hotplug operations). Instead, the correct and race-free way of performing the callback registration is: cpu_notifier_register_begin(); for_each_online_cpu(cpu) init_cpu(cpu); /* Note the use of the double underscored version of the API */ __register_cpu_notifier(&foobar_cpu_notifier); cpu_notifier_register_done(); Fix the tracing ring-buffer code by using this latter form of callback registration. Cc: Frederic Weisbecker Cc: Ingo Molnar Acked-by: Steven Rostedt Signed-off-by: Srivatsa S. Bhat Signed-off-by: Rafael J. Wysocki --- kernel/trace/ring_buffer.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index fc4da2d97f9b..c634868c2921 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1301,7 +1301,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, * In that off case, we need to allocate for all possible cpus. 
*/ #ifdef CONFIG_HOTPLUG_CPU - get_online_cpus(); + cpu_notifier_register_begin(); cpumask_copy(buffer->cpumask, cpu_online_mask); #else cpumask_copy(buffer->cpumask, cpu_possible_mask); @@ -1324,10 +1324,10 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, #ifdef CONFIG_HOTPLUG_CPU buffer->cpu_notify.notifier_call = rb_cpu_notify; buffer->cpu_notify.priority = 0; - register_cpu_notifier(&buffer->cpu_notify); + __register_cpu_notifier(&buffer->cpu_notify); + cpu_notifier_register_done(); #endif - put_online_cpus(); mutex_init(&buffer->mutex); return buffer; @@ -1341,7 +1341,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags, fail_free_cpumask: free_cpumask_var(buffer->cpumask); - put_online_cpus(); +#ifdef CONFIG_HOTPLUG_CPU + cpu_notifier_register_done(); +#endif fail_free_buffer: kfree(buffer); @@ -1358,16 +1360,17 @@ ring_buffer_free(struct ring_buffer *buffer) { int cpu; - get_online_cpus(); - #ifdef CONFIG_HOTPLUG_CPU - unregister_cpu_notifier(&buffer->cpu_notify); + cpu_notifier_register_begin(); + __unregister_cpu_notifier(&buffer->cpu_notify); #endif for_each_buffer_cpu(buffer, cpu) rb_free_cpu_buffer(buffer->buffers[cpu]); - put_online_cpus(); +#ifdef CONFIG_HOTPLUG_CPU + cpu_notifier_register_done(); +#endif kfree(buffer->buffers); free_cpumask_var(buffer->cpumask); -- cgit v1.2.3 From c270a817196a9374a2dc730624d1501dced40b4d Mon Sep 17 00:00:00 2001 From: "Srivatsa S. Bhat" Date: Tue, 11 Mar 2014 02:12:08 +0530 Subject: profile: Fix CPU hotplug callback registration Subsystems that want to register CPU hotplug callbacks, as well as perform initialization for the CPUs that are already online, often do it as shown below: get_online_cpus(); for_each_online_cpu(cpu) init_cpu(cpu); register_cpu_notifier(&foobar_cpu_notifier); put_online_cpus(); This is wrong, since it is prone to ABBA deadlocks involving the cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently with CPU hotplug operations). Instead, the correct and race-free way of performing the callback registration is: cpu_notifier_register_begin(); for_each_online_cpu(cpu) init_cpu(cpu); /* Note the use of the double underscored version of the API */ __register_cpu_notifier(&foobar_cpu_notifier); cpu_notifier_register_done(); Fix the profile code by using this latter form of callback registration. Cc: Al Viro Cc: Mauro Carvalho Chehab Cc: Ingo Molnar Signed-off-by: Srivatsa S. Bhat Signed-off-by: Rafael J. 
Wysocki --- kernel/profile.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/profile.c b/kernel/profile.c index ebdd9c1a86b4..93b2a3fe0a64 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -591,18 +591,28 @@ out_cleanup: int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ { struct proc_dir_entry *entry; + int err = 0; if (!prof_on) return 0; - if (create_hash_tables()) - return -ENOMEM; + + cpu_notifier_register_begin(); + + if (create_hash_tables()) { + err = -ENOMEM; + goto out; + } + entry = proc_create("profile", S_IWUSR | S_IRUGO, NULL, &proc_profile_operations); if (!entry) - return 0; + goto out; proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); - hotcpu_notifier(profile_cpu_callback, 0); - return 0; + __hotcpu_notifier(profile_cpu_callback, 0); + +out: + cpu_notifier_register_done(); + return err; } module_init(create_proc_profile); #endif /* CONFIG_PROC_FS */ -- cgit v1.2.3 From 3f1c82502c299da08b7b7f08b435212e51166ed9 Mon Sep 17 00:00:00 2001 From: William Roberts Date: Tue, 11 Feb 2014 10:12:01 -0800 Subject: audit: Audit proc//cmdline aka proctitle During an audit event, cache and print the value of the process's proctitle value (proc//cmdline). This is useful in situations where processes are started via fork'd virtual machines where the comm field is incorrect. Often times, setting the comm field still is insufficient as the comm width is not very wide and most virtual machine "package names" do not fit. Also, during execution, many threads have their comm field set as well. By tying it back to the global cmdline value for the process, audit records will be more complete in systems with these properties. An example of where this is useful and applicable is in the realm of Android. With Android, their is no fork/exec for VM instances. The bare, preloaded Dalvik VM listens for a fork and specialize request. When this request comes in, the VM forks, and the loads the specific application (specializing). This was done to take advantage of COW and to not require a load of basic packages by the VM on very app spawn. When this spawn occurs, the package name is set via setproctitle() and shows up in procfs. Many of these package names are longer then 16 bytes, the historical width of task->comm. Having the cmdline in the audit records will couple the application back to the record directly. Also, on my Debian development box, some audit records were more useful then what was printed under comm. The cached proctitle is tied to the life-cycle of the audit_context structure and is built on demand. Proctitle is controllable by userspace, and thus should not be trusted. It is meant as an aid to assist in debugging. The proctitle event is emitted during syscall audits, and can be filtered with auditctl. 
Example: type=AVC msg=audit(1391217013.924:386): avc: denied { getattr } for pid=1971 comm="mkdir" name="/" dev="selinuxfs" ino=1 scontext=system_u:system_r:consolekit_t:s0-s0:c0.c255 tcontext=system_u:object_r:security_t:s0 tclass=filesystem type=SYSCALL msg=audit(1391217013.924:386): arch=c000003e syscall=137 success=yes exit=0 a0=7f019dfc8bd7 a1=7fffa6aed2c0 a2=fffffffffff4bd25 a3=7fffa6aed050 items=0 ppid=1967 pid=1971 auid=4294967295 uid=0 gid=0 euid=0 suid=0 fsuid=0 egid=0 sgid=0 fsgid=0 tty=(none) ses=4294967295 comm="mkdir" exe="/bin/mkdir" subj=system_u:system_r:consolekit_t:s0-s0:c0.c255 key=(null) type=UNKNOWN[1327] msg=audit(1391217013.924:386): proctitle=6D6B646972002D70002F7661722F72756E2F636F6E736F6C65 Acked-by: Steve Grubb (wrt record formating) Signed-off-by: William Roberts Signed-off-by: Eric Paris --- kernel/audit.h | 6 +++++ kernel/auditsc.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) (limited to 'kernel') diff --git a/kernel/audit.h b/kernel/audit.h index 57cc64d67718..38c967d28de5 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -106,6 +106,11 @@ struct audit_names { bool should_free; }; +struct audit_proctitle { + int len; /* length of the cmdline field. */ + char *value; /* the cmdline field */ +}; + /* The per-task audit context. */ struct audit_context { int dummy; /* must be the first element */ @@ -202,6 +207,7 @@ struct audit_context { } execve; }; int fds[2]; + struct audit_proctitle proctitle; #if AUDIT_DEBUG int put_count; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 6874c1fd453d..043d1ef9362f 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -70,6 +70,7 @@ #include #include #include +#include #include "audit.h" @@ -81,6 +82,9 @@ /* no execve audit message should be longer than this (userspace limits) */ #define MAX_EXECVE_AUDIT_LEN 7500 +/* max length to print of cmdline/proctitle value during audit */ +#define MAX_PROCTITLE_AUDIT_LEN 128 + /* number of audit rules */ int audit_n_rules; @@ -844,6 +848,13 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, return context; } +static inline void audit_proctitle_free(struct audit_context *context) +{ + kfree(context->proctitle.value); + context->proctitle.value = NULL; + context->proctitle.len = 0; +} + static inline void audit_free_names(struct audit_context *context) { struct audit_names *n, *next; @@ -956,6 +967,7 @@ static inline void audit_free_context(struct audit_context *context) audit_free_aux(context); kfree(context->filterkey); kfree(context->sockaddr); + audit_proctitle_free(context); kfree(context); } @@ -1272,6 +1284,59 @@ static void show_special(struct audit_context *context, int *call_panic) audit_log_end(ab); } +static inline int audit_proctitle_rtrim(char *proctitle, int len) +{ + char *end = proctitle + len - 1; + while (end > proctitle && !isprint(*end)) + end--; + + /* catch the case where proctitle is only 1 non-print character */ + len = end - proctitle + 1; + len -= isprint(proctitle[len-1]) == 0; + return len; +} + +static void audit_log_proctitle(struct task_struct *tsk, + struct audit_context *context) +{ + int res; + char *buf; + char *msg = "(null)"; + int len = strlen(msg); + struct audit_buffer *ab; + + ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE); + if (!ab) + return; /* audit_panic or being filtered */ + + audit_log_format(ab, "proctitle="); + + /* Not cached */ + if (!context->proctitle.value) { + buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL); + if (!buf) + 
goto out; + /* Historically called this from procfs naming */ + res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN); + if (res == 0) { + kfree(buf); + goto out; + } + res = audit_proctitle_rtrim(buf, res); + if (res == 0) { + kfree(buf); + goto out; + } + context->proctitle.value = buf; + context->proctitle.len = res; + } + msg = context->proctitle.value; + len = context->proctitle.len; +out: + audit_log_n_untrustedstring(ab, msg, len); + audit_log_end(ab); +} + static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { int i, call_panic = 0; @@ -1389,6 +1454,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts audit_log_name(context, n, NULL, i++, &call_panic); } + audit_log_proctitle(tsk, context); + /* Send end of event record to help user space know we are finished */ ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE); if (ab) -- cgit v1.2.3 From 638a0fd2a062568c568661be0a780be8e8836d03 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 28 Feb 2014 10:49:05 -0800 Subject: audit: Use struct net not pid_t to remember the network namespce to reply in While reading through 3.14-rc1 I found a pretty siginficant mishandling of network namespaces in the recent audit changes. In struct audit_netlink_list and audit_reply add a reference to the network namespace of the caller and remove the userspace pid of the caller. This cleanly remembers the callers network namespace, and removes a huge class of races and nasty failure modes that can occur when attempting to relook up the callers network namespace from a pid_t (including the caller's network namespace changing, pid wraparound, and the pid simply not being present). Signed-off-by: "Eric W. Biederman" Acked-by: Richard Guy Briggs Signed-off-by: Eric Paris --- kernel/audit.c | 10 ++++++---- kernel/audit.h | 2 +- kernel/auditfilter.c | 3 ++- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 34c5a2310fbf..71fb41f393d7 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -182,7 +182,7 @@ struct audit_buffer { struct audit_reply { __u32 portid; - pid_t pid; + struct net *net; struct sk_buff *skb; }; @@ -500,7 +500,7 @@ int audit_send_list(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; - struct net *net = get_net_ns_by_pid(dest->pid); + struct net *net = dest->net; struct audit_net *aunet = net_generic(net, audit_net_id); /* wait for parent to finish and send an ACK */ @@ -510,6 +510,7 @@ int audit_send_list(void *_dest) while ((skb = __skb_dequeue(&dest->q)) != NULL) netlink_unicast(aunet->nlsk, skb, dest->portid, 0); + put_net(net); kfree(dest); return 0; @@ -543,7 +544,7 @@ out_kfree_skb: static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; - struct net *net = get_net_ns_by_pid(reply->pid); + struct net *net = reply->net; struct audit_net *aunet = net_generic(net, audit_net_id); mutex_lock(&audit_cmd_mutex); @@ -552,6 +553,7 @@ static int audit_send_reply_thread(void *arg) /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. 
*/ netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); + put_net(net); kfree(reply); return 0; } @@ -583,8 +585,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done, if (!skb) goto out; + reply->net = get_net(current->nsproxy->net_ns); reply->portid = portid; - reply->pid = task_pid_vnr(current); reply->skb = skb; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); diff --git a/kernel/audit.h b/kernel/audit.h index 38c967d28de5..7bb65730c890 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -253,7 +253,7 @@ extern void audit_panic(const char *message); struct audit_netlink_list { __u32 portid; - pid_t pid; + struct net *net; struct sk_buff_head q; }; diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 3152d1aea164..a0d470131fd0 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "audit.h" /* @@ -1085,8 +1086,8 @@ int audit_list_rules_send(__u32 portid, int seq) dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); if (!dest) return -ENOMEM; + dest->net = get_net(current->nsproxy->net_ns); dest->portid = portid; - dest->pid = task_pid_vnr(current); skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); -- cgit v1.2.3 From 099dd235113700bbb476e572cd191ddb77b9af46 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Fri, 28 Feb 2014 20:36:55 -0800 Subject: audit: Send replies in the proper network namespace. In perverse cases of file descriptor passing the current network namespace of a process and the network namespace of a socket used by that socket may differ. Therefore use the network namespace of the appropiate socket to ensure replies always go to the appropiate socket. Signed-off-by: "Eric W. Biederman" Acked-by: Richard Guy Briggs Signed-off-by: Eric Paris --- kernel/audit.c | 21 ++++++++++----------- kernel/auditfilter.c | 7 +++++-- 2 files changed, 15 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 71fb41f393d7..7b44bd47759c 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -570,9 +570,11 @@ static int audit_send_reply_thread(void *arg) * Allocates an skb, builds the netlink message, and sends it to the port id. * No failure notifications. 
*/ -static void audit_send_reply(__u32 portid, int seq, int type, int done, +static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, int multi, const void *payload, int size) { + u32 portid = NETLINK_CB(request_skb).portid; + struct net *net = sock_net(NETLINK_CB(request_skb).sk); struct sk_buff *skb; struct task_struct *tsk; struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), @@ -585,7 +587,7 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done, if (!skb) goto out; - reply->net = get_net(current->nsproxy->net_ns); + reply->net = get_net(net); reply->portid = portid; reply->skb = skb; @@ -675,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb) seq = nlmsg_hdr(skb)->nlmsg_seq; - audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, - &af, sizeof(af)); + audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af)); return 0; } @@ -796,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) s.backlog = skb_queue_len(&audit_skb_queue); s.version = AUDIT_VERSION_LATEST; s.backlog_wait_time = audit_backlog_wait_time; - audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, - &s, sizeof(s)); + audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_SET: { @@ -907,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) seq, data, nlmsg_len(nlh)); break; case AUDIT_LIST_RULES: - err = audit_list_rules_send(NETLINK_CB(skb).portid, seq); + err = audit_list_rules_send(skb, seq); break; case AUDIT_TRIM: audit_trim_trees(); @@ -972,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) memcpy(sig_data->ctx, ctx, len); security_release_secctx(ctx, len); } - audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO, - 0, 0, sig_data, sizeof(*sig_data) + len); + audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0, + sig_data, sizeof(*sig_data) + len); kfree(sig_data); break; case AUDIT_TTY_GET: { @@ -985,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) s.log_passwd = tsk->signal->audit_tty_log_passwd; spin_unlock(&tsk->sighand->siglock); - audit_send_reply(NETLINK_CB(skb).portid, seq, - AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); + audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_TTY_SET: { diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index a0d470131fd0..549bbb6e6597 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "audit.h" /* @@ -1071,8 +1072,10 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data, * @portid: target portid for netlink audit messages * @seq: netlink audit message sequence (serial) number */ -int audit_list_rules_send(__u32 portid, int seq) +int audit_list_rules_send(struct sk_buff *request_skb, int seq) { + u32 portid = NETLINK_CB(request_skb).portid; + struct net *net = sock_net(NETLINK_CB(request_skb).sk); struct task_struct *tsk; struct audit_netlink_list *dest; int err = 0; @@ -1086,7 +1089,7 @@ int audit_list_rules_send(__u32 portid, int seq) dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); if (!dest) return -ENOMEM; - dest->net = get_net(current->nsproxy->net_ns); + dest->net = get_net(net); dest->portid = portid; skb_queue_head_init(&dest->q); -- cgit v1.2.3 From 4a3eb726d1543c4b616b9a0a4d4c53ddd276f5f4 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Tue, 18 Feb 2014 15:29:43 -0500 Subject: 
audit: rename the misleading audit_get_context() to audit_take_context() "get" usually implies incrementing a refcount into a structure to indicate a reference being held by another part of code. Change this function name to indicate it is in fact being taken from it, returning the value while clearing it in the supplying structure. Signed-off-by: Richard Guy Briggs --- kernel/auditsc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 043d1ef9362f..57bf178ca7d5 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -811,7 +811,8 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) rcu_read_unlock(); } -static inline struct audit_context *audit_get_context(struct task_struct *tsk, +/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */ +static inline struct audit_context *audit_take_context(struct task_struct *tsk, int return_valid, long return_code) { @@ -1474,7 +1475,7 @@ void __audit_free(struct task_struct *tsk) { struct audit_context *context; - context = audit_get_context(tsk, 0, 0); + context = audit_take_context(tsk, 0, 0); if (!context) return; @@ -1568,7 +1569,7 @@ void __audit_syscall_exit(int success, long return_code) else success = AUDITSC_FAILURE; - context = audit_get_context(tsk, success, return_code); + context = audit_take_context(tsk, success, return_code); if (!context) return; -- cgit v1.2.3 From c92cdeb45eea38515e82187f48c2e4f435fb4e25 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Tue, 10 Dec 2013 22:10:41 -0500 Subject: audit: convert PPIDs to the inital PID namespace. sys_getppid() returns the parent pid of the current process in its own pid namespace. Since audit filters are based in the init pid namespace, a process could avoid a filter or trigger an unintended one by being in an alternate pid namespace or log meaningless information. Switch to task_ppid_nr() for PPIDs to anchor all audit filters in the init_pid_ns. (informed by ebiederman's 6c621b7e) Cc: stable@vger.kernel.org Cc: Eric W. 
Biederman Signed-off-by: Richard Guy Briggs --- kernel/audit.c | 4 ++-- kernel/auditsc.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 7b44bd47759c..e1e1b2137048 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1819,10 +1819,10 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) spin_unlock_irq(&tsk->sighand->siglock); audit_log_format(ab, - " ppid=%ld pid=%d auid=%u uid=%u gid=%u" + " ppid=%d pid=%d auid=%u uid=%u gid=%u" " euid=%u suid=%u fsuid=%u" " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", - sys_getppid(), + task_ppid_nr(tsk), tsk->pid, from_kuid(&init_user_ns, audit_get_loginuid(tsk)), from_kuid(&init_user_ns, cred->uid), diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 57bf178ca7d5..a6cf7ab56e61 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -465,7 +465,7 @@ static int audit_filter_rules(struct task_struct *tsk, case AUDIT_PPID: if (ctx) { if (!ctx->ppid) - ctx->ppid = sys_getppid(); + ctx->ppid = task_ppid_nr(tsk); result = audit_comparator(ctx->ppid, f->op, f->val); } break; -- cgit v1.2.3 From f1dc4867ff41b7bcca57fa19449d1fe7ad517ac1 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Wed, 11 Dec 2013 13:52:26 -0500 Subject: audit: anchor all pid references in the initial pid namespace Store and log all PIDs with reference to the initial PID namespace and use the access functions task_pid_nr() and task_tgid_nr() for task->pid and task->tgid. Cc: "Eric W. Biederman" (informed by ebiederman's c776b5d2) Signed-off-by: Richard Guy Briggs --- kernel/audit.c | 5 +++-- kernel/auditfilter.c | 17 ++++++++++++++++- kernel/auditsc.c | 16 +++++++++------- 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index e1e1b2137048..5a096f8e28cb 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -649,6 +649,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) { int rc = 0; uid_t uid = from_kuid(&init_user_ns, current_uid()); + pid_t pid = task_tgid_nr(current); if (!audit_enabled && msg_type != AUDIT_USER_AVC) { *ab = NULL; @@ -658,7 +659,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type) *ab = audit_log_start(NULL, GFP_KERNEL, msg_type); if (unlikely(!*ab)) return rc; - audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid); + audit_log_format(*ab, "pid=%d uid=%u", pid, uid); audit_log_session_info(*ab); audit_log_task_context(*ab); @@ -1823,7 +1824,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) " euid=%u suid=%u fsuid=%u" " egid=%u sgid=%u fsgid=%u tty=%s ses=%u", task_ppid_nr(tsk), - tsk->pid, + task_pid_nr(tsk), from_kuid(&init_user_ns, audit_get_loginuid(tsk)), from_kuid(&init_user_ns, cred->uid), from_kgid(&init_user_ns, cred->gid), diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 549bbb6e6597..96c8a704f130 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -433,6 +433,19 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, f->val = 0; } + if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) { + struct pid *pid; + rcu_read_lock(); + pid = find_vpid(f->val); + if (!pid) { + rcu_read_unlock(); + err = -ESRCH; + goto exit_free; + } + f->val = pid_nr(pid); + rcu_read_unlock(); + } + err = audit_field_valid(entry, f); if (err) goto exit_free; @@ -1242,12 +1255,14 @@ static int audit_filter_user_rules(struct audit_krule *rule, int type, for (i = 
0; i < rule->field_count; i++) { struct audit_field *f = &rule->fields[i]; + pid_t pid; int result = 0; u32 sid; switch (f->type) { case AUDIT_PID: - result = audit_comparator(task_pid_vnr(current), f->op, f->val); + pid = task_pid_nr(current); + result = audit_comparator(pid, f->op, f->val); break; case AUDIT_UID: result = audit_uid_comparator(current_uid(), f->op, f->uid); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index a6cf7ab56e61..6381f25ac3d4 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -457,10 +457,12 @@ static int audit_filter_rules(struct task_struct *tsk, struct audit_field *f = &rule->fields[i]; struct audit_names *n; int result = 0; + pid_t pid; switch (f->type) { case AUDIT_PID: - result = audit_comparator(tsk->pid, f->op, f->val); + pid = task_pid_nr(tsk); + result = audit_comparator(pid, f->op, f->val); break; case AUDIT_PPID: if (ctx) { @@ -2051,7 +2053,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, audit_log_format(ab, "pid=%d uid=%u" " old-auid=%u new-auid=%u old-ses=%u new-ses=%u" " res=%d", - current->pid, uid, + task_pid_nr(current), uid, oldloginuid, loginuid, oldsessionid, sessionid, !rc); audit_log_end(ab); @@ -2275,7 +2277,7 @@ void __audit_ptrace(struct task_struct *t) { struct audit_context *context = current->audit_context; - context->target_pid = t->pid; + context->target_pid = task_pid_nr(t); context->target_auid = audit_get_loginuid(t); context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); @@ -2300,7 +2302,7 @@ int __audit_signal_info(int sig, struct task_struct *t) if (audit_pid && t->tgid == audit_pid) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { - audit_sig_pid = tsk->pid; + audit_sig_pid = task_pid_nr(tsk); if (uid_valid(tsk->loginuid)) audit_sig_uid = tsk->loginuid; else @@ -2314,7 +2316,7 @@ int __audit_signal_info(int sig, struct task_struct *t) /* optimize the common case by putting first signal recipient directly * in audit_context */ if (!ctx->target_pid) { - ctx->target_pid = t->tgid; + ctx->target_pid = task_tgid_nr(t); ctx->target_auid = audit_get_loginuid(t); ctx->target_uid = t_uid; ctx->target_sessionid = audit_get_sessionid(t); @@ -2335,7 +2337,7 @@ int __audit_signal_info(int sig, struct task_struct *t) } BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS); - axp->target_pid[axp->pid_count] = t->tgid; + axp->target_pid[axp->pid_count] = task_tgid_nr(t); axp->target_auid[axp->pid_count] = audit_get_loginuid(t); axp->target_uid[axp->pid_count] = t_uid; axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); @@ -2435,7 +2437,7 @@ static void audit_log_task(struct audit_buffer *ab) from_kgid(&init_user_ns, gid), sessionid); audit_log_task_context(ab); - audit_log_format(ab, " pid=%d comm=", current->pid); + audit_log_format(ab, " pid=%d comm=", task_pid_nr(current)); audit_log_untrustedstring(ab, current->comm); if (mm) { down_read(&mm->mmap_sem); -- cgit v1.2.3 From 5a3cb3b6c3a07904bb66baf055b2eaf01198b1f9 Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Fri, 16 Aug 2013 00:04:46 -0400 Subject: audit: allow user processes to log from another PID namespace Still only permit the audit logging daemon and control to operate from the initial PID namespace, but allow processes to log from another PID namespace. Cc: "Eric W. 
Biederman" (informed by ebiederman's c776b5d2) Signed-off-by: Richard Guy Briggs --- kernel/audit.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 5a096f8e28cb..72c6e1cd6ef5 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -607,9 +607,8 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) { int err = 0; - /* Only support the initial namespaces for now. */ - if ((current_user_ns() != &init_user_ns) || - (task_active_pid_ns(current) != &init_pid_ns)) + /* Only support initial user namespace for now. */ + if ((current_user_ns() != &init_user_ns)) return -EPERM; switch (msg_type) { @@ -629,6 +628,11 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) case AUDIT_TTY_SET: case AUDIT_TRIM: case AUDIT_MAKE_EQUIV: + /* Only support auditd and auditctl in initial pid namespace + * for now. */ + if ((task_active_pid_ns(current) != &init_pid_ns)) + return -EPERM; + if (!capable(CAP_AUDIT_CONTROL)) err = -EPERM; break; -- cgit v1.2.3 From aa589a13b5d00d3c643ee4114d8cbc3addb4e99f Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Mon, 24 Feb 2014 12:31:11 -0500 Subject: audit: remove superfluous new- prefix in AUDIT_LOGIN messages The new- prefix on ses and auid are un-necessary and break ausearch. Signed-off-by: Richard Guy Briggs --- kernel/auditsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 6381f25ac3d4..61ac3cf53f1d 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2051,7 +2051,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, if (!ab) return; audit_log_format(ab, "pid=%d uid=%u" - " old-auid=%u new-auid=%u old-ses=%u new-ses=%u" + " old-auid=%u auid=%u old-ses=%u ses=%u" " res=%d", task_pid_nr(current), uid, oldloginuid, loginuid, oldsessionid, sessionid, -- cgit v1.2.3 From ddfad8affdb73cc8df5890fef16d98d63ff3a6f0 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 19 Jan 2011 19:22:35 -0500 Subject: audit: include subject in login records The login uid change record does not include the selinux context of the task logging in. Add that information. (Updated from 2011-01: RHBZ:670328 -- RGB) Reported-by: Steve Grubb Acked-by: James Morris Signed-off-by: Eric Paris Signed-off-by: Aristeu Rozanski Signed-off-by: Richard Guy Briggs --- kernel/auditsc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 61ac3cf53f1d..bd3de52600ff 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2050,12 +2050,10 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; - audit_log_format(ab, "pid=%d uid=%u" - " old-auid=%u auid=%u old-ses=%u ses=%u" - " res=%d", - task_pid_nr(current), uid, - oldloginuid, loginuid, oldsessionid, sessionid, - !rc); + audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid); + audit_log_task_context(ab); + audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", + oldloginuid, loginuid, oldsessionid, sessionid, !rc); audit_log_end(ab); } -- cgit v1.2.3 From f12835276c3182f2b998d93dfd60100cf4b60c05 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Wed, 5 Mar 2014 16:29:55 -0500 Subject: audit: remove stray newlines from audit_log_lost messages Calling audit_log_lost with a \n in the format string leads to extra newlines in dmesg. 
That function will eventually call audit_panic which uses pr_err with an explicit \n included. Just make these calls match the others that lack \n. Reported-by: Jonathan Kamens Signed-off-by: Josh Boyer Signed-off-by: Richard Guy Briggs --- kernel/audit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index 72c6e1cd6ef5..c0696dcfed11 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -396,7 +396,7 @@ static void audit_printk_skb(struct sk_buff *skb) if (printk_ratelimit()) pr_notice("type=%d %s\n", nlh->nlmsg_type, data); else - audit_log_lost("printk limit exceeded\n"); + audit_log_lost("printk limit exceeded"); } audit_hold_skb(skb); @@ -412,7 +412,7 @@ static void kauditd_send_skb(struct sk_buff *skb) BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */ if (audit_pid) { pr_err("*NO* daemon at audit_pid=%d\n", audit_pid); - audit_log_lost("auditd disappeared\n"); + audit_log_lost("auditd disappeared"); audit_pid = 0; audit_sock = NULL; } -- cgit v1.2.3 From b7550787fe8b5beffb5f56fa11a87712d699d085 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 5 Mar 2014 14:34:36 -0800 Subject: audit: remove stray newline from audit_log_execve_info() audit_panic() call There's an unnecessary use of a \n in audit_panic. Signed-off-by: Richard Guy Briggs --- kernel/auditsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/auditsc.c b/kernel/auditsc.c index bd3de52600ff..254ce2063d1d 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1173,7 +1173,7 @@ static void audit_log_execve_info(struct audit_context *context, */ buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); if (!buf) { - audit_panic("out of memory for argv string\n"); + audit_panic("out of memory for argv string"); return; } -- cgit v1.2.3 From 5e937a9ae9137899c6641d718bd3820861099a09 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 11 Mar 2014 12:48:43 -0400 Subject: syscall_get_arch: remove useless function arguments Every caller of syscall_get_arch() uses current for the task and no implementors of the function need args. So just get rid of both of those things. Admittedly, since these are inline functions we aren't wasting stack space, but it just makes the prototypes better. 
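For illustration, a minimal sketch of what an arch-side definition can collapse to once the task and regs arguments are gone; this is not taken from the patch and assumes an architecture with a single syscall ABI:

#include <linux/audit.h>	/* AUDIT_ARCH_* constants */

static inline int syscall_get_arch(void)
{
	/* illustrative only: a single-ABI architecture can return a constant */
	return AUDIT_ARCH_X86_64;
}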
Signed-off-by: Eric Paris Cc: linux-arm-kernel@lists.infradead.org Cc: linux-mips@linux-mips.org Cc: linux390@de.ibm.com Cc: x86@kernel.org Cc: linux-kernel@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: linux-arch@vger.kernel.org --- kernel/seccomp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b7a10048a32c..eda2da3df822 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -95,7 +95,7 @@ u32 seccomp_bpf_load(int off) if (off == BPF_DATA(nr)) return syscall_get_nr(current, regs); if (off == BPF_DATA(arch)) - return syscall_get_arch(current, regs); + return syscall_get_arch(); if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { unsigned long value; int arg = (off - BPF_DATA(args[0])) / sizeof(u64); @@ -351,7 +351,7 @@ static void seccomp_send_sigsys(int syscall, int reason) info.si_code = SYS_SECCOMP; info.si_call_addr = (void __user *)KSTK_EIP(current); info.si_errno = reason; - info.si_arch = syscall_get_arch(current, task_pt_regs(current)); + info.si_arch = syscall_get_arch(); info.si_syscall = syscall; force_sig_info(SIGSYS, &info, current); } -- cgit v1.2.3 From e1b2dc176f2d5be7952c47a4e4e8f3b06a90db1c Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 20 Mar 2014 11:10:15 -0400 Subject: cgroup: break kernfs active_ref protection in cgroup directory operations cgroup_tree_mutex should nest above the kernfs active_ref protection; however, cgroup_create() and cgroup_rename() were grabbing cgroup_tree_mutex while under kernfs active_ref protection. This has actualy possibility to lead to deadlocks in case these operations race against cgroup_rmdir() which invokes kernfs_remove() on directory kernfs_node while holding cgroup_tree_mutex. Neither cgroup_create() or cgroup_rename() requires active_ref protection. The former already has enough synchronization through cgroup_lock_live_group() and the latter doesn't care, so this can be fixed by updating both functions to break all active_ref protections before grabbing cgroup_tree_mutex. While this patch fixes the immediate issue, it probably needs further work in the long term - kernfs directories should enable lockdep annotations and maybe the better way to handle this is marking directory nodes as not needing active_ref protection rather than breaking it in each operation. Signed-off-by: Tejun Heo Cc: Greg Kroah-Hartman --- kernel/cgroup.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 98a8045e2149..58c67b3060b5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2324,6 +2324,14 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, if (cgroup_sane_behavior(cgrp)) return -EPERM; + /* + * We're gonna grab cgroup_tree_mutex which nests outside kernfs + * active_ref. kernfs_rename() doesn't require active_ref + * protection. Break them before grabbing cgroup_tree_mutex. 
+ */ + kernfs_break_active_protection(new_parent); + kernfs_break_active_protection(kn); + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); @@ -2331,6 +2339,9 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); + + kernfs_unbreak_active_protection(kn); + kernfs_unbreak_active_protection(new_parent); return ret; } @@ -3778,8 +3789,22 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode) { struct cgroup *parent = parent_kn->priv; + int ret; - return cgroup_create(parent, name, mode); + /* + * cgroup_create() grabs cgroup_tree_mutex which nests outside + * kernfs active_ref and cgroup_create() already synchronizes + * properly against removal through cgroup_lock_live_group(). + * Break it before calling cgroup_create(). + */ + cgroup_get(parent); + kernfs_break_active_protection(parent_kn); + + ret = cgroup_create(parent, name, mode); + + kernfs_unbreak_active_protection(parent_kn); + cgroup_put(parent); + return ret; } /* -- cgit v1.2.3 From bc4c426ee2431d1f717004d3bbaacbd819b544fd Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Fri, 21 Mar 2014 08:23:38 -0400 Subject: Revert "tracing: Move event storage for array from macro to standalone function" I originally wrote commit 35bb4399bd0e to shrink the size of the overhead of tracepoints by several kilobytes. Later, I received a patch from Vaibhav Nagarnaik that fixed a bug in the same code that this commit touches. Not only did it fix a bug, it also removed code and shrunk the size of the overhead of trace events even more than this commit did. Since this commit is scheduled for 3.15 and Vaibhav's patch is already in mainline, I need to revert this patch in order to keep it from conflicting with Vaibhav's patch. Not to mention, Vaibhav's patch makes this patch obsolete. 
Link: http://lkml.kernel.org/r/20140320225637.0226041b@gandalf.local.home Cc: Vaibhav Nagarnaik Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 6 ++++++ kernel/trace/trace_export.c | 12 ++++++++---- kernel/trace/trace_output.c | 21 --------------------- 3 files changed, 14 insertions(+), 25 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index b8f73b333a3c..2f7b8e31e3a4 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -27,6 +27,12 @@ DEFINE_MUTEX(event_mutex); +DEFINE_MUTEX(event_storage_mutex); +EXPORT_SYMBOL_GPL(event_storage_mutex); + +char event_storage[EVENT_STORAGE_SIZE]; +EXPORT_SYMBOL_GPL(event_storage); + LIST_HEAD(ftrace_events); static LIST_HEAD(ftrace_common_fields); diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 39c746c5ae73..7c3e3e72e2b6 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -96,10 +96,14 @@ static void __always_unused ____ftrace_check_##name(void) \ #define __array(type, item, len) \ do { \ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ - ret = ftrace_event_define_field(event_call, #type, len, \ - #item, offsetof(typeof(field), item), \ - sizeof(field.item), \ - is_signed_type(type), filter_type); \ + mutex_lock(&event_storage_mutex); \ + snprintf(event_storage, sizeof(event_storage), \ + "%s[%d]", #type, len); \ + ret = trace_define_field(event_call, event_storage, #item, \ + offsetof(typeof(field), item), \ + sizeof(field.item), \ + is_signed_type(type), filter_type); \ + mutex_unlock(&event_storage_mutex); \ if (ret) \ return ret; \ } while (0); diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ee8d74840b88..ca0e79e2abaa 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -20,10 +20,6 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; static int next_event_type = __TRACE_LAST_TYPE + 1; -#define EVENT_STORAGE_SIZE 128 -static DEFINE_MUTEX(event_storage_mutex); -static char event_storage[EVENT_STORAGE_SIZE]; - int trace_print_seq(struct seq_file *m, struct trace_seq *s) { int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; @@ -474,23 +470,6 @@ int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) } EXPORT_SYMBOL_GPL(ftrace_output_call); -int ftrace_event_define_field(struct ftrace_event_call *call, - char *type, int len, char *item, int offset, - int field_size, int sign, int filter) -{ - int ret; - - mutex_lock(&event_storage_mutex); - snprintf(event_storage, sizeof(event_storage), - "%s[%d]", type, len); - ret = trace_define_field(call, event_storage, item, offset, - field_size, sign, filter); - mutex_unlock(&event_storage_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(ftrace_event_define_field); - #ifdef CONFIG_KRETPROBES static inline const char *kretprobed(const char *name) { -- cgit v1.2.3 From 0dea6d52638b2693b18cd2ed8938b236e0789ddb Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Fri, 21 Mar 2014 01:19:01 -0400 Subject: tracepoint: Remove unused API functions After the following commit: commit b75ef8b44b1cb95f5a26484b0e2fe37a63b12b44 Author: Mathieu Desnoyers Date: Wed Aug 10 15:18:39 2011 -0400 Tracepoint: Dissociate from module mutex The following functions became unnecessary: - tracepoint_probe_register_noupdate, - tracepoint_probe_unregister_noupdate, - tracepoint_probe_update_all. In fact, none of the in-kernel tracers, nor LTTng, nor SystemTAP use them. Remove those. 
Moreover, the functions: - tracepoint_iter_start, - tracepoint_iter_next, - tracepoint_iter_stop, - tracepoint_iter_reset. are unused by in-kernel tracers, LTTng and SystemTAP. Remove those too. Link: http://lkml.kernel.org/r/1395379142-2118-2-git-send-email-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 222 ++-------------------------------------------------- 1 file changed, 5 insertions(+), 217 deletions(-) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index e2a58a22b0f4..65d9f9459a75 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -67,10 +67,7 @@ struct tracepoint_entry { }; struct tp_probes { - union { - struct rcu_head rcu; - struct list_head list; - } u; + struct rcu_head rcu; struct tracepoint_func probes[0]; }; @@ -83,7 +80,7 @@ static inline void *allocate_probes(int count) static void rcu_free_old_probes(struct rcu_head *head) { - kfree(container_of(head, struct tp_probes, u.rcu)); + kfree(container_of(head, struct tp_probes, rcu)); } static inline void release_probes(struct tracepoint_func *old) @@ -91,7 +88,7 @@ static inline void release_probes(struct tracepoint_func *old) if (old) { struct tp_probes *tp_probes = container_of(old, struct tp_probes, probes[0]); - call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes); + call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes); } } @@ -459,204 +456,11 @@ int tracepoint_probe_unregister(const char *name, void *probe, void *data) } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); -static LIST_HEAD(old_probes); -static int need_update; - -static void tracepoint_add_old_probes(void *old) -{ - need_update = 1; - if (old) { - struct tp_probes *tp_probes = container_of(old, - struct tp_probes, probes[0]); - list_add(&tp_probes->u.list, &old_probes); - } -} - -/** - * tracepoint_probe_register_noupdate - register a probe but not connect - * @name: tracepoint name - * @probe: probe handler - * @data: probe private data - * - * caller must call tracepoint_probe_update_all() - */ -int tracepoint_probe_register_noupdate(const char *name, void *probe, - void *data) -{ - struct tracepoint_func *old; - - mutex_lock(&tracepoints_mutex); - old = tracepoint_add_probe(name, probe, data); - if (IS_ERR(old)) { - mutex_unlock(&tracepoints_mutex); - return PTR_ERR(old); - } - tracepoint_add_old_probes(old); - mutex_unlock(&tracepoints_mutex); - return 0; -} -EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); - -/** - * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect - * @name: tracepoint name - * @probe: probe function pointer - * @data: probe private data - * - * caller must call tracepoint_probe_update_all() - */ -int tracepoint_probe_unregister_noupdate(const char *name, void *probe, - void *data) -{ - struct tracepoint_func *old; - - mutex_lock(&tracepoints_mutex); - old = tracepoint_remove_probe(name, probe, data); - if (IS_ERR(old)) { - mutex_unlock(&tracepoints_mutex); - return PTR_ERR(old); - } - tracepoint_add_old_probes(old); - mutex_unlock(&tracepoints_mutex); - return 0; -} -EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate); - -/** - * tracepoint_probe_update_all - update tracepoints - */ -void tracepoint_probe_update_all(void) -{ - LIST_HEAD(release_probes); - struct tp_probes *pos, *next; - - mutex_lock(&tracepoints_mutex); - if (!need_update) { - mutex_unlock(&tracepoints_mutex); - return; - } - if (!list_empty(&old_probes)) - list_replace_init(&old_probes, &release_probes); - need_update = 
0; - tracepoint_update_probes(); - mutex_unlock(&tracepoints_mutex); - list_for_each_entry_safe(pos, next, &release_probes, u.list) { - list_del(&pos->u.list); - call_rcu_sched(&pos->u.rcu, rcu_free_old_probes); - } -} -EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); - -/** - * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. - * @tracepoint: current tracepoints (in), next tracepoint (out) - * @begin: beginning of the range - * @end: end of the range - * - * Returns whether a next tracepoint has been found (1) or not (0). - * Will return the first tracepoint in the range if the input tracepoint is - * NULL. - */ -static int tracepoint_get_iter_range(struct tracepoint * const **tracepoint, - struct tracepoint * const *begin, struct tracepoint * const *end) -{ - if (!*tracepoint && begin != end) { - *tracepoint = begin; - return 1; - } - if (*tracepoint >= begin && *tracepoint < end) - return 1; - return 0; -} - -#ifdef CONFIG_MODULES -static void tracepoint_get_iter(struct tracepoint_iter *iter) -{ - int found = 0; - struct tp_module *iter_mod; - - /* Core kernel tracepoints */ - if (!iter->module) { - found = tracepoint_get_iter_range(&iter->tracepoint, - __start___tracepoints_ptrs, - __stop___tracepoints_ptrs); - if (found) - goto end; - } - /* Tracepoints in modules */ - mutex_lock(&tracepoints_mutex); - list_for_each_entry(iter_mod, &tracepoint_module_list, list) { - /* - * Sorted module list - */ - if (iter_mod < iter->module) - continue; - else if (iter_mod > iter->module) - iter->tracepoint = NULL; - found = tracepoint_get_iter_range(&iter->tracepoint, - iter_mod->tracepoints_ptrs, - iter_mod->tracepoints_ptrs - + iter_mod->num_tracepoints); - if (found) { - iter->module = iter_mod; - break; - } - } - mutex_unlock(&tracepoints_mutex); -end: - if (!found) - tracepoint_iter_reset(iter); -} -#else /* CONFIG_MODULES */ -static void tracepoint_get_iter(struct tracepoint_iter *iter) -{ - int found = 0; - - /* Core kernel tracepoints */ - found = tracepoint_get_iter_range(&iter->tracepoint, - __start___tracepoints_ptrs, - __stop___tracepoints_ptrs); - if (!found) - tracepoint_iter_reset(iter); -} -#endif /* CONFIG_MODULES */ - -void tracepoint_iter_start(struct tracepoint_iter *iter) -{ - tracepoint_get_iter(iter); -} -EXPORT_SYMBOL_GPL(tracepoint_iter_start); - -void tracepoint_iter_next(struct tracepoint_iter *iter) -{ - iter->tracepoint++; - /* - * iter->tracepoint may be invalid because we blindly incremented it. - * Make sure it is valid by marshalling on the tracepoints, getting the - * tracepoints from following modules if necessary. - */ - tracepoint_get_iter(iter); -} -EXPORT_SYMBOL_GPL(tracepoint_iter_next); - -void tracepoint_iter_stop(struct tracepoint_iter *iter) -{ -} -EXPORT_SYMBOL_GPL(tracepoint_iter_stop); - -void tracepoint_iter_reset(struct tracepoint_iter *iter) -{ -#ifdef CONFIG_MODULES - iter->module = NULL; -#endif /* CONFIG_MODULES */ - iter->tracepoint = NULL; -} -EXPORT_SYMBOL_GPL(tracepoint_iter_reset); #ifdef CONFIG_MODULES static int tracepoint_module_coming(struct module *mod) { - struct tp_module *tp_mod, *iter; + struct tp_module *tp_mod; int ret = 0; if (!mod->num_tracepoints) @@ -677,23 +481,7 @@ static int tracepoint_module_coming(struct module *mod) } tp_mod->num_tracepoints = mod->num_tracepoints; tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; - - /* - * tracepoint_module_list is kept sorted by struct module pointer - * address for iteration on tracepoints from a seq_file that can release - * the mutex between calls. 
- */ - list_for_each_entry_reverse(iter, &tracepoint_module_list, list) { - BUG_ON(iter == tp_mod); /* Should never be in the list twice */ - if (iter < tp_mod) { - /* We belong to the location right after iter. */ - list_add(&tp_mod->list, &iter->list); - goto module_added; - } - } - /* We belong to the beginning of the list */ - list_add(&tp_mod->list, &tracepoint_module_list); -module_added: + list_add_tail(&tp_mod->list, &tracepoint_module_list); tracepoint_update_probe_range(mod->tracepoints_ptrs, mod->tracepoints_ptrs + mod->num_tracepoints); end: -- cgit v1.2.3 From 01a971406177c2ca9834be6914a67e20f463a3e6 Mon Sep 17 00:00:00 2001 From: Monam Agarwal Date: Mon, 24 Mar 2014 00:17:18 +0530 Subject: cgroup: Use RCU_INIT_POINTER(x, NULL) in cgroup.c This patch replaces rcu_assign_pointer(x, NULL) with RCU_INIT_POINTER(x, NULL) The rcu_assign_pointer() ensures that the initialization of a structure is carried out before storing a pointer to that structure. And in the case of the NULL pointer, there is no structure to initialize. So, rcu_assign_pointer(p, NULL) can be safely converted to RCU_INIT_POINTER(p, NULL) Signed-off-by: Monam Agarwal Signed-off-by: Tejun Heo --- kernel/cgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 58c67b3060b5..e378cb2fac5e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3545,7 +3545,7 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); - rcu_assign_pointer(css->cgroup->subsys[css->ss->id], NULL); + RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL); call_rcu(&css->rcu_head, css_free_rcu_fn); } -- cgit v1.2.3 From 3862807880acc0adaef6749738d210c9f45c3049 Mon Sep 17 00:00:00 2001 From: Aaron Tomlin Date: Mon, 24 Mar 2014 14:03:57 +0000 Subject: tracing: Add BUG_ON when stack end location is over written It is difficult to detect a stack overrun when it actually occurs. We have observed that this type of corruption is often silent and can go unnoticed. Once the corrupted region is examined, the outcome is undefined and often results in sporadic system crashes. When the stack tracing feature is enabled, let's check for this condition and take appropriate action. Note: init_task doesn't get its stack end location set to STACK_END_MAGIC. Link: http://lkml.kernel.org/r/1395669837-30209-1-git-send-email-atomlin@redhat.com Signed-off-by: Aaron Tomlin Signed-off-by: Steven Rostedt --- kernel/trace/trace_stack.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index e6be585cf06a..21b320e5d163 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -144,6 +145,8 @@ check_stack(unsigned long ip, unsigned long *stack) i++; } + BUG_ON(current != &init_task && + *(end_of_stack(current)) != STACK_END_MAGIC); out: arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); -- cgit v1.2.3 From e231d54c1239ccf31aaee311bed0c4d1937cae2c Mon Sep 17 00:00:00 2001 From: Monam Agarwal Date: Mon, 24 Mar 2014 00:16:19 +0530 Subject: kernel: Use RCU_INIT_POINTER(x, NULL) in audit.c This patch replaces rcu_assign_pointer(x, NULL) with RCU_INIT_POINTER(x, NULL) The rcu_assign_pointer() ensures that the initialization of a structure is carried out before storing a pointer to that structure. 
And in the case of the NULL pointer, there is no structure to initialize. So, rcu_assign_pointer(p, NULL) can be safely converted to RCU_INIT_POINTER(p, NULL) Signed-off-by: Monam Agarwal Signed-off-by: Eric Paris --- kernel/audit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index c0696dcfed11..ad77d1e80895 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -1092,7 +1092,7 @@ static void __net_exit audit_net_exit(struct net *net) audit_sock = NULL; } - rcu_assign_pointer(aunet->nlsk, NULL); + RCU_INIT_POINTER(aunet->nlsk, NULL); synchronize_net(); netlink_kernel_release(sock); } -- cgit v1.2.3 From e8604cb43690b781f9a7ad4a770f3e10259fe939 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 28 Mar 2014 15:18:27 +0800 Subject: cgroup: fix spurious lockdep warning in cgroup_exit() cgroup_exit() is called in fork and exit path. If it's called in the failure path during fork, PF_EXITING isn't set, and then lockdep will complain. Fix this by removing cgroup_exit() in that failure path. cgroup_fork() does nothing that needs cleanup. Reported-by: Sasha Levin Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cgroup.c | 3 +-- kernel/fork.c | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e378cb2fac5e..60fd6f1f6d4e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4431,8 +4431,7 @@ void cgroup_post_fork(struct task_struct *child) * notify_on_release(), then leave the task attached to the root cgroup in * each hierarchy for the remainder of its exit. No need to bother with * init_css_set refcnting. init_css_set never goes away and we can't race - * with migration path - either PF_EXITING is visible to migration path or - * @tsk never got on the tasklist. + * with migration path - PF_EXITING is visible to migration path. */ void cgroup_exit(struct task_struct *tsk, int run_callbacks) { diff --git a/kernel/fork.c b/kernel/fork.c index a17621c6cd42..8852b3463ab7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1271,7 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; - goto bad_fork_cleanup_cgroup; + goto bad_fork_cleanup_threadgroup_lock; } mpol_fix_fork_child_flag(p); #endif @@ -1524,11 +1524,10 @@ bad_fork_cleanup_policy: perf_event_free_task(p); #ifdef CONFIG_NUMA mpol_put(p->mempolicy); -bad_fork_cleanup_cgroup: +bad_fork_cleanup_threadgroup_lock: #endif if (clone_flags & CLONE_THREAD) threadgroup_change_end(current); - cgroup_exit(p, 0); delayacct_tsk_free(p); module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: -- cgit v1.2.3 From 1ec41830e087cda1f62dda4182c2b62811eb0ffc Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 28 Mar 2014 15:22:19 +0800 Subject: cgroup: remove useless argument from cgroup_exit() Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cgroup.c | 5 ++--- kernel/exit.c | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 60fd6f1f6d4e..f7f94322d312 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -4416,7 +4416,6 @@ void cgroup_post_fork(struct task_struct *child) /** * cgroup_exit - detach cgroup from exiting task * @tsk: pointer to task_struct of exiting process - * @run_callback: run exit callbacks? * * Description: Detach cgroup from @tsk and release it. 
* @@ -4433,7 +4432,7 @@ void cgroup_post_fork(struct task_struct *child) * init_css_set refcnting. init_css_set never goes away and we can't race * with migration path - PF_EXITING is visible to migration path. */ -void cgroup_exit(struct task_struct *tsk, int run_callbacks) +void cgroup_exit(struct task_struct *tsk) { struct cgroup_subsys *ss; struct css_set *cset; @@ -4455,7 +4454,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) cset = task_css_set(tsk); RCU_INIT_POINTER(tsk->cgroups, &init_css_set); - if (run_callbacks && need_forkexit_callback) { + if (need_forkexit_callback) { /* see cgroup_post_fork() for details */ for_each_subsys(ss, i) { if (ss->exit) { diff --git a/kernel/exit.c b/kernel/exit.c index 1e77fc645317..6480d1c85d7a 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -797,7 +797,7 @@ void do_exit(long code) */ perf_event_exit_task(tsk); - cgroup_exit(tsk, 1); + cgroup_exit(tsk); if (group_dead) disassociate_ctty(1); -- cgit v1.2.3 From 57673c2b0baa900dddae3b9eb3d7748ebf550eb3 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 31 Mar 2014 14:39:57 +1030 Subject: Use 'E' instead of 'X' for unsigned module taint flag. Takashi Iwai says: > The letter 'X' has been already used for SUSE kernels for very long > time, to indicate the external supported modules. Can the new flag be > changed to another letter for avoiding conflict...? > (BTW, we also use 'N' for "no support", too.) Note: this code should be cleaned up, so we don't have such maps in three places! Signed-off-by: Rusty Russell --- kernel/module.c | 2 +- kernel/panic.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index c1acb0c5b637..5806e096d110 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1014,7 +1014,7 @@ static size_t module_flags_taint(struct module *mod, char *buf) if (mod->taints & (1 << TAINT_CRAP)) buf[l++] = 'C'; if (mod->taints & (1 << TAINT_UNSIGNED_MODULE)) - buf[l++] = 'X'; + buf[l++] = 'E'; /* * TAINT_FORCED_RMMOD: could be added. * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't diff --git a/kernel/panic.c b/kernel/panic.c index 0e25fe10871e..02b6c9f0171b 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -210,7 +210,7 @@ static const struct tnt tnts[] = { { TAINT_CRAP, 'C', ' ' }, { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' }, { TAINT_OOT_MODULE, 'O', ' ' }, - { TAINT_UNSIGNED_MODULE, 'X', ' ' }, + { TAINT_UNSIGNED_MODULE, 'E', ' ' }, }; /** @@ -229,7 +229,7 @@ static const struct tnt tnts[] = { * 'C' - modules from drivers/staging are loaded. * 'I' - Working around severe firmware bug. * 'O' - Out-of-tree module has been loaded. - * 'X' - Unsigned module has been loaded. + * 'E' - Unsigned module has been loaded. * * The string is overwritten by the next call to print_tainted(). */ -- cgit v1.2.3 From bd4cf0ed331a275e9bf5a49e6d0fd55dffc551b8 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 28 Mar 2014 18:58:25 +0100 Subject: net: filter: rework/optimize internal BPF interpreter's instruction set This patch replaces/reworks the kernel-internal BPF interpreter with an optimized BPF instruction set format that is modelled closer to mimic native instruction sets and is designed to be JITed with one to one mapping. Thus, the new interpreter is noticeably faster than the current implementation of sk_run_filter(); mainly for two reasons: 1. Fall-through jumps: BPF jump instructions are forced to go either 'true' or 'false' branch which causes branch-miss penalty. 
The new BPF jump instructions have only one branch and fall-through otherwise, which fits the CPU branch predictor logic better. `perf stat` shows drastic difference for branch-misses between the old and new code. 2. Jump-threaded implementation of interpreter vs switch statement: Instead of single table-jump at the top of 'switch' statement, gcc will now generate multiple table-jump instructions, which helps CPU branch predictor logic. Note that the verification of filters is still being done through sk_chk_filter() in classical BPF format, so filters from user- or kernel space are verified in the same way as we do now, and same restrictions/constraints hold as well. We reuse current BPF JIT compilers in a way that this upgrade would even be fine as is, but nevertheless allows for a successive upgrade of BPF JIT compilers to the new format. The internal instruction set migration is being done after the probing for JIT compilation, so in case JIT compilers are able to create a native opcode image, we're going to use that, and in all other cases we're doing a follow-up migration of the BPF program's instruction set, so that it can be transparently run in the new interpreter. In short, the *internal* format extends BPF in the following way (more details can be taken from the appended documentation): - Number of registers increase from 2 to 10 - Register width increases from 32-bit to 64-bit - Conditional jt/jf targets replaced with jt/fall-through - Adds signed > and >= insns - 16 4-byte stack slots for register spill-fill replaced with up to 512 bytes of multi-use stack space - Introduction of bpf_call insn and register passing convention for zero overhead calls from/to other kernel functions - Adds arithmetic right shift and endianness conversion insns - Adds atomic_add insn - Old tax/txa insns are replaced with 'mov dst,src' insn Performance of two BPF filters generated by libpcap resp. bpf_asm was measured on x86_64, i386 and arm32 (other libpcap programs have similar performance differences): fprog #1 is taken from Documentation/networking/filter.txt: tcpdump -i eth0 port 22 -dd fprog #2 is taken from 'man tcpdump': tcpdump -i eth0 'tcp port 22 and (((ip[2:2] - ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0)' -dd Raw performance data from BPF micro-benchmark: SK_RUN_FILTER on the same SKB (cache-hit) or 10k SKBs (cache-miss); time in ns per call, smaller is better: --x86_64-- fprog #1 fprog #1 fprog #2 fprog #2 cache-hit cache-miss cache-hit cache-miss old BPF 90 101 192 202 new BPF 31 71 47 97 old BPF jit 12 34 17 44 new BPF jit TBD --i386-- fprog #1 fprog #1 fprog #2 fprog #2 cache-hit cache-miss cache-hit cache-miss old BPF 107 136 227 252 new BPF 40 119 69 172 --arm32-- fprog #1 fprog #1 fprog #2 fprog #2 cache-hit cache-miss cache-hit cache-miss old BPF 202 300 475 540 new BPF 180 270 330 470 old BPF jit 26 182 37 202 new BPF jit TBD Thus, without changing any userland BPF filters, applications on top of AF_PACKET (or other families) such as libpcap/tcpdump, cls_bpf classifier, netfilter's xt_bpf, team driver's load-balancing mode, and many more will have better interpreter filtering performance. While we are replacing the internal BPF interpreter, we also need to convert seccomp BPF in the same step to make use of the new internal structure since it makes use of lower-level API details without being further decoupled through higher-level calls like sk_unattached_filter_{create,destroy}(), for example. 
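To make the jump-encoding point above concrete, the classic-BPF instruction layout (from include/uapi/linux/filter.h) carries both targets in every conditional; the trailing comment paraphrases the internal format's fall-through behaviour described above rather than quoting the new structure from the patch:

/* classic BPF: every conditional jump names both successors */
struct sock_filter {
	__u16	code;	/* opcode */
	__u8	jt;	/* instructions to skip when the test is true  */
	__u8	jf;	/* instructions to skip when the test is false */
	__u32	k;	/* immediate / multiuse operand */
};

/*
 * Internal format (paraphrased): a conditional encodes only the taken
 * offset; when the test fails, execution falls through to the next
 * instruction, which suits CPU branch predictors much better.
 */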
Just as for normal socket filtering, also seccomp BPF experiences a time-to-verdict speedup: 05-sim-long_jumps.c of libseccomp was used as micro-benchmark: seccomp_rule_add_exact(ctx,... seccomp_rule_add_exact(ctx,... rc = seccomp_load(ctx); for (i = 0; i < 10000000; i++) syscall(199, 100); 'short filter' has 2 rules 'large filter' has 200 rules 'short filter' performance is slightly better on x86_64/i386/arm32 'large filter' is much faster on x86_64 and i386 and shows no difference on arm32 --x86_64-- short filter old BPF: 2.7 sec 39.12% bench libc-2.15.so [.] syscall 8.10% bench [kernel.kallsyms] [k] sk_run_filter 6.31% bench [kernel.kallsyms] [k] system_call 5.59% bench [kernel.kallsyms] [k] trace_hardirqs_on_caller 4.37% bench [kernel.kallsyms] [k] trace_hardirqs_off_caller 3.70% bench [kernel.kallsyms] [k] __secure_computing 3.67% bench [kernel.kallsyms] [k] lock_is_held 3.03% bench [kernel.kallsyms] [k] seccomp_bpf_load new BPF: 2.58 sec 42.05% bench libc-2.15.so [.] syscall 6.91% bench [kernel.kallsyms] [k] system_call 6.25% bench [kernel.kallsyms] [k] trace_hardirqs_on_caller 6.07% bench [kernel.kallsyms] [k] __secure_computing 5.08% bench [kernel.kallsyms] [k] sk_run_filter_int_seccomp --arm32-- short filter old BPF: 4.0 sec 39.92% bench [kernel.kallsyms] [k] vector_swi 16.60% bench [kernel.kallsyms] [k] sk_run_filter 14.66% bench libc-2.17.so [.] syscall 5.42% bench [kernel.kallsyms] [k] seccomp_bpf_load 5.10% bench [kernel.kallsyms] [k] __secure_computing new BPF: 3.7 sec 35.93% bench [kernel.kallsyms] [k] vector_swi 21.89% bench libc-2.17.so [.] syscall 13.45% bench [kernel.kallsyms] [k] sk_run_filter_int_seccomp 6.25% bench [kernel.kallsyms] [k] __secure_computing 3.96% bench [kernel.kallsyms] [k] syscall_trace_exit --x86_64-- large filter old BPF: 8.6 seconds 73.38% bench [kernel.kallsyms] [k] sk_run_filter 10.70% bench libc-2.15.so [.] syscall 5.09% bench [kernel.kallsyms] [k] seccomp_bpf_load 1.97% bench [kernel.kallsyms] [k] system_call new BPF: 5.7 seconds 66.20% bench [kernel.kallsyms] [k] sk_run_filter_int_seccomp 16.75% bench libc-2.15.so [.] syscall 3.31% bench [kernel.kallsyms] [k] system_call 2.88% bench [kernel.kallsyms] [k] __secure_computing --i386-- large filter old BPF: 5.4 sec new BPF: 3.8 sec --arm32-- large filter old BPF: 13.5 sec 73.88% bench [kernel.kallsyms] [k] sk_run_filter 10.29% bench [kernel.kallsyms] [k] vector_swi 6.46% bench libc-2.17.so [.] syscall 2.94% bench [kernel.kallsyms] [k] seccomp_bpf_load 1.19% bench [kernel.kallsyms] [k] __secure_computing 0.87% bench [kernel.kallsyms] [k] sys_getuid new BPF: 13.5 sec 76.08% bench [kernel.kallsyms] [k] sk_run_filter_int_seccomp 10.98% bench [kernel.kallsyms] [k] vector_swi 5.87% bench libc-2.17.so [.] syscall 1.77% bench [kernel.kallsyms] [k] __secure_computing 0.93% bench [kernel.kallsyms] [k] sys_getuid BPF filters generated by seccomp are very branchy, so the new internal BPF performance is better than the old one. Performance gains will be even higher when BPF JIT is committed for the new structure, which is planned in future work (as successive JIT migrations). BPF has also been stress-tested with trinity's BPF fuzzer. Joint work with Daniel Borkmann. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Cc: Hagen Paul Pfeifer Cc: Kees Cook Cc: Paul Moore Cc: Ingo Molnar Cc: H. Peter Anvin Cc: linux-kernel@vger.kernel.org Acked-by: Kees Cook Signed-off-by: David S. 
Miller --- kernel/seccomp.c | 119 +++++++++++++++++++++++++++---------------------------- 1 file changed, 58 insertions(+), 61 deletions(-) (limited to 'kernel') diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b7a10048a32c..4f18e754c23e 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -55,60 +55,33 @@ struct seccomp_filter { atomic_t usage; struct seccomp_filter *prev; unsigned short len; /* Instruction count */ - struct sock_filter insns[]; + struct sock_filter_int insnsi[]; }; /* Limit any path through the tree to 256KB worth of instructions. */ #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) -/** - * get_u32 - returns a u32 offset into data - * @data: a unsigned 64 bit value - * @index: 0 or 1 to return the first or second 32-bits - * - * This inline exists to hide the length of unsigned long. If a 32-bit - * unsigned long is passed in, it will be extended and the top 32-bits will be - * 0. If it is a 64-bit unsigned long, then whatever data is resident will be - * properly returned. - * +/* * Endianness is explicitly ignored and left for BPF program authors to manage * as per the specific architecture. */ -static inline u32 get_u32(u64 data, int index) +static void populate_seccomp_data(struct seccomp_data *sd) { - return ((u32 *)&data)[index]; -} + struct task_struct *task = current; + struct pt_regs *regs = task_pt_regs(task); -/* Helper for bpf_load below. */ -#define BPF_DATA(_name) offsetof(struct seccomp_data, _name) -/** - * bpf_load: checks and returns a pointer to the requested offset - * @off: offset into struct seccomp_data to load from - * - * Returns the requested 32-bits of data. - * seccomp_check_filter() should assure that @off is 32-bit aligned - * and not out of bounds. Failure to do so is a BUG. - */ -u32 seccomp_bpf_load(int off) -{ - struct pt_regs *regs = task_pt_regs(current); - if (off == BPF_DATA(nr)) - return syscall_get_nr(current, regs); - if (off == BPF_DATA(arch)) - return syscall_get_arch(current, regs); - if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { - unsigned long value; - int arg = (off - BPF_DATA(args[0])) / sizeof(u64); - int index = !!(off % sizeof(u64)); - syscall_get_arguments(current, regs, arg, 1, &value); - return get_u32(value, index); - } - if (off == BPF_DATA(instruction_pointer)) - return get_u32(KSTK_EIP(current), 0); - if (off == BPF_DATA(instruction_pointer) + sizeof(u32)) - return get_u32(KSTK_EIP(current), 1); - /* seccomp_check_filter should make this impossible. */ - BUG(); + sd->nr = syscall_get_nr(task, regs); + sd->arch = syscall_get_arch(task, regs); + + /* Unroll syscall_get_args to help gcc on arm. */ + syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); + syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]); + syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]); + syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]); + syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]); + syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]); + + sd->instruction_pointer = KSTK_EIP(task); } /** @@ -133,17 +106,17 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) switch (code) { case BPF_S_LD_W_ABS: - ftest->code = BPF_S_ANC_SECCOMP_LD_W; + ftest->code = BPF_LDX | BPF_W | BPF_ABS; /* 32-bit aligned and not out of bounds. 
*/ if (k >= sizeof(struct seccomp_data) || k & 3) return -EINVAL; continue; case BPF_S_LD_W_LEN: - ftest->code = BPF_S_LD_IMM; + ftest->code = BPF_LD | BPF_IMM; ftest->k = sizeof(struct seccomp_data); continue; case BPF_S_LDX_W_LEN: - ftest->code = BPF_S_LDX_IMM; + ftest->code = BPF_LDX | BPF_IMM; ftest->k = sizeof(struct seccomp_data); continue; /* Explicitly include allowed calls. */ @@ -185,6 +158,7 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) case BPF_S_JMP_JGT_X: case BPF_S_JMP_JSET_K: case BPF_S_JMP_JSET_X: + sk_decode_filter(ftest, ftest); continue; default: return -EINVAL; @@ -202,18 +176,21 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) static u32 seccomp_run_filters(int syscall) { struct seccomp_filter *f; + struct seccomp_data sd; u32 ret = SECCOMP_RET_ALLOW; /* Ensure unexpected behavior doesn't result in failing open. */ if (WARN_ON(current->seccomp.filter == NULL)) return SECCOMP_RET_KILL; + populate_seccomp_data(&sd); + /* * All filters in the list are evaluated and the lowest BPF return * value always takes priority (ignoring the DATA). */ for (f = current->seccomp.filter; f; f = f->prev) { - u32 cur_ret = sk_run_filter(NULL, f->insns); + u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi); if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) ret = cur_ret; } @@ -231,6 +208,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) struct seccomp_filter *filter; unsigned long fp_size = fprog->len * sizeof(struct sock_filter); unsigned long total_insns = fprog->len; + struct sock_filter *fp; + int new_len; long ret; if (fprog->len == 0 || fprog->len > BPF_MAXINSNS) @@ -252,28 +231,43 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) CAP_SYS_ADMIN) != 0) return -EACCES; - /* Allocate a new seccomp_filter */ - filter = kzalloc(sizeof(struct seccomp_filter) + fp_size, - GFP_KERNEL|__GFP_NOWARN); - if (!filter) + fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN); + if (!fp) return -ENOMEM; - atomic_set(&filter->usage, 1); - filter->len = fprog->len; /* Copy the instructions from fprog. 
*/ ret = -EFAULT; - if (copy_from_user(filter->insns, fprog->filter, fp_size)) - goto fail; + if (copy_from_user(fp, fprog->filter, fp_size)) + goto free_prog; /* Check and rewrite the fprog via the skb checker */ - ret = sk_chk_filter(filter->insns, filter->len); + ret = sk_chk_filter(fp, fprog->len); if (ret) - goto fail; + goto free_prog; /* Check and rewrite the fprog for seccomp use */ - ret = seccomp_check_filter(filter->insns, filter->len); + ret = seccomp_check_filter(fp, fprog->len); + if (ret) + goto free_prog; + + /* Convert 'sock_filter' insns to 'sock_filter_int' insns */ + ret = sk_convert_filter(fp, fprog->len, NULL, &new_len); + if (ret) + goto free_prog; + + /* Allocate a new seccomp_filter */ + filter = kzalloc(sizeof(struct seccomp_filter) + + sizeof(struct sock_filter_int) * new_len, + GFP_KERNEL|__GFP_NOWARN); + if (!filter) + goto free_prog; + + ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len); if (ret) - goto fail; + goto free_filter; + + atomic_set(&filter->usage, 1); + filter->len = new_len; /* * If there is an existing filter, make it the prev and don't drop its @@ -282,8 +276,11 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) filter->prev = current->seccomp.filter; current->seccomp.filter = filter; return 0; -fail: + +free_filter: kfree(filter); +free_prog: + kfree(fp); return ret; } -- cgit v1.2.3 From 543bc6a1a987672b79d6ebe8e2ab10471d8f1047 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Sun, 30 Mar 2014 19:07:54 -0400 Subject: AUDIT: Allow login in non-init namespaces It its possible to configure your PAM stack to refuse login if audit messages (about the login) were unable to be sent. This is common in many distros and thus normal configuration of many containers. The PAM modules determine if audit is enabled/disabled in the kernel based on the return value from sending an audit message on the netlink socket. If userspace gets back ECONNREFUSED it believes audit is disabled in the kernel. If it gets any other error else it refuses to let the login proceed. Just about ever since the introduction of namespaces the kernel audit subsystem has returned EPERM if the task sending a message was not in the init user or pid namespace. So many forms of containers have never worked if audit was enabled in the kernel. BUT if the container was not in net_init then the kernel network code would send ECONNREFUSED (instead of the audit code sending EPERM). Thus by pure accident/dumb luck/bug if an admin configured the PAM stack to reject all logins that didn't talk to audit, but then ran the login untility in the non-init_net namespace, it would work!! Clearly this was a bug, but it is a bug some people expected. With the introduction of network namespace support in 3.14-rc1 the two bugs stopped cancelling each other out. Now, containers in the non-init_net namespace refused to let users log in (just like PAM was configfured!) Obviously some people were not happy that what used to let users log in, now didn't! This fix is kinda hacky. We return ECONNREFUSED for all non-init relevant namespaces. That means that not only will the old broken non-init_net setups continue to work, now the broken non-init_pid or non-init_user setups will 'work'. They don't really work, since audit isn't logging things. But it's what most users want. In 3.15 we should have patches to support not only the non-init_net (3.14) namespace but also the non-init_pid and non-init_user namespace. So all will be right in the world. 
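For illustration, a minimal userspace sketch of the policy this message describes; it is not PAM or libaudit code, and login_may_proceed() with its calling convention is made up:

    #include <errno.h>
    #include <stdbool.h>

    /* Hypothetical helper, not part of PAM or libaudit: decide whether a
     * login may proceed given the result of sending the audit record
     * (0 on success, negative errno on failure). */
    static bool login_may_proceed(int send_result)
    {
            if (send_result == 0)
                    return true;            /* record delivered, audit is on  */
            if (send_result == -ECONNREFUSED)
                    return true;            /* read as "audit not configured" */
            return false;                   /* e.g. -EPERM: refuse the login  */
    }

With this patch a sender outside the init namespaces gets -ECONNREFUSED instead of -EPERM, so such logins take the second branch and proceed, even though nothing is actually being logged.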
This just opens the doors wide open on 3.14 and hopefully makes users happy, if not the audit system... Reported-by: Andre Tomt Reported-by: Adam Richter Signed-off-by: Eric Paris Signed-off-by: Linus Torvalds Conflicts: kernel/audit.c --- kernel/audit.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/audit.c b/kernel/audit.c index ad77d1e80895..873b965fdc58 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -608,8 +608,18 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) int err = 0; /* Only support initial user namespace for now. */ + /* + * We return ECONNREFUSED because it tricks userspace into thinking + * that audit was not configured into the kernel. Lots of users + * configure their PAM stack (because that's what the distro does) + * to reject login if unable to send messages to audit. If we return + * ECONNREFUSED the PAM stack thinks the kernel does not have audit + * configured in and will let login proceed. If we return EPERM + * userspace will reject all logins. This should be removed when we + * support non init namespaces!! + */ if ((current_user_ns() != &init_user_ns)) - return -EPERM; + return -ECONNREFUSED; switch (msg_type) { case AUDIT_LIST: -- cgit v1.2.3 From fbb32750a62df75d1ffea547f3908b21c5496d9f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 2 Feb 2014 21:09:54 -0500 Subject: pipe: kill ->map() and ->unmap() all pipe_buffer_operations have the same instances of those... Signed-off-by: Al Viro --- kernel/relay.c | 2 -- kernel/trace/trace.c | 4 ---- 2 files changed, 6 deletions(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index 5001c9887db1..98833f664fb6 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -1195,8 +1195,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe, static const struct pipe_buf_operations relay_pipe_buf_ops = { .can_merge = 0, - .map = generic_pipe_buf_map, - .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = relay_pipe_buf_release, .steal = generic_pipe_buf_steal, diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 24c1f2382557..7511de35257f 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4316,8 +4316,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, static const struct pipe_buf_operations tracing_pipe_buf_ops = { .can_merge = 0, - .map = generic_pipe_buf_map, - .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = generic_pipe_buf_release, .steal = generic_pipe_buf_steal, @@ -5194,8 +5192,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .can_merge = 0, - .map = generic_pipe_buf_map, - .unmap = generic_pipe_buf_unmap, .confirm = generic_pipe_buf_confirm, .release = buffer_pipe_buf_release, .steal = generic_pipe_buf_steal, -- cgit v1.2.3 From 56c4911aedbecc2bdf7940073e85d52b691e2509 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 2 Apr 2014 15:46:42 -0400 Subject: audit: do not cast audit_rule_data pointers pointlesly For some sort of legacy support audit_rule is a subset of (and first entry in) audit_rule_data. We don't actually need or use audit_rule. We just do a cast from one to the other for no gain what so ever. Stop the crazy casting. 
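As an illustration of the pattern being removed, a small standalone sketch using simplified stand-in structs rather than the real audit layouts:

    #include <stdio.h>

    /* Simplified stand-ins, not the real audit structures: the "data"
     * variant begins with the same members as the legacy one. */
    struct rule      { unsigned flags; unsigned action; };
    struct rule_data { unsigned flags; unsigned action; unsigned buflen; char buf[]; };

    /* Old style: take the legacy type and make callers cast down to it. */
    static void check_old(struct rule *r)      { printf("%u\n", r->flags); }

    /* New style: take the type every caller actually has. */
    static void check_new(struct rule_data *d) { printf("%u\n", d->flags); }

    int main(void)
    {
            struct rule_data d = { .flags = 1, .action = 2, .buflen = 0 };

            check_old((struct rule *)&d);  /* "works" only because the leading
                                              members line up exactly */
            check_new(&d);                 /* same information, no cast */
            return 0;
    }

The cast only works because the leading members coincide; passing the containing struct directly carries the same information without the layout assumption.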
Signed-off-by: Eric Paris --- kernel/auditfilter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 96c8a704f130..70101e0b184a 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -228,7 +228,7 @@ static int audit_match_signal(struct audit_entry *entry) #endif /* Common user-space to kernel rule translation. */ -static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) +static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule) { unsigned listnr; struct audit_entry *entry; @@ -405,7 +405,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, int i; char *str; - entry = audit_to_entry_common((struct audit_rule *)data); + entry = audit_to_entry_common(data); if (IS_ERR(entry)) goto exit_nofree; -- cgit v1.2.3 From d23082257d83e4bc89727d5aedee197e907999d2 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 2 Apr 2014 17:45:05 +0200 Subject: pid_namespace: pidns_get() should check task_active_pid_ns() != NULL pidns_get()->get_pid_ns() can hit ns == NULL. This task_struct can't go away, but task_active_pid_ns(task) is NULL if release_task(task) was already called. Alternatively we could change get_pid_ns(ns) to check ns != NULL, but it seems that other callers are fine. Signed-off-by: Oleg Nesterov Cc: Eric W. Biederman ebiederm@xmission.com> Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- kernel/pid_namespace.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 06c62de9c711..db95d8eb761b 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -318,7 +318,9 @@ static void *pidns_get(struct task_struct *task) struct pid_namespace *ns; rcu_read_lock(); - ns = get_pid_ns(task_active_pid_ns(task)); + ns = task_active_pid_ns(task); + if (ns) + get_pid_ns(ns); rcu_read_unlock(); return ns; -- cgit v1.2.3 From 81c98869faa5f3a9457c93efef908ef476326b31 Mon Sep 17 00:00:00 2001 From: Nishanth Aravamudan Date: Thu, 3 Apr 2014 14:46:25 -0700 Subject: kthread: ensure locality of task_struct allocations In the presence of memoryless nodes, numa_node_id() will return the current CPU's NUMA node, but that may not be where we expect to allocate from memory from. Instead, we should rely on the fallback code in the memory allocator itself, by using NUMA_NO_NODE. Also, when calling kthread_create_on_node(), use the nearest node with memory to the cpu in question, rather than the node it is running on. 
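As a hedged illustration of the second change, a kernel-style fragment (not a standalone program; worker_fn and cpu are placeholders) creating a per-CPU thread whose memory lands on the nearest node that actually has memory:

    /* Illustrative kernel-style fragment, not a standalone program;
     * worker_fn and cpu are placeholders.  For a CPU sitting on a
     * memoryless node, cpu_to_node(cpu) names a node with no pages,
     * while cpu_to_mem(cpu) names the nearest node that does have
     * memory, so the thread's task_struct and stack end up as close
     * to the CPU as possible. */
    struct task_struct *t;

    t = kthread_create_on_node(worker_fn, NULL, cpu_to_mem(cpu),
                               "worker/%d", cpu);
    if (!IS_ERR(t)) {
            kthread_bind(t, cpu);
            wake_up_process(t);
    }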
Signed-off-by: Nishanth Aravamudan Reviewed-by: Christoph Lameter Acked-by: David Rientjes Cc: Anton Blanchard Cc: Tejun Heo Cc: Oleg Nesterov Cc: Jan Kara Cc: Thomas Gleixner Cc: Tetsuo Handa Cc: Wanpeng Li Cc: Joonsoo Kim Cc: Ben Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/kthread.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/kthread.c b/kernel/kthread.c index b5ae3ee860a9..9a130ec06f7a 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -217,7 +217,7 @@ int tsk_fork_get_node(struct task_struct *tsk) if (tsk == kthreadd_task) return tsk->pref_node_fork; #endif - return numa_node_id(); + return NUMA_NO_NODE; } static void create_kthread(struct kthread_create_info *create) @@ -369,7 +369,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), { struct task_struct *p; - p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, + p = kthread_create_on_node(threadfn, data, cpu_to_mem(cpu), namefmt, cpu); if (IS_ERR(p)) return p; -- cgit v1.2.3 From 62572e29bc530b38921ef6059088b4788a9832a5 Mon Sep 17 00:00:00 2001 From: Ben Zhang Date: Thu, 3 Apr 2014 14:47:18 -0700 Subject: kernel/watchdog.c: touch_nmi_watchdog should only touch local cpu not every one I ran into a scenario where while one cpu was stuck and should have panic'd because of the NMI watchdog, it didn't. The reason was another cpu was spewing stack dumps on to the console. Upon investigation, I noticed that when writing to the console and also when dumping the stack, the watchdog is touched. This causes all the cpus to reset their NMI watchdog flags and the 'stuck' cpu just spins forever. This change causes the semantics of touch_nmi_watchdog to be changed slightly. Previously, I accidentally changed the semantics and we noticed there was a codepath in which touch_nmi_watchdog could be touched from a preemtible area. That caused a BUG() to happen when CONFIG_DEBUG_PREEMPT was enabled. I believe it was the acpi code. My attempt here re-introduces the change to have the touch_nmi_watchdog() code only touch the local cpu instead of all of the cpus. But instead of using __get_cpu_var(), I use the __raw_get_cpu_var() version. This avoids the preemption problem. However my reasoning wasn't because I was trying to be lazy. Instead I rationalized it as, well if preemption is enabled then interrupts should be enabled to and the NMI watchdog will have no reason to trigger. So it won't matter if the wrong cpu is touched because the percpu interrupt counters the NMI watchdog uses should still be incrementing. Don said: : I'm ok with this patch, though it does alter the behaviour of how : touch_nmi_watchdog works. For the most part I don't think most callers : need to touch all of the watchdogs (on each cpu). Perhaps a corner case : will pop up (the scheduler?? to mimic touch_all_softlockup_watchdogs() ). : : But this does address an issue where if a system is locked up and one cpu : is spewing out useful debug messages (or error messages), the hard lockup : will fail to go off. We have seen this on RHEL also. 
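A toy userspace model (not the kernel implementation) of the semantic change; NR_CPUS, nmi_touch and this_cpu stand in for the real per-cpu machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the per-cpu "touch" flag, not the kernel code. */
    #define NR_CPUS 4
    static bool nmi_touch[NR_CPUS];
    static int this_cpu;                    /* stands in for smp_processor_id() */

    /* Old behaviour: any console spew defers the NMI watchdog on every CPU,
     * so a genuinely stuck CPU is never flagged while another CPU prints. */
    static void touch_nmi_watchdog_old(void)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    nmi_touch[cpu] = true;
    }

    /* New behaviour: only the CPU doing the printing defers its own check. */
    static void touch_nmi_watchdog_new(void)
    {
            nmi_touch[this_cpu] = true;
    }

    int main(void)
    {
            this_cpu = 1;                   /* CPU 1 is spewing stack dumps */
            touch_nmi_watchdog_new();
            printf("cpu0 still eligible for hardlockup detection: %s\n",
                   nmi_touch[0] ? "no" : "yes");
            return 0;
    }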
Signed-off-by: Don Zickus Signed-off-by: Ben Zhang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 01c6f979486f..e90089fd78e0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -158,14 +158,14 @@ void touch_all_softlockup_watchdogs(void) #ifdef CONFIG_HARDLOCKUP_DETECTOR void touch_nmi_watchdog(void) { - if (watchdog_user_enabled) { - unsigned cpu; - - for_each_present_cpu(cpu) { - if (per_cpu(watchdog_nmi_touch, cpu) != true) - per_cpu(watchdog_nmi_touch, cpu) = true; - } - } + /* + * Using __raw here because some code paths have + * preemption enabled. If preemption is enabled + * then interrupts should be enabled too, in which + * case we shouldn't have to worry about the watchdog + * going off. + */ + __raw_get_cpu_var(watchdog_nmi_touch) = true; touch_softlockup_watchdog(); } EXPORT_SYMBOL(touch_nmi_watchdog); -- cgit v1.2.3 From d26914d11751b23ca2e8747725f2cae10c2f2c1b Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 3 Apr 2014 14:47:24 -0700 Subject: mm: optimize put_mems_allowed() usage Since put_mems_allowed() is strictly optional, its a seqcount retry, we don't need to evaluate the function if the allocation was in fact successful, saving a smp_rmb some loads and comparisons on some relative fast-paths. Since the naming, get/put_mems_allowed() does suggest a mandatory pairing, rename the interface, as suggested by Mel, to resemble the seqcount interface. This gives us: read_mems_allowed_begin() and read_mems_allowed_retry(), where it is important to note that the return value of the latter call is inverted from its previous incarnation. Signed-off-by: Peter Zijlstra Signed-off-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cpuset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e6b1b66afe52..f6fc7475f1a1 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1022,7 +1022,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk, task_lock(tsk); /* * Determine if a loop is necessary if another thread is doing - * get_mems_allowed(). If at least one node remains unchanged and + * read_mems_allowed_begin(). If at least one node remains unchanged and * tsk does not have a mempolicy, then an empty nodemask will not be * possible when mems_allowed is larger than a word. */ -- cgit v1.2.3 From 5509a5d27b971a90b940e148ca9ca53312e4fa7a Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 3 Apr 2014 14:48:19 -0700 Subject: drop_caches: add some documentation and info message There is plenty of anecdotal evidence and a load of blog posts suggesting that using "drop_caches" periodically keeps your system running in "tip top shape". Perhaps adding some kernel documentation will increase the amount of accurate data on its use. If we are not shrinking caches effectively, then we have real bugs. Using drop_caches will simply mask the bugs and make them harder to find, but certainly does not fix them, nor is it an appropriate "workaround" to limit the size of the caches. On the contrary, there have been bug reports on issues that turned out to be misguided use of cache dropping. Dropping caches is a very drastic and disruptive operation that is good for debugging and running tests, but if it creates bug reports from production use, kernel developers should be aware of its use. 
Add a bit more documentation about it, a syslog message to track down abusers, and vmstat drop counters to help analyze problem reports. [akpm@linux-foundation.org: checkpatch fixes] [hannes@cmpxchg.org: add runtime suppression control] Signed-off-by: Dave Hansen Signed-off-by: Michal Hocko Acked-by: KOSAKI Motohiro Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 09d2e2413605..5c14b547882e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -123,7 +123,7 @@ static int __maybe_unused neg_one = -1; static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; -static int __maybe_unused three = 3; +static int __maybe_unused four = 4; static unsigned long one_ul = 1; static int one_hundred = 100; #ifdef CONFIG_PRINTK @@ -1264,7 +1264,7 @@ static struct ctl_table vm_table[] = { .mode = 0644, .proc_handler = drop_caches_sysctl_handler, .extra1 = &one, - .extra2 = &three, + .extra2 = &four, }, #ifdef CONFIG_COMPACTION { -- cgit v1.2.3 From 6af9f7bf3c399e0ab1eee048e13572c6d4e15fe9 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Thu, 3 Apr 2014 14:48:25 -0700 Subject: sys_sysfs: Add CONFIG_SYSFS_SYSCALL sys_sysfs is an obsolete system call no longer supported by libc. - This patch adds a default CONFIG_SYSFS_SYSCALL=y - Option can be turned off in expert mode. - cond_syscall added to kernel/sys_ni.c [akpm@linux-foundation.org: tweak Kconfig help text] Signed-off-by: Fabian Frederick Cc: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys_ni.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 7078052284fd..74395a95b7e9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -146,6 +146,7 @@ cond_syscall(sys_io_destroy); cond_syscall(sys_io_submit); cond_syscall(sys_io_cancel); cond_syscall(sys_io_getevents); +cond_syscall(sys_sysfs); cond_syscall(sys_syslog); cond_syscall(sys_process_vm_readv); cond_syscall(sys_process_vm_writev); -- cgit v1.2.3 From 8f6c5ffc8987f4f5b5a3e9d557d94bbf3a9bf216 Mon Sep 17 00:00:00 2001 From: Wang YanQing Date: Thu, 3 Apr 2014 14:48:26 -0700 Subject: kernel/groups.c: remove return value of set_groups After commit 6307f8fee295 ("security: remove dead hook task_setgroups"), set_groups will always return zero, so we could just remove return value of set_groups. This patch reduces code size, and simplfies code to use set_groups, because we don't need to check its return value any more. [akpm@linux-foundation.org: remove obsolete claims from set_groups() comment] Signed-off-by: Wang YanQing Cc: "Eric W. Biederman" Cc: Serge Hallyn Cc: Eric Paris Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/groups.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'kernel') diff --git a/kernel/groups.c b/kernel/groups.c index 90cf1c38c8ea..451698f86cfa 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -157,17 +157,13 @@ int groups_search(const struct group_info *group_info, kgid_t grp) * set_groups - Change a group subscription in a set of credentials * @new: The newly prepared set of credentials to alter * @group_info: The group list to install - * - * Validate a group subscription and, if valid, insert it into a set - * of credentials. 
*/ -int set_groups(struct cred *new, struct group_info *group_info) +void set_groups(struct cred *new, struct group_info *group_info) { put_group_info(new->group_info); groups_sort(group_info); get_group_info(group_info); new->group_info = group_info; - return 0; } EXPORT_SYMBOL(set_groups); @@ -182,18 +178,12 @@ EXPORT_SYMBOL(set_groups); int set_current_groups(struct group_info *group_info) { struct cred *new; - int ret; new = prepare_creds(); if (!new) return -ENOMEM; - ret = set_groups(new, group_info); - if (ret < 0) { - abort_creds(new); - return ret; - } - + set_groups(new, group_info); return commit_creds(new); } -- cgit v1.2.3 From 69369a7003735d0d8ef22097e27a55a8bad9557a Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Thu, 3 Apr 2014 14:48:27 -0700 Subject: fs, kernel: permit disabling the uselib syscall uselib hasn't been used since libc5; glibc does not use it. Support turning it off. When disabled, also omit the load_elf_library implementation from binfmt_elf.c, which only uselib invokes. bloat-o-meter: add/remove: 0/4 grow/shrink: 0/1 up/down: 0/-785 (-785) function old new delta padzero 39 36 -3 uselib_flags 20 - -20 sys_uselib 168 - -168 SyS_uselib 168 - -168 load_elf_library 426 - -426 The new CONFIG_USELIB defaults to `y'. Signed-off-by: Josh Triplett Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sys_ni.c | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel') diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 74395a95b7e9..bc8d1b74a6b9 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -152,6 +152,7 @@ cond_syscall(sys_process_vm_readv); cond_syscall(sys_process_vm_writev); cond_syscall(compat_sys_process_vm_readv); cond_syscall(compat_sys_process_vm_writev); +cond_syscall(sys_uselib); /* arch-specific weak syscall entries */ cond_syscall(sys_pciconfig_read); -- cgit v1.2.3 From c96d6660dc65b0a90aea9834bfd8be1d5656da18 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Thu, 3 Apr 2014 14:48:35 -0700 Subject: kernel: audit/fix non-modular users of module_init in core code Code that is obj-y (always built-in) or dependent on a bool Kconfig (built-in or absent) can never be modular. So using module_init as an alias for __initcall can be somewhat misleading. Fix these up now, so that we can relocate module_init from init.h into module.h in the future. If we don't do this, we'd have to add module.h to obviously non-modular code, and that would be a worse thing. The audit targets the following module_init users for change: kernel/user.c obj-y kernel/kexec.c bool KEXEC (one instance per arch) kernel/profile.c bool PROFILING kernel/hung_task.c bool DETECT_HUNG_TASK kernel/sched/stats.c bool SCHEDSTATS kernel/user_namespace.c bool USER_NS Note that direct use of __initcall is discouraged, vs. one of the priority categorized subgroups. As __initcall gets mapped onto device_initcall, our use of subsys_initcall (which makes sense for these files) will thus change this registration from level 6-device to level 4-subsys (i.e. slightly earlier). However no observable impact of that difference has been observed during testing. Also, two instances of missing ";" at EOL are fixed in kexec. 
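A hedged kernel-style illustration of the resulting pattern for built-in-only code (foo_init() is a placeholder):

    /* Illustrative fragment for built-in-only code (kernel context
     * assumed; foo_init() is a placeholder).  subsys_initcall() registers
     * at level 4, a bit earlier than the level-6 device_initcall() that
     * module_init() falls back to in non-modular code. */
    static int __init foo_init(void)
    {
            return 0;
    }
    subsys_initcall(foo_init);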
Signed-off-by: Paul Gortmaker Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Eric Biederman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/hung_task.c | 3 +-- kernel/kexec.c | 4 ++-- kernel/profile.c | 2 +- kernel/sched/stats.c | 2 +- kernel/user.c | 3 +-- kernel/user_namespace.c | 2 +- 6 files changed, 7 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 0b9c169d577f..06bb1417b063 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -246,5 +246,4 @@ static int __init hung_task_init(void) return 0; } - -module_init(hung_task_init); +subsys_initcall(hung_task_init); diff --git a/kernel/kexec.c b/kernel/kexec.c index 45601cf41bee..c0d261c7db7b 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1235,7 +1235,7 @@ static int __init crash_notes_memory_init(void) } return 0; } -module_init(crash_notes_memory_init) +subsys_initcall(crash_notes_memory_init); /* @@ -1629,7 +1629,7 @@ static int __init crash_save_vmcoreinfo_init(void) return 0; } -module_init(crash_save_vmcoreinfo_init) +subsys_initcall(crash_save_vmcoreinfo_init); /* * Move into place and start executing a preloaded standalone diff --git a/kernel/profile.c b/kernel/profile.c index ebdd9c1a86b4..1b266dbe755a 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -604,5 +604,5 @@ int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ hotcpu_notifier(profile_cpu_callback, 0); return 0; } -module_init(create_proc_profile); +subsys_initcall(create_proc_profile); #endif /* CONFIG_PROC_FS */ diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index da98af347e8b..a476bea17fbc 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -142,4 +142,4 @@ static int __init proc_schedstat_init(void) proc_create("schedstat", 0, NULL, &proc_schedstat_operations); return 0; } -module_init(proc_schedstat_init); +subsys_initcall(proc_schedstat_init); diff --git a/kernel/user.c b/kernel/user.c index c006131beb77..294fc6a94168 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -222,5 +222,4 @@ static int __init uid_cache_init(void) return 0; } - -module_init(uid_cache_init); +subsys_initcall(uid_cache_init); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index dd06439b9c84..0d8f6023fd8d 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -902,4 +902,4 @@ static __init int user_namespaces_init(void) user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC); return 0; } -module_init(user_namespaces_init); +subsys_initcall(user_namespaces_init); -- cgit v1.2.3 From 28ab49ff7f3dcaf4df8d2bd0d4099b8c08285ed7 Mon Sep 17 00:00:00 2001 From: Daeseok Youn Date: Thu, 3 Apr 2014 14:48:36 -0700 Subject: kernel/resource.c: make reallocate_resource() static sparse says: kernel/resource.c:518:5: warning: symbol 'reallocate_resource' was not declared. Should it be static? Signed-off-by: Daeseok Youn Reviewed-by: Yasuaki Ishimatsu Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/resource.c b/kernel/resource.c index 673061c06da1..8957d686e29b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -511,7 +511,7 @@ static int find_resource(struct resource *root, struct resource *new, * @newsize: new size of the resource descriptor * @constraint: the size and alignment constraints to be met. 
*/ -int reallocate_resource(struct resource *root, struct resource *old, +static int reallocate_resource(struct resource *root, struct resource *old, resource_size_t newsize, struct resource_constraint *constraint) { -- cgit v1.2.3 From c64730b26f08cccfbc8fcbf169c304b4bd71dcac Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Thu, 3 Apr 2014 14:48:39 -0700 Subject: printk: remove obsolete check for log level "c" The kernel log level "c" was removed in commit 61e99ab8e35a ("printk: remove the now unnecessary "C" annotation for KERN_CONT"). It is no longer detected in printk_get_level(). Hence we do not need to check it in vprintk_emit. Signed-off-by: Petr Mladek Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jan Kara Cc: Michal Hocko Cc: Kay Sievers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk/printk.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 4dae9cbe9259..db7a02e05241 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1560,8 +1560,6 @@ asmlinkage int vprintk_emit(int facility, int level, level = kern_level - '0'; case 'd': /* KERN_DEFAULT */ lflags |= LOG_PREFIX; - case 'c': /* KERN_CONT */ - break; } text_len -= end_of_header - text; text = (char *)end_of_header; -- cgit v1.2.3 From e8c42d36ab86cf45f88c3a0e344233b1032fbf3d Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Thu, 3 Apr 2014 14:48:41 -0700 Subject: printk: add comment about tricky check for text buffer size There is no check for potential "text_len" overflow. It is not needed because only valid level is detected. It took me some time to understand why. It would deserve a comment ;-) Signed-off-by: Petr Mladek Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jan Kara Cc: Michal Hocko Cc: Kay Sievers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk/printk.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index db7a02e05241..012f3e40671d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1561,6 +1561,11 @@ asmlinkage int vprintk_emit(int facility, int level, case 'd': /* KERN_DEFAULT */ lflags |= LOG_PREFIX; } + /* + * No need to check length here because vscnprintf + * put '\0' at the end of the string. Only valid and + * newly printed level is detected. + */ text_len -= end_of_header - text; text = (char *)end_of_header; } -- cgit v1.2.3 From 39b25109b400ea397e64c417d8b965a53e2ee0f0 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Thu, 3 Apr 2014 14:48:42 -0700 Subject: printk: use also the last bytes in the ring buffer It seems that we have newer used the last byte in the ring buffer. In fact, we have newer used the last 4 bytes because of padding. First problem is in the check for free space. The exact number of free bytes is enough to store the length of data. Second problem is in the check where the ring buffer is rotated. The left side counts the first unused index. It is unused, so it might be the same as the size of the buffer. Note that the first problem has to be fixed together with the second one. Otherwise, the buffer is rotated even when there is enough space on the end of the buffer. Then the beginning of the buffer is rewritten and valid entries get corrupted. 
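A toy standalone model of the two boundary checks; BUF_LEN and HDR are made-up stand-ins for log_buf_len and the empty wrap-marker header:

    #include <stdio.h>

    /* Toy model of the two boundary checks, not the printk code itself.
     * 'size' is the full record (header + text + padding); HDR stands in
     * for the extra empty header that marks a wrap-around. */
    #define BUF_LEN 64u
    #define HDR      8u

    /* Enough free space for the record plus a possible wrap marker? */
    static int fits(unsigned free, unsigned size)
    {
            /* old: free >  size + HDR  -- the last bytes can never be used
             * new: free >= size + HDR  -- an exact fit is good enough      */
            return free >= size + HDR;
    }

    /* Does the record (plus wrap marker) run past the end of the buffer? */
    static int must_wrap(unsigned next_idx, unsigned size)
    {
            /* old: ... >= BUF_LEN  -- rotates even when it would just fit
             * new: ... >  BUF_LEN  -- next_idx may legitimately end up
             *                         equal to BUF_LEN (start over at 0)  */
            return next_idx + size + HDR > BUF_LEN;
    }

    int main(void)
    {
            printf("exact fit accepted: %d\n", fits(24, 16));      /* 1 after the fix */
            printf("wrap on exact end:  %d\n", must_wrap(40, 16)); /* 0 after the fix */
            return 0;
    }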
Signed-off-by: Petr Mladek Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jan Kara Cc: Michal Hocko Cc: Kay Sievers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk/printk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 012f3e40671d..b3a1790f9e05 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -319,7 +319,7 @@ static void log_store(int facility, int level, else free = log_first_idx - log_next_idx; - if (free > size + sizeof(struct printk_log)) + if (free >= size + sizeof(struct printk_log)) break; /* drop old messages until we have enough contiuous space */ @@ -327,7 +327,7 @@ static void log_store(int facility, int level, log_first_seq++; } - if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) { + if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) { /* * This message + an additional empty header does not fit * at the end of the buffer. Add an empty header with len == 0 -- cgit v1.2.3 From fce6e0338abe910ba6d4db0657ae8adc6aa1a72b Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Thu, 3 Apr 2014 14:48:43 -0700 Subject: printk: do not compute the size of the message twice This is just a tiny optimization. It removes duplicate computation of the message size. Signed-off-by: Petr Mladek Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Jan Kara Cc: Michal Hocko Cc: Kay Sievers Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk/printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index b3a1790f9e05..ff9faf4e3cd5 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -351,7 +351,7 @@ static void log_store(int facility, int level, else msg->ts_nsec = local_clock(); memset(log_dict(msg) + dict_len, 0, pad_len); - msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len; + msg->len = size; /* insert message */ log_next_idx += msg->len; -- cgit v1.2.3 From 72581487a61f6304a7cc32e189eb65fb1c920a53 Mon Sep 17 00:00:00 2001 From: Jane Li Date: Thu, 3 Apr 2014 14:48:45 -0700 Subject: printk: fix one circular lockdep warning about console_lock Fix a warning about possible circular locking dependency. If do in following sequence: enter suspend -> resume -> plug-out CPUx (echo 0 > cpux/online) lockdep will show warning as following: ====================================================== [ INFO: possible circular locking dependency detected ] 3.10.0 #2 Tainted: G O ------------------------------------------------------- sh/1271 is trying to acquire lock: (console_lock){+.+.+.}, at: console_cpu_notify+0x20/0x2c but task is already holding lock: (cpu_hotplug.lock){+.+.+.}, at: cpu_hotplug_begin+0x2c/0x58 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #2 (cpu_hotplug.lock){+.+.+.}: lock_acquire+0x98/0x12c mutex_lock_nested+0x50/0x3d8 cpu_hotplug_begin+0x2c/0x58 _cpu_up+0x24/0x154 cpu_up+0x64/0x84 smp_init+0x9c/0xd4 kernel_init_freeable+0x78/0x1c8 kernel_init+0x8/0xe4 ret_from_fork+0x14/0x2c -> #1 (cpu_add_remove_lock){+.+.+.}: lock_acquire+0x98/0x12c mutex_lock_nested+0x50/0x3d8 disable_nonboot_cpus+0x8/0xe8 suspend_devices_and_enter+0x214/0x448 pm_suspend+0x1e4/0x284 try_to_suspend+0xa4/0xbc process_one_work+0x1c4/0x4fc worker_thread+0x138/0x37c kthread+0xa4/0xb0 ret_from_fork+0x14/0x2c -> #0 (console_lock){+.+.+.}: __lock_acquire+0x1b38/0x1b80 lock_acquire+0x98/0x12c console_lock+0x54/0x68 console_cpu_notify+0x20/0x2c notifier_call_chain+0x44/0x84 __cpu_notify+0x2c/0x48 cpu_notify_nofail+0x8/0x14 _cpu_down+0xf4/0x258 cpu_down+0x24/0x40 store_online+0x30/0x74 dev_attr_store+0x18/0x24 sysfs_write_file+0x16c/0x19c vfs_write+0xb4/0x190 SyS_write+0x3c/0x70 ret_fast_syscall+0x0/0x48 Chain exists of: console_lock --> cpu_add_remove_lock --> cpu_hotplug.lock Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(cpu_hotplug.lock); lock(cpu_add_remove_lock); lock(cpu_hotplug.lock); lock(console_lock); *** DEADLOCK *** There are three locks involved in two sequence: a) pm suspend: console_lock (@suspend_console()) cpu_add_remove_lock (@disable_nonboot_cpus()) cpu_hotplug.lock (@_cpu_down()) b) Plug-out CPUx: cpu_add_remove_lock (@(cpu_down()) cpu_hotplug.lock (@_cpu_down()) console_lock (@console_cpu_notify()) => Lockdeps prints warning log. There should be not real deadlock, as flag of console_suspended can protect this. Although console_suspend() releases console_sem, it doesn't tell lockdep about it. That results in the lockdep warning about circular locking when doing the following: enter suspend -> resume -> plug-out CPUx (echo 0 > cpux/online) Fix the problem by telling lockdep we actually released the semaphore in console_suspend() and acquired it again in console_resume(). Signed-off-by: Jane Li Reviewed-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/printk/printk.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index ff9faf4e3cd5..a45b50962295 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1883,6 +1883,7 @@ void suspend_console(void) console_lock(); console_suspended = 1; up(&console_sem); + mutex_release(&console_lock_dep_map, 1, _RET_IP_); } void resume_console(void) @@ -1890,6 +1891,7 @@ void resume_console(void) if (!console_suspend_enabled) return; down(&console_sem); + mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_); console_suspended = 0; console_unlock(); } -- cgit v1.2.3 From c6b3d5bcd67c75961a1e8b9564d1475c0f194a84 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 4 Apr 2014 17:14:41 +0800 Subject: cgroup: fix top cgroup refcnt leak As mount() and kill_sb() is not a one-to-one match, If we mount the same cgroupfs in serveral mount points, and then umount all of them, kill_sb() will be called only once. Try: # mount -t cgroup -o cpuacct xxx /cgroup # mount -t cgroup -o cpuacct xxx /cgroup2 # cat /proc/cgroups | grep cpuacct cpuacct 2 1 1 # umount /cgroup # umount /cgroup2 # cat /proc/cgroups | grep cpuacct cpuacct 2 1 1 You'll see cgroupfs will never be freed. 
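Read as a sketch (variables as in cgroup_mount(), not a standalone excerpt), the reference balance the fix restores looks like this:

    /* Sketch only - variables as in cgroup_mount().  The mount path holds
     * a reference on the root's top cgroup; if kernfs reports that it
     * reused an existing super_block (new_sb is false), kill_sb() will
     * run only once for all such mounts, so the reference taken for this
     * particular mount must be dropped here rather than waiting for a
     * kill_sb() that will never come. */
    dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb);
    if (IS_ERR(dentry) || !new_sb)
            cgroup_put(&root->cgrp);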
Signed-off-by: Li Zefan Signed-off-by: Tejun Heo --- kernel/cgroup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index fede3d3f28ff..0dfc7324c789 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1487,6 +1487,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, struct cgroup_sb_opts opts; struct dentry *dentry; int ret; + bool new_sb; /* * The first time anyone tries to mount a cgroup, enable the list @@ -1603,8 +1604,8 @@ out_unlock: if (ret) return ERR_PTR(ret); - dentry = kernfs_mount(fs_type, flags, root->kf_root, NULL); - if (IS_ERR(dentry)) + dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb); + if (IS_ERR(dentry) || !new_sb) cgroup_put(&root->cgrp); return dentry; } -- cgit v1.2.3 From b8780c363d808a726a34793caa900923d32b6b80 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Apr 2014 17:33:06 +0200 Subject: sched: remove sleep_on() and friends This is the final piece in the puzzle, as all patches to remove the last users of \(interruptible_\|\)sleep_on\(_timeout\|\) have made it into the 3.15 merge window. The work was long overdue, and this interface in particular should not have survived the BKL removal that was done a couple of years ago. Citing Jon Corbet from http://lwn.net/2001/0201/kernel.php3": "[...] it was suggested that the janitors look for and fix all code that calls sleep_on() [...] since (1) almost all such code is incorrect, and (2) Linus has agreed that those functions should be removed in the 2.5 development series". We haven't quite made it for 2.5, but maybe we can merge this for 3.15. Signed-off-by: Arnd Bergmann Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Linus Torvalds --- kernel/sched/core.c | 46 ---------------------------------------------- 1 file changed, 46 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1d1b87b36778..0ff3f34bc7e3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2845,52 +2845,6 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, } EXPORT_SYMBOL(default_wake_function); -static long __sched -sleep_on_common(wait_queue_head_t *q, int state, long timeout) -{ - unsigned long flags; - wait_queue_t wait; - - init_waitqueue_entry(&wait, current); - - __set_current_state(state); - - spin_lock_irqsave(&q->lock, flags); - __add_wait_queue(q, &wait); - spin_unlock(&q->lock); - timeout = schedule_timeout(timeout); - spin_lock_irq(&q->lock); - __remove_wait_queue(q, &wait); - spin_unlock_irqrestore(&q->lock, flags); - - return timeout; -} - -void __sched interruptible_sleep_on(wait_queue_head_t *q) -{ - sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); -} -EXPORT_SYMBOL(interruptible_sleep_on); - -long __sched -interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) -{ - return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); -} -EXPORT_SYMBOL(interruptible_sleep_on_timeout); - -void __sched sleep_on(wait_queue_head_t *q) -{ - sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); -} -EXPORT_SYMBOL(sleep_on); - -long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) -{ - return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); -} -EXPORT_SYMBOL(sleep_on_timeout); - #ifdef CONFIG_RT_MUTEXES /* -- cgit v1.2.3 From 49957f8e2a43035a97d05bddefa394492a969c0d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 7 Apr 2014 16:44:47 -0400 Subject: cgroup: newly created dirs and files should be owned 
by the creator While converting cgroup to kernfs, 2bd59d48ebfb ("cgroup: convert to kernfs") accidentally dropped the logic which makes newly created cgroup dirs and files owned by the current uid / gid. This broke cases where cgroup subtree management is delegated to !root as the sub manager wouldn't be able to create more than single level of hierarchy or put tasks into child cgroups it created. Among other things, this breaks user session management in systemd and one of the symptoms was 90s hang during shutdown. User session systemd running as the user creates a sub-service to initiate shutdown and tries to put kill(1) into it but fails because cgroup.procs is owned by root. This leads to 90s hang during shutdown. Implement cgroup_kn_set_ugid() which sets a kn's uid and gid to those of the caller and use it from file and dir creation paths. Signed-off-by: Tejun Heo Reported-by: Linus Torvalds --- kernel/cgroup.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 0dfc7324c789..9fcdaa705b6c 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2346,11 +2346,26 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, return ret; } +/* set uid and gid of cgroup dirs and files to that of the creator */ +static int cgroup_kn_set_ugid(struct kernfs_node *kn) +{ + struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = current_fsuid(), + .ia_gid = current_fsgid(), }; + + if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && + gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) + return 0; + + return kernfs_setattr(kn, &iattr); +} + static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) { char name[CGROUP_FILE_NAME_MAX]; struct kernfs_node *kn; struct lock_class_key *key = NULL; + int ret; #ifdef CONFIG_DEBUG_LOCK_ALLOC key = &cft->lockdep_key; @@ -2358,7 +2373,13 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), cgroup_file_mode(cft), 0, cft->kf_ops, cft, NULL, false, key); - return PTR_ERR_OR_ZERO(kn); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = cgroup_kn_set_ugid(kn); + if (ret) + kernfs_remove(kn); + return ret; } /** @@ -3753,6 +3774,10 @@ static long cgroup_create(struct cgroup *parent, const char *name, */ idr_replace(&root->cgroup_idr, cgrp, cgrp->id); + err = cgroup_kn_set_ugid(kn); + if (err) + goto err_destroy; + err = cgroup_addrm_files(cgrp, cgroup_base_files, true); if (err) goto err_destroy; -- cgit v1.2.3 From a0715cc22601e8830ace98366c0c2bd8da52af52 Mon Sep 17 00:00:00 2001 From: Alex Thorlton Date: Mon, 7 Apr 2014 15:37:10 -0700 Subject: mm, thp: add VM_INIT_DEF_MASK and PRCTL_THP_DISABLE Add VM_INIT_DEF_MASK, to allow us to set the default flags for VMs. It also adds a prctl control which allows us to set the THP disable bit in mm->def_flags so that VMs will pick up the setting as they are created. Signed-off-by: Alex Thorlton Suggested-by: Oleg Nesterov Cc: Gerald Schaefer Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Christian Borntraeger Cc: Paolo Bonzini Cc: "Kirill A. Shutemov" Cc: Mel Gorman Acked-by: Rik van Riel Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Andrea Arcangeli Cc: Oleg Nesterov Cc: "Eric W. 
Biederman" Cc: Alexander Viro Cc: Johannes Weiner Cc: David Rientjes Cc: Paolo Bonzini Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 11 ++++++++--- kernel/sys.c | 15 +++++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index abc45890f0a5..e40c0a01d5a6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -530,8 +530,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); INIT_LIST_HEAD(&mm->mmlist); - mm->flags = (current->mm) ? - (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; mm->core_state = NULL; atomic_long_set(&mm->nr_ptes, 0); memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); @@ -540,8 +538,15 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) mm_init_owner(mm, p); clear_tlb_flush_pending(mm); - if (likely(!mm_alloc_pgd(mm))) { + if (current->mm) { + mm->flags = current->mm->flags & MMF_INIT_MASK; + mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; + } else { + mm->flags = default_dump_filter; mm->def_flags = 0; + } + + if (likely(!mm_alloc_pgd(mm))) { mmu_notifier_mm_init(mm); return mm; } diff --git a/kernel/sys.c b/kernel/sys.c index adaeab6f7a87..fba0f29401ea 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1996,6 +1996,21 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, if (arg2 || arg3 || arg4 || arg5) return -EINVAL; return current->no_new_privs ? 1 : 0; + case PR_GET_THP_DISABLE: + if (arg2 || arg3 || arg4 || arg5) + return -EINVAL; + error = !!(me->mm->def_flags & VM_NOHUGEPAGE); + break; + case PR_SET_THP_DISABLE: + if (arg3 || arg4 || arg5) + return -EINVAL; + down_write(&me->mm->mmap_sem); + if (arg2) + me->mm->def_flags |= VM_NOHUGEPAGE; + else + me->mm->def_flags &= ~VM_NOHUGEPAGE; + up_write(&me->mm->mmap_sem); + break; default: error = -EINVAL; break; -- cgit v1.2.3 From 615d6e8756c87149f2d4c1b93d471bca002bd849 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Mon, 7 Apr 2014 15:37:25 -0700 Subject: mm: per-thread vma caching This patch is a continuation of efforts trying to optimize find_vma(), avoiding potentially expensive rbtree walks to locate a vma upon faults. The original approach (https://lkml.org/lkml/2013/11/1/410), where the largest vma was also cached, ended up being too specific and random, thus further comparison with other approaches were needed. There are two things to consider when dealing with this, the cache hit rate and the latency of find_vma(). Improving the hit-rate does not necessarily translate in finding the vma any faster, as the overhead of any fancy caching schemes can be too high to consider. We currently cache the last used vma for the whole address space, which provides a nice optimization, reducing the total cycles in find_vma() by up to 250%, for workloads with good locality. On the other hand, this simple scheme is pretty much useless for workloads with poor locality. Analyzing ebizzy runs shows that, no matter how many threads are running, the mmap_cache hit rate is less than 2%, and in many situations below 1%. The proposed approach is to replace this scheme with a small per-thread cache, maximizing hit rates at a very low maintenance cost. Invalidations are performed by simply bumping up a 32-bit sequence number. The only expensive operation is in the rare case of a seq number overflow, where all caches that share the same address space are flushed. 
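A toy userspace model of the scheme, with made-up types standing in for mm_struct and task_struct; it only illustrates the page-number hashing and the sequence-number invalidation:

    #include <stdint.h>
    #include <stdio.h>

    #define VMACACHE_SIZE 4
    #define PAGE_SHIFT    12

    struct vma   { unsigned long start, end; };
    struct mm    { uint32_t seqnum; };               /* bump to invalidate     */
    struct cache { uint32_t seqnum; struct vma *slot[VMACACHE_SIZE]; };

    static unsigned idx(unsigned long addr)          /* keyed by page number   */
    {
            return (addr >> PAGE_SHIFT) % VMACACHE_SIZE;
    }

    static struct vma *cache_find(struct mm *mm, struct cache *c, unsigned long addr)
    {
            struct vma *v;

            if (c->seqnum != mm->seqnum) {           /* stale: flush lazily    */
                    for (int i = 0; i < VMACACHE_SIZE; i++)
                            c->slot[i] = NULL;
                    c->seqnum = mm->seqnum;
                    return NULL;
            }
            v = c->slot[idx(addr)];
            return (v && v->start <= addr && addr < v->end) ? v : NULL;
    }

    static void cache_update(struct mm *mm, struct cache *c,
                             unsigned long addr, struct vma *v)
    {
            c->seqnum = mm->seqnum;
            c->slot[idx(addr)] = v;
    }

    static void cache_invalidate(struct mm *mm)      /* e.g. on munmap         */
    {
            mm->seqnum++;  /* the kernel flushes every thread if this overflows */
    }

    int main(void)
    {
            struct mm mm = { 0 };
            struct cache c = { 0 };
            struct vma v = { 0x1000, 0x5000 };

            cache_update(&mm, &c, 0x1234, &v);
            printf("hit:  %d\n", cache_find(&mm, &c, 0x1234) != NULL);   /* 1 */
            cache_invalidate(&mm);
            printf("miss: %d\n", cache_find(&mm, &c, 0x1234) != NULL);   /* 0 */
            return 0;
    }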
Upon a miss, the proposed replacement policy is based on the page number that contains the virtual address in question. Concretely, the following results are seen on an 80 core, 8 socket x86-64 box: 1) System bootup: Most programs are single threaded, so the per-thread scheme does improve ~50% hit rate by just adding a few more slots to the cache. +----------------+----------+------------------+ | caching scheme | hit-rate | cycles (billion) | +----------------+----------+------------------+ | baseline | 50.61% | 19.90 | | patched | 73.45% | 13.58 | +----------------+----------+------------------+ 2) Kernel build: This one is already pretty good with the current approach as we're dealing with good locality. +----------------+----------+------------------+ | caching scheme | hit-rate | cycles (billion) | +----------------+----------+------------------+ | baseline | 75.28% | 11.03 | | patched | 88.09% | 9.31 | +----------------+----------+------------------+ 3) Oracle 11g Data Mining (4k pages): Similar to the kernel build workload. +----------------+----------+------------------+ | caching scheme | hit-rate | cycles (billion) | +----------------+----------+------------------+ | baseline | 70.66% | 17.14 | | patched | 91.15% | 12.57 | +----------------+----------+------------------+ 4) Ebizzy: There's a fair amount of variation from run to run, but this approach always shows nearly perfect hit rates, while baseline is just about non-existent. The amounts of cycles can fluctuate between anywhere from ~60 to ~116 for the baseline scheme, but this approach reduces it considerably. For instance, with 80 threads: +----------------+----------+------------------+ | caching scheme | hit-rate | cycles (billion) | +----------------+----------+------------------+ | baseline | 1.06% | 91.54 | | patched | 99.97% | 14.18 | +----------------+----------+------------------+ [akpm@linux-foundation.org: fix nommu build, per Davidlohr] [akpm@linux-foundation.org: document vmacache_valid() logic] [akpm@linux-foundation.org: attempt to untangle header files] [akpm@linux-foundation.org: add vmacache_find() BUG_ON] [hughd@google.com: add vmacache_valid_mm() (from Oleg)] [akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: adjust and enhance comments] Signed-off-by: Davidlohr Bueso Reviewed-by: Rik van Riel Acked-by: Linus Torvalds Reviewed-by: Michel Lespinasse Cc: Oleg Nesterov Tested-by: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/debug/debug_core.c | 14 +++++++++++--- kernel/fork.c | 7 ++++++- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 99982a70ddad..2956c8da1605 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -224,10 +225,17 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) if (!CACHE_FLUSH_IS_SAFE) return; - if (current->mm && current->mm->mmap_cache) { - flush_cache_range(current->mm->mmap_cache, - addr, addr + BREAK_INSTR_SIZE); + if (current->mm) { + int i; + + for (i = 0; i < VMACACHE_SIZE; i++) { + if (!current->vmacache[i]) + continue; + flush_cache_range(current->vmacache[i], + addr, addr + BREAK_INSTR_SIZE); + } } + /* Force flush instruction cache if it was outside the mm */ flush_icache_range(addr, addr + BREAK_INSTR_SIZE); } diff --git a/kernel/fork.c b/kernel/fork.c index e40c0a01d5a6..bc0e96b78dfd 100644 --- a/kernel/fork.c +++ 
b/kernel/fork.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #include #include #include @@ -364,7 +366,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) mm->locked_vm = 0; mm->mmap = NULL; - mm->mmap_cache = NULL; + mm->vmacache_seqnum = 0; mm->map_count = 0; cpumask_clear(mm_cpumask(mm)); mm->mm_rb = RB_ROOT; @@ -882,6 +884,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) if (!oldmm) return 0; + /* initialize the new vmacache entries */ + vmacache_flush(tsk); + if (clone_flags & CLONE_VM) { atomic_inc(&oldmm->mm_users); mm = oldmm; -- cgit v1.2.3 From 514ddb446c5c5a238eca32b7052b7a8accae4e93 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 7 Apr 2014 15:37:27 -0700 Subject: fork: collapse copy_flags into copy_process copy_flags() does not use the clone_flags formal and can be collapsed into copy_process() for cleaner code. Signed-off-by: David Rientjes Cc: Johannes Weiner Cc: Michal Hocko Cc: KAMEZAWA Hiroyuki Cc: Christoph Lameter Cc: Pekka Enberg Cc: Tejun Heo Cc: Mel Gorman Cc: Oleg Nesterov Cc: Rik van Riel Cc: Jianguo Wu Cc: Tim Hockin Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index bc0e96b78dfd..c777964c0662 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1080,15 +1080,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) return 0; } -static void copy_flags(unsigned long clone_flags, struct task_struct *p) -{ - unsigned long new_flags = p->flags; - - new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); - new_flags |= PF_FORKNOEXEC; - p->flags = new_flags; -} - SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) { current->clear_child_tid = tidptr; @@ -1238,7 +1229,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ - copy_flags(clone_flags, p); + p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER); + p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); -- cgit v1.2.3 From f0432d159601f96839f514f286eaa5b75c4112dc Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 7 Apr 2014 15:37:30 -0700 Subject: mm, mempolicy: remove per-process flag PF_MEMPOLICY is an unnecessary optimization for CONFIG_SLAB users. There's no significant performance degradation to checking current->mempolicy rather than current->flags & PF_MEMPOLICY in the allocation path, especially since this is considered unlikely(). Running TCP_RR with netperf-2.4.5 through localhost on 16 cpu machine with 64GB of memory and without a mempolicy: threads before after 16 1249409 1244487 32 1281786 1246783 48 1239175 1239138 64 1244642 1241841 80 1244346 1248918 96 1266436 1254316 112 1307398 1312135 128 1327607 1326502 Per-process flags are a scarce resource so we should free them up whenever possible and make them available. We'll be using it shortly for memcg oom reserves. 
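A hedged illustration of the allocation-path change; the surrounding slab code is paraphrased, only the test itself is the point:

    /* before: a per-process flag that had to be kept in sync with the pointer */
    if (unlikely(current->flags & PF_MEMPOLICY))
            objp = alternate_node_alloc(cachep, flags);

    /* after: test the pointer directly, no flag bit to maintain */
    if (unlikely(current->mempolicy))
            objp = alternate_node_alloc(cachep, flags);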
Signed-off-by: David Rientjes Cc: Johannes Weiner Cc: Michal Hocko Cc: KAMEZAWA Hiroyuki Cc: Christoph Lameter Cc: Pekka Enberg Cc: Tejun Heo Cc: Mel Gorman Cc: Oleg Nesterov Cc: Rik van Riel Cc: Jianguo Wu Cc: Tim Hockin Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index c777964c0662..e905e9c6b224 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1276,7 +1276,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->mempolicy = NULL; goto bad_fork_cleanup_threadgroup_lock; } - mpol_fix_fork_child_flag(p); #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; -- cgit v1.2.3 From 539a13b47e462d28c48f076c63871580f694a366 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 7 Apr 2014 15:37:32 -0700 Subject: res_counter: remove interface for locked charging and uncharging The res_counter_{charge,uncharge}_locked() variants are not used in the kernel outside of the resource counter code itself, so remove the interface. Signed-off-by: David Rientjes Acked-by: Michal Hocko Cc: Johannes Weiner Cc: KAMEZAWA Hiroyuki Cc: Christoph Lameter Cc: Pekka Enberg Cc: Tejun Heo Cc: Mel Gorman Cc: Oleg Nesterov Cc: Rik van Riel Cc: Jianguo Wu Cc: Tim Hockin Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/res_counter.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'kernel') diff --git a/kernel/res_counter.c b/kernel/res_counter.c index 4aa8a305aede..51dbac6a3633 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c @@ -22,8 +22,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent) counter->parent = parent; } -int res_counter_charge_locked(struct res_counter *counter, unsigned long val, - bool force) +static u64 res_counter_uncharge_locked(struct res_counter *counter, + unsigned long val) +{ + if (WARN_ON(counter->usage < val)) + val = counter->usage; + + counter->usage -= val; + return counter->usage; +} + +static int res_counter_charge_locked(struct res_counter *counter, + unsigned long val, bool force) { int ret = 0; @@ -86,15 +96,6 @@ int res_counter_charge_nofail(struct res_counter *counter, unsigned long val, return __res_counter_charge(counter, val, limit_fail_at, true); } -u64 res_counter_uncharge_locked(struct res_counter *counter, unsigned long val) -{ - if (WARN_ON(counter->usage < val)) - val = counter->usage; - - counter->usage -= val; - return counter->usage; -} - u64 res_counter_uncharge_until(struct res_counter *counter, struct res_counter *top, unsigned long val) -- cgit v1.2.3 From c39df5fa37b0623589508c95515b4aa1531c524e Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:29 -0700 Subject: exit: call disassociate_ctty() before exit_task_namespaces() Commit 8aac62706ada ("move exit_task_namespaces() outside of exit_notify()") breaks pppd and the exiting service crashes the kernel: BUG: unable to handle kernel NULL pointer dereference at 0000000000000028 IP: ppp_register_channel+0x13/0x20 [ppp_generic] Call Trace: ppp_asynctty_open+0x12b/0x170 [ppp_async] tty_ldisc_open.isra.2+0x27/0x60 tty_ldisc_hangup+0x1e3/0x220 __tty_hangup+0x2c4/0x440 disassociate_ctty+0x61/0x270 do_exit+0x7f2/0xa50 ppp_register_channel() needs ->net_ns and current->nsproxy == NULL. 
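A heavily simplified illustration (kernel context assumed) of why the ordering matters:

    /* The tty hangup triggered by disassociate_ctty() reaches
     * ppp_register_channel(), which looks up the per-netns state
     * through the exiting task: */
    struct net *net = current->nsproxy->net_ns;
    /* Once exit_task_namespaces() has run, current->nsproxy is NULL and
     * this load is the reported NULL dereference - hence the reordering. */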
Move disassociate_ctty() before exit_task_namespaces(), it doesn't make sense to delay it after perf_event_exit_task() or cgroup_exit(). This also allows to use task_work_add() inside the (nontrivial) code paths in disassociate_ctty(). Investigated by Peter Hurley. Signed-off-by: Oleg Nesterov Reported-by: Sree Harsha Totakura Cc: Peter Hurley Cc: Sree Harsha Totakura Cc: "Eric W. Biederman" Cc: Jeff Dike Cc: Ingo Molnar Cc: Andrey Vagin Cc: Al Viro Cc: [v3.10+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 6480d1c85d7a..11f9e39a7368 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -784,6 +784,8 @@ void do_exit(long code) exit_shm(tsk); exit_files(tsk); exit_fs(tsk); + if (group_dead) + disassociate_ctty(1); exit_task_namespaces(tsk); exit_task_work(tsk); check_stack_usage(); @@ -799,13 +801,9 @@ void do_exit(long code) cgroup_exit(tsk); - if (group_dead) - disassociate_ctty(1); - module_put(task_thread_info(tsk)->exec_domain->module); proc_exit_connector(tsk); - /* * FIXME: do that only when needed, using sched_exit tracepoint */ -- cgit v1.2.3 From 4bcb8232cf4eb061b086c10f56b6808adcdb5a93 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:30 -0700 Subject: exit: move check_stack_usage() to the end of do_exit() It is not clear why check_stack_usage() is called so early and thus it never checks the stack usage in, say, exit_notify() or flush_ptrace_hw_breakpoint() or other functions which are only called by do_exit(). Move the callsite down to the last preempt_disable/schedule. Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 11f9e39a7368..171c9a9d7b00 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -788,7 +788,6 @@ void do_exit(long code) disassociate_ctty(1); exit_task_namespaces(tsk); exit_task_work(tsk); - check_stack_usage(); exit_thread(); /* @@ -842,6 +841,7 @@ void do_exit(long code) validate_creds_for_do_exit(tsk); + check_stack_usage(); preempt_disable(); if (tsk->nr_dirtied) __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); -- cgit v1.2.3 From ef9823939e5acd5d323ff61fbc427ef998dd203e Mon Sep 17 00:00:00 2001 From: Guillaume Morin Date: Mon, 7 Apr 2014 15:38:31 -0700 Subject: kernel/exit.c: call proc_exit_connector() after exit_state is set The process events connector delivers a notification when a process exits. This is really convenient for a process that spawns and wants to monitor its children through an epoll-able() interface. Unfortunately, there is a small window between when the event is delivered and the child become wait()-able. This is creates a race if the parent wants to make sure that it knows about the exit, e.g pid_t pid = fork(); if (pid > 0) { register_interest_for_pid(pid); if (waitpid(pid, NULL, WNOHANG) > 0) { /* We might have raced with exit() */ } return; } /* Child */ execve(...) register_interest_for_pid() would be telling the the connector socket reader to pay attention to events related to pid. Though this is not a bug, I think it would make the connector a bit more usable if this race was closed by simply moving the call to proc_exit_connector() from just before exit_notify() to right after. 
Oleg said: : Even with this patch the code above is still "racy" if the child is : multi-threaded. Plus it should obviously filter-out subthreads. And : afaics there is no way to make it reliable, even if you change the code : above so that waitpid() is called only after the last thread exits WNOHANG : still can fail. Signed-off-by: Guillaume Morin Cc: Matt Helsley Cc: Oleg Nesterov Cc: David S. Miller Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 171c9a9d7b00..decf648574f6 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -802,13 +802,13 @@ void do_exit(long code) module_put(task_thread_info(tsk)->exec_domain->module); - proc_exit_connector(tsk); /* * FIXME: do that only when needed, using sched_exit tracepoint */ flush_ptrace_hw_breakpoint(tsk); exit_notify(tsk, group_dead); + proc_exit_connector(tsk); #ifdef CONFIG_NUMA task_lock(tsk); mpol_put(tsk->mempolicy); -- cgit v1.2.3 From dfccbb5e49a621c1b21a62527d61fc4305617aca Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:41 -0700 Subject: wait: fix reparent_leader() vs EXIT_DEAD->EXIT_ZOMBIE race wait_task_zombie() first does EXIT_ZOMBIE->EXIT_DEAD transition and drops tasklist_lock. If this task is not the natural child and it is traced, we change its state back to EXIT_ZOMBIE for ->real_parent. The last transition is racy, this is even documented in 50b8d257486a "ptrace: partially fix the do_wait(WEXITED) vs EXIT_DEAD->EXIT_ZOMBIE race". wait_consider_task() tries to detect this transition and clear ->notask_error but we can't rely on ptrace_reparented(), debugger can exit and do ptrace_unlink() before its sub-thread sets EXIT_ZOMBIE. And there is another problem which were missed before: this transition can also race with reparent_leader() which doesn't reset >exit_signal if EXIT_DEAD, assuming that this task must be reaped by someone else. So the tracee can be re-parented with ->exit_signal != SIGCHLD, and if /sbin/init doesn't use __WALL it becomes unreapable. Change reparent_leader() to update ->exit_signal even if EXIT_DEAD. Note: this is the simple temporary hack for -stable, it doesn't try to solve all problems, it will be reverted by the next changes. Signed-off-by: Oleg Nesterov Reported-by: Jan Kratochvil Reported-by: Michal Schmidt Tested-by: Michal Schmidt Cc: Al Viro Cc: Lennart Poettering Cc: Roland McGrath Cc: Tejun Heo Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index decf648574f6..e354cbb13a9b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -560,9 +560,6 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, struct list_head *dead) { list_move_tail(&p->sibling, &p->real_parent->children); - - if (p->exit_state == EXIT_DEAD) - return; /* * If this is a threaded reparent there is no need to * notify anyone anything has happened. @@ -570,9 +567,19 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, if (same_thread_group(p->real_parent, father)) return; - /* We don't want people slaying init. */ + /* + * We don't want people slaying init. + * + * Note: we do this even if it is EXIT_DEAD, wait_task_zombie() + * can change ->exit_state to EXIT_ZOMBIE. 
If this is the final + * state, do_notify_parent() was already called and ->exit_signal + * doesn't matter. + */ p->exit_signal = SIGCHLD; + if (p->exit_state == EXIT_DEAD) + return; + /* If it has exited notify the new parent about this child's death. */ if (!p->ptrace && p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { -- cgit v1.2.3 From abd50b39e783e1b6c75c7534c37f1eb2d94a89cd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:42 -0700 Subject: wait: introduce EXIT_TRACE to avoid the racy EXIT_DEAD->EXIT_ZOMBIE transition wait_task_zombie() first does EXIT_ZOMBIE->EXIT_DEAD transition and drops tasklist_lock. If this task is not the natural child and it is traced, we change its state back to EXIT_ZOMBIE for ->real_parent. The last transition is racy, this is even documented in 50b8d257486a "ptrace: partially fix the do_wait(WEXITED) vs EXIT_DEAD->EXIT_ZOMBIE race". wait_consider_task() tries to detect this transition and clear ->notask_error but we can't rely on ptrace_reparented(), debugger can exit and do ptrace_unlink() before its sub-thread sets EXIT_ZOMBIE. And there is another problem which were missed before: this transition can also race with reparent_leader() which doesn't reset >exit_signal if EXIT_DEAD, assuming that this task must be reaped by someone else. So the tracee can be re-parented with ->exit_signal != SIGCHLD, and if /sbin/init doesn't use __WALL it becomes unreapable. This was fixed by the previous commit, but it was the temporary hack. 1. Add the new exit_state, EXIT_TRACE. It means that the task is the traced zombie, debugger is going to detach and notify its natural parent. This new state is actually EXIT_ZOMBIE | EXIT_DEAD. This way we can avoid the changes in proc/kgdb code, get_task_state() still reports "X (dead)" in this case. Note: with or without this change userspace can see Z -> X -> Z transition. Not really bad, but probably makes sense to fix. 2. Change wait_task_zombie() to use EXIT_TRACE instead of EXIT_DEAD if we need to notify the ->real_parent. 3. Revert the previous hack in reparent_leader(), now that EXIT_DEAD is always the final state we can safely ignore such a task. 4. Change wait_consider_task() to check EXIT_TRACE separately and kill the racy and no longer needed ptrace_reparented() case. If ptrace == T an EXIT_TRACE thread should be simply ignored, the owner of this state is going to ptrace_unlink() this task. We can pretend that it was already removed from ->ptraced list. Otherwise we should skip this thread too but clear ->notask_error, we must be the natural parent and debugger is going to untrace and notify us. IOW, this doesn't differ from "EXIT_ZOMBIE && p->ptrace" even if the task was already untraced. 
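Condensed from the wait_task_zombie() hunk below (illustration only, not new code): the zombie is claimed with a single cmpxchg(), so exactly one waiter wins the EXIT_ZOMBIE -> EXIT_TRACE/EXIT_DEAD transition and every other caller backs off.

	/* Condensed illustration of the claim pattern used below. */
	state = ptrace_reparented(p) ? EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;	/* some other thread already owns this exit */
	/* From here on we are the only reaper of @p. */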
Signed-off-by: Oleg Nesterov Reported-by: Jan Kratochvil Reported-by: Michal Schmidt Tested-by: Michal Schmidt Cc: Al Viro Cc: Lennart Poettering Cc: Roland McGrath Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 50 +++++++++++++++++++++----------------------------- 1 file changed, 21 insertions(+), 29 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index e354cbb13a9b..022a0ff17318 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -560,6 +560,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, struct list_head *dead) { list_move_tail(&p->sibling, &p->real_parent->children); + + if (p->exit_state == EXIT_DEAD) + return; /* * If this is a threaded reparent there is no need to * notify anyone anything has happened. @@ -567,19 +570,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p, if (same_thread_group(p->real_parent, father)) return; - /* - * We don't want people slaying init. - * - * Note: we do this even if it is EXIT_DEAD, wait_task_zombie() - * can change ->exit_state to EXIT_ZOMBIE. If this is the final - * state, do_notify_parent() was already called and ->exit_signal - * doesn't matter. - */ + /* We don't want people slaying init. */ p->exit_signal = SIGCHLD; - if (p->exit_state == EXIT_DEAD) - return; - /* If it has exited notify the new parent about this child's death. */ if (!p->ptrace && p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { @@ -1043,17 +1036,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) return wait_noreap_copyout(wo, p, pid, uid, why, status); } + traced = ptrace_reparented(p); /* - * Try to move the task's state to DEAD - * only one thread is allowed to do this: + * Move the task's state to DEAD/TRACE, only one thread can do this. */ - state = xchg(&p->exit_state, EXIT_DEAD); - if (state != EXIT_ZOMBIE) { - BUG_ON(state != EXIT_DEAD); + state = traced ? EXIT_TRACE : EXIT_DEAD; + if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) return 0; - } - - traced = ptrace_reparented(p); /* * It can be ptraced but not reparented, check * thread_group_leader() to filter out sub-threads. @@ -1114,7 +1103,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) /* * Now we are sure this task is interesting, and no other - * thread can reap it because we set its state to EXIT_DEAD. + * thread can reap it because we its state == DEAD/TRACE. */ read_unlock(&tasklist_lock); @@ -1159,14 +1148,14 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) * If this is not a sub-thread, notify the parent. * If parent wants a zombie, don't release it now. */ + state = EXIT_DEAD; if (thread_group_leader(p) && - !do_notify_parent(p, p->exit_signal)) { - p->exit_state = EXIT_ZOMBIE; - p = NULL; - } + !do_notify_parent(p, p->exit_signal)) + state = EXIT_ZOMBIE; + p->exit_state = state; write_unlock_irq(&tasklist_lock); } - if (p != NULL) + if (state == EXIT_DEAD) release_task(p); return retval; @@ -1362,12 +1351,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, } /* dead body doesn't have much to contribute */ - if (unlikely(p->exit_state == EXIT_DEAD)) { + if (unlikely(p->exit_state == EXIT_DEAD)) + return 0; + + if (unlikely(p->exit_state == EXIT_TRACE)) { /* - * But do not ignore this task until the tracer does - * wait_task_zombie()->do_notify_parent(). + * ptrace == 0 means we are the natural parent. 
In this case + * we should clear notask_error, debugger will notify us. */ - if (likely(!ptrace) && unlikely(ptrace_reparented(p))) + if (likely(!ptrace)) wo->notask_error = 0; return 0; } -- cgit v1.2.3 From b436069059fede30ca31d4bf439cc86436ff5b1d Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:43 -0700 Subject: wait: use EXIT_TRACE only if thread_group_leader(zombie) wait_task_zombie() always uses EXIT_TRACE/ptrace_unlink() if ptrace_reparented(). This is suboptimal and a bit confusing: we do not need do_notify_parent(p) if !thread_group_leader(p) and in this case we also do not need ptrace_unlink(), we can rely on ptrace_release_task(). Change wait_task_zombie() to check thread_group_leader() along with ptrace_reparented() and simplify the final p->exit_state transition. Signed-off-by: Oleg Nesterov Tested-by: Michal Schmidt Cc: Jan Kratochvil Cc: Al Viro Cc: Lennart Poettering Cc: Roland McGrath Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 022a0ff17318..4773ed990907 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1040,7 +1040,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) /* * Move the task's state to DEAD/TRACE, only one thread can do this. */ - state = traced ? EXIT_TRACE : EXIT_DEAD; + state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD; if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) return 0; /* @@ -1140,18 +1140,15 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) if (!retval) retval = pid; - if (traced) { + if (state == EXIT_TRACE) { write_lock_irq(&tasklist_lock); /* We dropped tasklist, ptracer could die and untrace */ ptrace_unlink(p); - /* - * If this is not a sub-thread, notify the parent. - * If parent wants a zombie, don't release it now. - */ - state = EXIT_DEAD; - if (thread_group_leader(p) && - !do_notify_parent(p, p->exit_signal)) - state = EXIT_ZOMBIE; + + /* If parent wants a zombie, don't release it now */ + state = EXIT_ZOMBIE; + if (do_notify_parent(p, p->exit_signal)) + state = EXIT_DEAD; p->exit_state = state; write_unlock_irq(&tasklist_lock); } -- cgit v1.2.3 From b3ab03160dfaf8ab78d476b670de319f4c1a5685 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:45 -0700 Subject: wait: completely ignore the EXIT_DEAD tasks Now that EXIT_DEAD is the terminal state it doesn't make sense to call eligible_child() or security_task_wait() if the task is really dead. 
Signed-off-by: Oleg Nesterov Tested-by: Michal Schmidt Cc: Jan Kratochvil Cc: Al Viro Cc: Lennart Poettering Cc: Roland McGrath Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 4773ed990907..33cf8dba0a61 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1329,7 +1329,12 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) static int wait_consider_task(struct wait_opts *wo, int ptrace, struct task_struct *p) { - int ret = eligible_child(wo, p); + int ret; + + if (unlikely(p->exit_state == EXIT_DEAD)) + return 0; + + ret = eligible_child(wo, p); if (!ret) return ret; @@ -1347,10 +1352,6 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, return 0; } - /* dead body doesn't have much to contribute */ - if (unlikely(p->exit_state == EXIT_DEAD)) - return 0; - if (unlikely(p->exit_state == EXIT_TRACE)) { /* * ptrace == 0 means we are the natural parent. In this case -- cgit v1.2.3 From 377d75dafa07ee0da64223c9169f4e17b26c2b9a Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:47 -0700 Subject: wait: WSTOPPED|WCONTINUED hangs if a zombie child is traced by real_parent "A zombie is only visible to its ptracer" logic in wait_consider_task() is very wrong. Trivial test-case: #include #include #include #include int main(void) { int child = fork(); if (!child) { assert(ptrace(PTRACE_TRACEME, 0,0,0) == 0); return 0x23; } assert(waitid(P_ALL, child, NULL, WEXITED | WNOWAIT) == 0); assert(waitid(P_ALL, 0, NULL, WSTOPPED) == -1); return 0; } it hangs in waitpid(WSTOPPED) despite the fact it has a single zombie child. This is because wait_consider_task(ptrace => 0) sees p->ptrace and cleares ->notask_error assuming that the debugger should detach and notify us. Change wait_consider_task(ptrace => 0) to pretend that ptrace == T if the child is traced by us. This really simplifies the logic and allows us to do more fixes, see the next changes. This also hides the unwanted group stop state automatically, we can remove another ptrace_reparented() check. Unfortunately, this adds the following behavioural changes: 1. Before this patch wait(WEXITED | __WNOTHREAD) does not reap a natural child if it is traced by the caller's sub-thread. Hopefully nobody will ever notice this change, and I think that nobody should rely on this behaviour anyway. 2. SIGNAL_STOP_CONTINUED is no longer hidden from debugger if it is real parent. While this change comes as a side effect, I think it is good by itself. The group continued state can not be consumed by another process in this case, it doesn't depend on ptrace, it doesn't make sense to hide it from real parent. Perhaps we should add the thread_group_leader() check before wait_task_continued()? May be, but this shouldn't depend on ptrace_reparented(). 
Signed-off-by: Oleg Nesterov Cc: Al Viro Cc: Jan Kratochvil Cc: Lennart Poettering Cc: Michal Schmidt Cc: Roland McGrath Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 33cf8dba0a61..92d38d4da4b1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1362,6 +1362,22 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, return 0; } + if (likely(!ptrace) && unlikely(p->ptrace)) { + /* + * If it is traced by its real parent's group, just pretend + * the caller is ptrace_do_wait() and reap this child if it + * is zombie. + * + * This also hides group stop state from real parent; otherwise + * a single stop can be reported twice as group and ptrace stop. + * If a ptracer wants to distinguish these two events for its + * own children it should create a separate process which takes + * the role of real parent. + */ + if (!ptrace_reparented(p)) + ptrace = 1; + } + /* slay zombie? */ if (p->exit_state == EXIT_ZOMBIE) { /* @@ -1402,19 +1418,6 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) wo->notask_error = 0; } else { - /* - * If @p is ptraced by a task in its real parent's group, - * hide group stop/continued state when looking at @p as - * the real parent; otherwise, a single stop can be - * reported twice as group and ptrace stops. - * - * If a ptracer wants to distinguish the two events for its - * own children, it should create a separate process which - * takes the role of real parent. - */ - if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p)) - return 0; - /* * @p is alive and it's gonna stop, continue or exit, so * there always is something to wait for. -- cgit v1.2.3 From 7c733eb3eac0e3d091aaf37c183d2175eeebfb2b Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 7 Apr 2014 15:38:49 -0700 Subject: wait: WSTOPPED|WCONTINUED doesn't work if a zombie leader is traced by another process Even if the main thread is dead the process still can stop/continue. However, if the leader is ptraced wait_consider_task(ptrace => false) always skips wait_task_stopped/wait_task_continued, so WSTOPPED or WCONTINUED can never work for the natural parent in this case. Move the "A zombie ptracee is only visible to its ptracer" check into the "if (!delay_group_leader(p))" block. ->notask_error is cleared by the "fall through" code below. This depends on the previous change, wait_task_stopped/continued must be avoided if !delay_group_leader() and the tracer is ->real_parent. Otherwise WSTOPPED|WEXITED could wrongly report "stopped" when the child is already dead (single-threaded or not). If it is traced by another task then the "stopped" state is fine until the debugger detaches and reveals a zombie state. 
Stupid test-case: void *tfunc(void *arg) { sleep(1); // wait for zombie leader raise(SIGSTOP); exit(0x13); return NULL; } int run_child(void) { pthread_t thread; if (!fork()) { int tracee = getppid(); assert(ptrace(PTRACE_ATTACH, tracee, 0,0) == 0); do ptrace(PTRACE_CONT, tracee, 0,0); while (wait(NULL) > 0); return 0; } sleep(1); // wait for PTRACE_ATTACH assert(pthread_create(&thread, NULL, tfunc, NULL) == 0); pthread_exit(NULL); } int main(void) { int child, stat; child = fork(); if (!child) return run_child(); assert(child == waitpid(-1, &stat, WSTOPPED)); assert(stat == 0x137f); kill(child, SIGCONT); assert(child == waitpid(-1, &stat, WCONTINUED)); assert(stat == 0xffff); assert(child == waitpid(-1, &stat, 0)); assert(stat == 0x1300); return 0; } Without this patch it hangs in waitpid(WSTOPPED), wait_task_stopped() is never called. Note: this doesn't fix all problems with a zombie delay_group_leader(), WCONTINUED | WEXITED check is not exactly right. debugger can't assume it will be notified if another thread reaps the whole thread group. Signed-off-by: Oleg Nesterov Cc: Al Viro Cc: Jan Kratochvil Cc: Lennart Poettering Cc: Michal Schmidt Cc: Roland McGrath Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/exit.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'kernel') diff --git a/kernel/exit.c b/kernel/exit.c index 92d38d4da4b1..6ed6a1d552b5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1380,20 +1380,16 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, /* slay zombie? */ if (p->exit_state == EXIT_ZOMBIE) { - /* - * A zombie ptracee is only visible to its ptracer. - * Notification and reaping will be cascaded to the real - * parent when the ptracer detaches. - */ - if (likely(!ptrace) && unlikely(p->ptrace)) { - /* it will become visible, clear notask_error */ - wo->notask_error = 0; - return 0; - } - /* we don't reap group leaders with subthreads */ - if (!delay_group_leader(p)) - return wait_task_zombie(wo, p); + if (!delay_group_leader(p)) { + /* + * A zombie ptracee is only visible to its ptracer. + * Notification and reaping will be cascaded to the + * real parent when the ptracer detaches. + */ + if (unlikely(ptrace) || likely(!p->ptrace)) + return wait_task_zombie(wo, p); + } /* * Allow access to stopped/continued state via zombie by -- cgit v1.2.3 From 80df28476505ed4e6701c3448c63c9229a50c655 Mon Sep 17 00:00:00 2001 From: Liu Hua Date: Mon, 7 Apr 2014 15:38:57 -0700 Subject: hung_task: check the value of "sysctl_hung_task_timeout_sec" As sysctl_hung_task_timeout_sec is unsigned long, when this value is larger then LONG_MAX/HZ, the function schedule_timeout_interruptible in watchdog will return immediately without sleep and with print : schedule_timeout: wrong timeout value ffffffffffffff83 and then the funtion watchdog will call schedule_timeout_interruptible again and again. The screen will be filled with "schedule_timeout: wrong timeout value ffffffffffffff83" This patch does some check and correction in sysctl, to let the function schedule_timeout_interruptible allways get the valid parameter. 
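The underlying problem is a plain overflow when the seconds value is multiplied by HZ and handed to the signed jiffies timeout; a minimal user-space illustration (HZ hard-coded to 1000 purely for the example, and the wrap to a negative long is shown as it behaves on the usual two's-complement targets):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned long hz = 1000;		/* assumed HZ, example only */
		unsigned long secs = LONG_MAX / hz + 1;	/* just past the new cap */
		long jiffies = (long)(secs * hz);	/* wraps negative */

		/* schedule_timeout() rejects a negative value with
		 * "schedule_timeout: wrong timeout value ..." and returns
		 * immediately, so the watchdog loop spins printing it. */
		printf("%lu s -> %ld jiffies\n", secs, jiffies);
		return 0;
	}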
Signed-off-by: Liu Hua Tested-by: Satoru Takeuchi Cc: [3.4+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/sysctl.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 5c14b547882e..74f5b580fe34 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -141,6 +141,11 @@ static int min_percpu_pagelist_fract = 8; static int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; +/*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */ +#ifdef CONFIG_DETECT_HUNG_TASK +static unsigned long hung_task_timeout_max = (LONG_MAX/HZ); +#endif + #ifdef CONFIG_INOTIFY_USER #include #endif @@ -985,6 +990,7 @@ static struct ctl_table kern_table[] = { .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_dohung_task_timeout_secs, + .extra2 = &hung_task_timeout_max, }, { .procname = "hung_task_warnings", -- cgit v1.2.3 From d7c0847fe3682a026ee6d147c5b6b8ab457fffc8 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Mon, 7 Apr 2014 15:39:03 -0700 Subject: kernel/panic.c: display reason at end + pr_emerg Currently, booting without initrd specified on 80x25 screen gives a call trace followed by atkbd : Spurious ACK. Original message ("VFS: Unable to mount root fs") is not available. Of course this could happen in other situations... This patch displays panic reason after call trace which could help lot of people even if it's not the very last line on screen. Also, convert all panic.c printk(KERN_EMERG to pr_emerg( [akpm@linux-foundation.org: missed a couple of pr_ conversions] Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/panic.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index 79fd820bb5e8..d02fa9fef46a 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -100,7 +100,7 @@ void panic(const char *fmt, ...) va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); - printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); + pr_emerg("Kernel panic - not syncing: %s\n", buf); #ifdef CONFIG_DEBUG_BUGVERBOSE /* * Avoid nested stack-dumping if a panic occurs during oops processing @@ -141,7 +141,7 @@ void panic(const char *fmt, ...) * Delay timeout seconds before rebooting the machine. * We can't use the "normal" timers since we just panicked. */ - printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout); + pr_emerg("Rebooting in %d seconds..", panic_timeout); for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { touch_nmi_watchdog(); @@ -165,7 +165,7 @@ void panic(const char *fmt, ...) extern int stop_a_enabled; /* Make sure the user can actually press Stop-A (L1-A) */ stop_a_enabled = 1; - printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n"); + pr_emerg("Press Stop-A (L1-A) to return to the boot prom\n"); } #endif #if defined(CONFIG_S390) @@ -176,6 +176,7 @@ void panic(const char *fmt, ...) 
disabled_wait(caller); } #endif + pr_emerg("---[ end Kernel panic - not syncing: %s\n", buf); local_irq_enable(); for (i = 0; ; i += PANIC_TIMER_STEP) { touch_softlockup_watchdog(); @@ -276,8 +277,7 @@ unsigned long get_taint(void) void add_taint(unsigned flag, enum lockdep_ok lockdep_ok) { if (lockdep_ok == LOCKDEP_NOW_UNRELIABLE && __debug_locks_off()) - printk(KERN_WARNING - "Disabling lock debugging due to kernel taint\n"); + pr_warn("Disabling lock debugging due to kernel taint\n"); set_bit(flag, &tainted_mask); } @@ -382,8 +382,7 @@ late_initcall(init_oops_id); void print_oops_end_marker(void) { init_oops_id(); - printk(KERN_WARNING "---[ end trace %016llx ]---\n", - (unsigned long long)oops_id); + pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id); } /* -- cgit v1.2.3 From 52f5684c8e1ec7463192aba8e2916df49807511a Mon Sep 17 00:00:00 2001 From: Gideon Israel Dsouza Date: Mon, 7 Apr 2014 15:39:20 -0700 Subject: kernel: use macros from compiler.h instead of __attribute__((...)) To increase compiler portability there is which provides convenience macros for various gcc constructs. Eg: __weak for __attribute__((weak)). I've replaced all instances of gcc attributes with the right macro in the kernel subsystem. Signed-off-by: Gideon Israel Dsouza Cc: "Rafael J. Wysocki" Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/fork.c | 3 ++- kernel/kallsyms.c | 11 ++++++----- kernel/kexec.c | 5 +++-- kernel/ksysfs.c | 5 +++-- kernel/power/power.h | 3 ++- kernel/power/snapshot.c | 3 ++- kernel/power/suspend.c | 5 +++-- kernel/power/swap.c | 2 +- kernel/sched/clock.c | 3 ++- kernel/sched/core.c | 3 ++- kernel/signal.c | 4 +++- kernel/time/timekeeping.c | 5 +++-- kernel/trace/trace.h | 3 ++- 13 files changed, 34 insertions(+), 21 deletions(-) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index e905e9c6b224..54a8d26f612f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include @@ -286,7 +287,7 @@ void __init fork_init(unsigned long mempages) init_task.signal->rlim[RLIMIT_NPROC]; } -int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst, +int __weak arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 3127ad52cdb2..cb0cf37dac3a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -36,8 +37,8 @@ * These will be re-linked against their real values * during the second link stage. 
*/ -extern const unsigned long kallsyms_addresses[] __attribute__((weak)); -extern const u8 kallsyms_names[] __attribute__((weak)); +extern const unsigned long kallsyms_addresses[] __weak; +extern const u8 kallsyms_names[] __weak; /* * Tell the compiler that the count isn't in the small data section if the arch @@ -46,10 +47,10 @@ extern const u8 kallsyms_names[] __attribute__((weak)); extern const unsigned long kallsyms_num_syms __attribute__((weak, section(".rodata"))); -extern const u8 kallsyms_token_table[] __attribute__((weak)); -extern const u16 kallsyms_token_index[] __attribute__((weak)); +extern const u8 kallsyms_token_table[] __weak; +extern const u16 kallsyms_token_index[] __weak; -extern const unsigned long kallsyms_markers[] __attribute__((weak)); +extern const unsigned long kallsyms_markers[] __weak; static inline int is_kernel_inittext(unsigned long addr) { diff --git a/kernel/kexec.c b/kernel/kexec.c index c0d261c7db7b..c8380ad203bc 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -1551,10 +1552,10 @@ void vmcoreinfo_append_str(const char *fmt, ...) * provide an empty default implementation here -- architecture * code may override this */ -void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void) +void __weak arch_crash_save_vmcoreinfo(void) {} -unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void) +unsigned long __weak paddr_vmcoreinfo_note(void) { return __pa((unsigned long)(char *)&vmcoreinfo_note); } diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index e660964086e2..2495a9b14ac8 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -18,6 +18,7 @@ #include #include #include +#include #include /* rcu_expedited */ @@ -162,8 +163,8 @@ KERNEL_ATTR_RW(rcu_expedited); /* * Make /sys/kernel/notes give the raw contents of our kernel .notes section. 
*/ -extern const void __start_notes __attribute__((weak)); -extern const void __stop_notes __attribute__((weak)); +extern const void __start_notes __weak; +extern const void __stop_notes __weak; #define notes_size (&__stop_notes - &__start_notes) static ssize_t notes_read(struct file *filp, struct kobject *kobj, diff --git a/kernel/power/power.h b/kernel/power/power.h index 1ca753106557..15f37ea08719 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -2,6 +2,7 @@ #include #include #include +#include struct swsusp_info { struct new_utsname uts; @@ -11,7 +12,7 @@ struct swsusp_info { unsigned long image_pages; unsigned long pages; unsigned long size; -} __attribute__((aligned(PAGE_SIZE))); +} __aligned(PAGE_SIZE); #ifdef CONFIG_HIBERNATION /* kernel/power/snapshot.c */ diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 149e745eaa52..18fb7a2fb14b 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -155,7 +156,7 @@ static inline void free_image_page(void *addr, int clear_nosave_free) struct linked_page { struct linked_page *next; char data[LINKED_PAGE_DATA_SIZE]; -} __attribute__((packed)); +} __packed; static inline void free_list_of_pages(struct linked_page *list, int clear_page_nosave) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 90b3d9366d1a..c3ad9cafe930 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "power.h" @@ -156,13 +157,13 @@ static int suspend_prepare(suspend_state_t state) } /* default implementation */ -void __attribute__ ((weak)) arch_suspend_disable_irqs(void) +void __weak arch_suspend_disable_irqs(void) { local_irq_disable(); } /* default implementation */ -void __attribute__ ((weak)) arch_suspend_enable_irqs(void) +void __weak arch_suspend_enable_irqs(void) { local_irq_enable(); } diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 7c33ed200410..8c9a4819f798 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -101,7 +101,7 @@ struct swsusp_header { unsigned int flags; /* Flags to pass to the "boot" kernel */ char orig_sig[10]; char sig[10]; -} __attribute__((packed)); +} __packed; static struct swsusp_header *swsusp_header; diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index b30a2924ef14..3ef6451e972e 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c @@ -60,13 +60,14 @@ #include #include #include +#include /* * Scheduler clock - returns current time in nanosec units. * This is default implementation. * Architectures and sub-architectures can override this. */ -unsigned long long __attribute__((weak)) sched_clock(void) +unsigned long long __weak sched_clock(void) { return (unsigned long long)(jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1d1b87b36778..80bd491b718c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include @@ -6498,7 +6499,7 @@ static cpumask_var_t fallback_doms; * cpu core maps. It is supposed to return 1 if the topology changed * or 0 if it stayed the same. 
*/ -int __attribute__((weak)) arch_update_cpu_topology(void) +int __weak arch_update_cpu_topology(void) { return 0; } diff --git a/kernel/signal.c b/kernel/signal.c index 5d4b05a229a6..6ea13c09ae56 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -33,6 +33,8 @@ #include #include #include +#include + #define CREATE_TRACE_POINTS #include @@ -3618,7 +3620,7 @@ SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) } #endif -__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) +__weak const char *arch_vma_name(struct vm_area_struct *vma) { return NULL; } diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5b40279ecd71..f7df8ea21707 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "tick-internal.h" #include "ntp_internal.h" @@ -760,7 +761,7 @@ u64 timekeeping_max_deferment(void) * * XXX - Do be sure to remove it once all arches implement it. */ -void __attribute__((weak)) read_persistent_clock(struct timespec *ts) +void __weak read_persistent_clock(struct timespec *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; @@ -775,7 +776,7 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts) * * XXX - Do be sure to remove it once all arches implement it. */ -void __attribute__((weak)) read_boot_clock(struct timespec *ts) +void __weak read_boot_clock(struct timespec *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ffc314b7e92b..2e29d7ba5a52 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -13,6 +13,7 @@ #include #include #include +#include #ifdef CONFIG_FTRACE_SYSCALLS #include /* For NR_SYSCALLS */ @@ -1279,7 +1280,7 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); #undef FTRACE_ENTRY #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ extern struct ftrace_event_call \ - __attribute__((__aligned__(4))) event_##call; + __aligned(4) event_##call; #undef FTRACE_ENTRY_DUP #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ -- cgit v1.2.3 From 08f141d3dbddacb70aba1541bc5f950e466591e9 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 7 Apr 2014 15:39:39 -0700 Subject: modules: use raw_cpu_write for initialization of per cpu refcount. The initialization of a structure is not subject to synchronization. The use of __this_cpu would trigger a false positive with the additional preemption checks for __this_cpu ops. So simply disable the check through the use of raw_cpu ops. 
Trace: __this_cpu_write operation in preemptible [00000000] code: modprobe/286 caller is __this_cpu_preempt_check+0x38/0x60 CPU: 3 PID: 286 Comm: modprobe Tainted: GF 3.12.0-rc4+ #187 Call Trace: dump_stack+0x4e/0x82 check_preemption_disabled+0xec/0x110 __this_cpu_preempt_check+0x38/0x60 load_module+0xcfd/0x2650 SyS_init_module+0xa6/0xd0 tracesys+0xe1/0xe6 Signed-off-by: Christoph Lameter Acked-by: Ingo Molnar Acked-by: Rusty Russell Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/module.c b/kernel/module.c index 29f7790eaa14..11869408f79b 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -640,7 +640,7 @@ static int module_unload_init(struct module *mod) INIT_LIST_HEAD(&mod->target_list); /* Hold reference count during initialization. */ - __this_cpu_write(mod->refptr->incs, 1); + raw_cpu_write(mod->refptr->incs, 1); return 0; } -- cgit v1.2.3 From 64b47e8fdb40a0d46e8cf458dd3e24f8afa073f6 Mon Sep 17 00:00:00 2001 From: Josh Triplett Date: Mon, 7 Apr 2014 15:39:45 -0700 Subject: lglock: map to spinlock when !CONFIG_SMP When the system has only one CPU, lglock is effectively a spinlock; map it directly to spinlock to eliminate the indirection and duplicate code. In addition to removing overhead, this drops 1.6k of code with a defconfig modified to have !CONFIG_SMP, and 1.1k with a minimal config. Signed-off-by: Josh Triplett Cc: Rusty Russell Cc: Michal Marek Cc: Thomas Gleixner Cc: David Howells Cc: "H. Peter Anvin" Cc: Nick Piggin Cc: Peter Zijlstra Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/locking/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 306a76b51e0f..b8bdcd4785b7 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -1,5 +1,5 @@ -obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o +obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = -pg @@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o endif obj-$(CONFIG_SMP) += spinlock.o +obj-$(CONFIG_SMP) += lglock.o obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o -- cgit v1.2.3 From de7b2973903c6cc50b31ee5682a69b2219b9919d Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Tue, 8 Apr 2014 17:26:21 -0400 Subject: tracepoint: Use struct pointer instead of name hash for reg/unreg tracepoints Register/unregister tracepoint probes with struct tracepoint pointer rather than tracepoint name. This change, which vastly simplifies tracepoint.c, has been proposed by Steven Rostedt. It also removes 8.8kB (mostly of text) to the vmlinux size. From this point on, the tracers need to pass a struct tracepoint pointer to probe register/unregister. A probe can now only be connected to a tracepoint that exists. Moreover, tracers are responsible for unregistering the probe before the module containing its associated tracepoint is unloaded. text data bss dec hex filename 10443444 4282528 10391552 25117524 17f4354 vmlinux.orig 10434930 4282848 10391552 25109330 17f2352 vmlinux Link: http://lkml.kernel.org/r/1396992381-23785-2-git-send-email-mathieu.desnoyers@efficios.com CC: Ingo Molnar CC: Frederic Weisbecker CC: Andrew Morton CC: Frank Ch. 
Eigler CC: Johannes Berg Signed-off-by: Mathieu Desnoyers [ SDR - fixed return val in void func in tracepoint_module_going() ] Signed-off-by: Steven Rostedt --- kernel/trace/trace_events.c | 55 ++-- kernel/trace/trace_events_trigger.c | 2 +- kernel/trace/trace_kprobe.c | 21 +- kernel/trace/trace_output.c | 2 +- kernel/trace/trace_uprobe.c | 20 +- kernel/tracepoint.c | 511 ++++++++++++++++-------------------- 6 files changed, 280 insertions(+), 331 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 83a4378dc5e0..3ddfd8f62c05 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -223,24 +223,25 @@ int ftrace_event_reg(struct ftrace_event_call *call, { struct ftrace_event_file *file = data; + WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT)); switch (type) { case TRACE_REG_REGISTER: - return tracepoint_probe_register(call->name, + return tracepoint_probe_register(call->tp, call->class->probe, file); case TRACE_REG_UNREGISTER: - tracepoint_probe_unregister(call->name, + tracepoint_probe_unregister(call->tp, call->class->probe, file); return 0; #ifdef CONFIG_PERF_EVENTS case TRACE_REG_PERF_REGISTER: - return tracepoint_probe_register(call->name, + return tracepoint_probe_register(call->tp, call->class->perf_probe, call); case TRACE_REG_PERF_UNREGISTER: - tracepoint_probe_unregister(call->name, + tracepoint_probe_unregister(call->tp, call->class->perf_probe, call); return 0; @@ -352,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file, if (ret) { tracing_stop_cmdline_record(); pr_info("event trace: Could not enable event " - "%s\n", call->name); + "%s\n", ftrace_event_name(call)); break; } set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags); @@ -481,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match, { struct ftrace_event_file *file; struct ftrace_event_call *call; + const char *name; int ret = -EINVAL; list_for_each_entry(file, &tr->events, list) { call = file->event_call; + name = ftrace_event_name(call); - if (!call->name || !call->class || !call->class->reg) + if (!name || !call->class || !call->class->reg) continue; if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) continue; if (match && - strcmp(match, call->name) != 0 && + strcmp(match, name) != 0 && strcmp(match, call->class->system) != 0) continue; if (sub && strcmp(sub, call->class->system) != 0) continue; - if (event && strcmp(event, call->name) != 0) + if (event && strcmp(event, name) != 0) continue; ftrace_event_enable_disable(file, set); @@ -699,7 +702,7 @@ static int t_show(struct seq_file *m, void *v) if (strcmp(call->class->system, TRACE_SYSTEM) != 0) seq_printf(m, "%s:", call->class->system); - seq_printf(m, "%s\n", call->name); + seq_printf(m, "%s\n", ftrace_event_name(call)); return 0; } @@ -792,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, mutex_lock(&event_mutex); list_for_each_entry(file, &tr->events, list) { call = file->event_call; - if (!call->name || !call->class || !call->class->reg) + if (!ftrace_event_name(call) || !call->class || !call->class->reg) continue; if (system && strcmp(call->class->system, system->name) != 0) @@ -907,7 +910,7 @@ static int f_show(struct seq_file *m, void *v) switch ((unsigned long)v) { case FORMAT_HEADER: - seq_printf(m, "name: %s\n", call->name); + seq_printf(m, "name: %s\n", ftrace_event_name(call)); seq_printf(m, "ID: %d\n", call->event.type); seq_printf(m, "format:\n"); return 0; @@ -1527,6 +1530,7 @@ 
event_create_dir(struct dentry *parent, struct ftrace_event_file *file) struct trace_array *tr = file->tr; struct list_head *head; struct dentry *d_events; + const char *name; int ret; /* @@ -1540,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) } else d_events = parent; - file->dir = debugfs_create_dir(call->name, d_events); + name = ftrace_event_name(call); + file->dir = debugfs_create_dir(name, d_events); if (!file->dir) { pr_warning("Could not create debugfs '%s' directory\n", - call->name); + name); return -1; } @@ -1567,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file) ret = call->class->define_fields(call); if (ret < 0) { pr_warning("Could not initialize trace point" - " events/%s\n", call->name); + " events/%s\n", name); return -1; } } @@ -1631,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call) static int event_init(struct ftrace_event_call *call) { int ret = 0; + const char *name; - if (WARN_ON(!call->name)) + name = ftrace_event_name(call); + if (WARN_ON(!name)) return -EINVAL; if (call->class->raw_init) { ret = call->class->raw_init(call); if (ret < 0 && ret != -ENOSYS) pr_warn("Could not initialize trace events/%s\n", - call->name); + name); } return ret; @@ -1885,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr) ret = __trace_add_new_event(call, tr); if (ret < 0) pr_warning("Could not create directory for event %s\n", - call->name); + ftrace_event_name(call)); } } @@ -1894,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system, const char *event) { struct ftrace_event_file *file; struct ftrace_event_call *call; + const char *name; list_for_each_entry(file, &tr->events, list) { call = file->event_call; + name = ftrace_event_name(call); - if (!call->name || !call->class || !call->class->reg) + if (!name || !call->class || !call->class->reg) continue; if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) continue; - if (strcmp(event, call->name) == 0 && + if (strcmp(event, name) == 0 && strcmp(system, call->class->system) == 0) return file; } @@ -1973,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, seq_printf(m, "%s:%s:%s", data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR, data->file->event_call->class->system, - data->file->event_call->name); + ftrace_event_name(data->file->event_call)); if (data->count == -1) seq_printf(m, ":unlimited\n"); @@ -2193,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) ret = event_create_dir(tr->event_dir, file); if (ret < 0) pr_warning("Could not create directory for event %s\n", - file->event_call->name); + ftrace_event_name(file->event_call)); } } @@ -2217,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr) ret = __trace_early_add_new_event(call, tr); if (ret < 0) pr_warning("Could not create early event %s\n", - call->name); + ftrace_event_name(call)); } } @@ -2549,7 +2558,7 @@ static __init void event_trace_self_tests(void) continue; #endif - pr_info("Testing event %s: ", call->name); + pr_info("Testing event %s: ", ftrace_event_name(call)); /* * If an event is already enabled, someone is using diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 8efbb69b04f0..925f537f07d1 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, seq_printf(m, "%s:%s:%s", enable_data->enable ? 
ENABLE_EVENT_STR : DISABLE_EVENT_STR, enable_data->file->event_call->class->system, - enable_data->file->event_call->name); + ftrace_event_name(enable_data->file->event_call)); if (data->count == -1) seq_puts(m, ":unlimited"); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d021d21dd150..903ae28962be 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -341,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, struct trace_kprobe *tk; list_for_each_entry(tk, &probe_list, list) - if (strcmp(tk->tp.call.name, event) == 0 && + if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && strcmp(tk->tp.call.class->system, group) == 0) return tk; return NULL; @@ -516,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk) mutex_lock(&probe_lock); /* Delete old (same name) event if exist */ - old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system); + old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), + tk->tp.call.class->system); if (old_tk) { ret = unregister_trace_kprobe(old_tk); if (ret < 0) @@ -564,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, if (ret) pr_warning("Failed to re-register probe %s on" "%s: %d\n", - tk->tp.call.name, mod->name, ret); + ftrace_event_name(&tk->tp.call), + mod->name, ret); } } mutex_unlock(&probe_lock); @@ -818,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v) int i; seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); - seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name); + seq_printf(m, ":%s/%s", tk->tp.call.class->system, + ftrace_event_name(&tk->tp.call)); if (!tk->symbol) seq_printf(m, " 0x%p", tk->rp.kp.addr); @@ -876,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) { struct trace_kprobe *tk = v; - seq_printf(m, " %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit, + seq_printf(m, " %-44s %15lu %15lu\n", + ftrace_event_name(&tk->tp.call), tk->nhit, tk->rp.kp.nmissed); return 0; @@ -1011,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, field = (struct kprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); - if (!trace_seq_printf(s, "%s: (", tp->call.name)) + if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) goto partial; if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) @@ -1047,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, field = (struct kretprobe_trace_entry_head *)iter->ent; tp = container_of(event, struct trace_probe, call.event); - if (!trace_seq_printf(s, "%s: (", tp->call.name)) + if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) goto partial; if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) @@ -1286,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk) call->data = tk; ret = trace_add_event_call(call); if (ret) { - pr_info("Failed to register kprobe event: %s\n", call->name); + pr_info("Failed to register kprobe event: %s\n", + ftrace_event_name(call)); kfree(call->print_fmt); unregister_ftrace_event(&call->event); } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index ca0e79e2abaa..a436de18aa99 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, } trace_seq_init(p); - ret = trace_seq_printf(s, "%s: ", event->name); + ret = trace_seq_printf(s, 
"%s: ", ftrace_event_name(event)); if (!ret) return TRACE_TYPE_PARTIAL_LINE; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e4473367e7a4..930e51462dc8 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -294,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou struct trace_uprobe *tu; list_for_each_entry(tu, &uprobe_list, list) - if (strcmp(tu->tp.call.name, event) == 0 && + if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 && strcmp(tu->tp.call.class->system, group) == 0) return tu; @@ -324,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu) mutex_lock(&uprobe_lock); /* register as an event */ - old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system); + old_tu = find_probe_event(ftrace_event_name(&tu->tp.call), + tu->tp.call.class->system); if (old_tu) { /* delete old event */ ret = unregister_trace_uprobe(old_tu); @@ -599,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v) char c = is_ret_probe(tu) ? 'r' : 'p'; int i; - seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name); + seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, + ftrace_event_name(&tu->tp.call)); seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); for (i = 0; i < tu->tp.nr_args; i++) @@ -649,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v) { struct trace_uprobe *tu = v; - seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit); + seq_printf(m, " %s %-44s %15lu\n", tu->filename, + ftrace_event_name(&tu->tp.call), tu->nhit); return 0; } @@ -844,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e tu = container_of(event, struct trace_uprobe, tp.call.event); if (is_ret_probe(tu)) { - if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name, + if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", + ftrace_event_name(&tu->tp.call), entry->vaddr[1], entry->vaddr[0])) goto partial; data = DATAOF_TRACE_ENTRY(entry, true); } else { - if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name, + if (!trace_seq_printf(s, "%s: (0x%lx)", + ftrace_event_name(&tu->tp.call), entry->vaddr[0])) goto partial; data = DATAOF_TRACE_ENTRY(entry, false); @@ -1275,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu) ret = trace_add_event_call(call); if (ret) { - pr_info("Failed to register uprobe event: %s\n", call->name); + pr_info("Failed to register uprobe event: %s\n", + ftrace_event_name(call)); kfree(call->print_fmt); unregister_ftrace_event(&call->event); } diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 50f8329c2042..01b3bd84daa1 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Mathieu Desnoyers + * Copyright (C) 2008-2014 Mathieu Desnoyers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,39 +33,27 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[]; /* Set to 1 to enable tracepoint debug output */ static const int tracepoint_debug; +#ifdef CONFIG_MODULES /* - * Tracepoints mutex protects the builtin and module tracepoints and the hash - * table, as well as the local module list. + * Tracepoint module list mutex protects the local module list. 
*/ -static DEFINE_MUTEX(tracepoints_mutex); +static DEFINE_MUTEX(tracepoint_module_list_mutex); -#ifdef CONFIG_MODULES -/* Local list of struct module */ +/* Local list of struct tp_module */ static LIST_HEAD(tracepoint_module_list); #endif /* CONFIG_MODULES */ /* - * Tracepoint hash table, containing the active tracepoints. - * Protected by tracepoints_mutex. + * tracepoints_mutex protects the builtin and module tracepoints. + * tracepoints_mutex nests inside tracepoint_module_list_mutex. */ -#define TRACEPOINT_HASH_BITS 6 -#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) -static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; +static DEFINE_MUTEX(tracepoints_mutex); /* * Note about RCU : * It is used to delay the free of multiple probes array until a quiescent * state is reached. - * Tracepoint entries modifications are protected by the tracepoints_mutex. */ -struct tracepoint_entry { - struct hlist_node hlist; - struct tracepoint_func *funcs; - int refcount; /* Number of times armed. 0 if disarmed. */ - int enabled; /* Tracepoint enabled */ - char name[0]; -}; - struct tp_probes { struct rcu_head rcu; struct tracepoint_func probes[0]; @@ -92,34 +80,33 @@ static inline void release_probes(struct tracepoint_func *old) } } -static void debug_print_probes(struct tracepoint_entry *entry) +static void debug_print_probes(struct tracepoint_func *funcs) { int i; - if (!tracepoint_debug || !entry->funcs) + if (!tracepoint_debug || !funcs) return; - for (i = 0; entry->funcs[i].func; i++) - printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func); + for (i = 0; funcs[i].func; i++) + printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); } -static struct tracepoint_func * -tracepoint_entry_add_probe(struct tracepoint_entry *entry, - void *probe, void *data) +static struct tracepoint_func *func_add(struct tracepoint_func **funcs, + struct tracepoint_func *tp_func) { int nr_probes = 0; struct tracepoint_func *old, *new; - if (WARN_ON(!probe)) + if (WARN_ON(!tp_func->func)) return ERR_PTR(-EINVAL); - debug_print_probes(entry); - old = entry->funcs; + debug_print_probes(*funcs); + old = *funcs; if (old) { /* (N -> N+1), (N != 0, 1) probes */ for (nr_probes = 0; old[nr_probes].func; nr_probes++) - if (old[nr_probes].func == probe && - old[nr_probes].data == data) + if (old[nr_probes].func == tp_func->func && + old[nr_probes].data == tp_func->data) return ERR_PTR(-EEXIST); } /* + 2 : one for new probe, one for NULL func */ @@ -128,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, return ERR_PTR(-ENOMEM); if (old) memcpy(new, old, nr_probes * sizeof(struct tracepoint_func)); - new[nr_probes].func = probe; - new[nr_probes].data = data; + new[nr_probes] = *tp_func; new[nr_probes + 1].func = NULL; - entry->refcount = nr_probes + 1; - entry->funcs = new; - debug_print_probes(entry); + *funcs = new; + debug_print_probes(*funcs); return old; } -static void * -tracepoint_entry_remove_probe(struct tracepoint_entry *entry, - void *probe, void *data) +static void *func_remove(struct tracepoint_func **funcs, + struct tracepoint_func *tp_func) { int nr_probes = 0, nr_del = 0, i; struct tracepoint_func *old, *new; - old = entry->funcs; + old = *funcs; if (!old) return ERR_PTR(-ENOENT); - debug_print_probes(entry); + debug_print_probes(*funcs); /* (N -> M), (N > 1, M >= 0) probes */ - if (probe) { + if (tp_func->func) { for (nr_probes = 0; old[nr_probes].func; nr_probes++) { - if (old[nr_probes].func == probe && - old[nr_probes].data == data) + if (old[nr_probes].func == 
tp_func->func && + old[nr_probes].data == tp_func->data) nr_del++; } } @@ -165,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, */ if (nr_probes - nr_del == 0) { /* N -> 0, (N > 1) */ - entry->funcs = NULL; - entry->refcount = 0; - debug_print_probes(entry); + *funcs = NULL; + debug_print_probes(*funcs); return old; } else { int j = 0; @@ -177,91 +160,34 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, if (new == NULL) return ERR_PTR(-ENOMEM); for (i = 0; old[i].func; i++) - if (old[i].func != probe || old[i].data != data) + if (old[i].func != tp_func->func + || old[i].data != tp_func->data) new[j++] = old[i]; new[nr_probes - nr_del].func = NULL; - entry->refcount = nr_probes - nr_del; - entry->funcs = new; + *funcs = new; } - debug_print_probes(entry); + debug_print_probes(*funcs); return old; } /* - * Get tracepoint if the tracepoint is present in the tracepoint hash table. - * Must be called with tracepoints_mutex held. - * Returns NULL if not present. + * Add the probe function to a tracepoint. */ -static struct tracepoint_entry *get_tracepoint(const char *name) +static int tracepoint_add_func(struct tracepoint *tp, + struct tracepoint_func *func) { - struct hlist_head *head; - struct tracepoint_entry *e; - u32 hash = jhash(name, strlen(name), 0); - - head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; - hlist_for_each_entry(e, head, hlist) { - if (!strcmp(name, e->name)) - return e; - } - return NULL; -} + struct tracepoint_func *old, *tp_funcs; -/* - * Add the tracepoint to the tracepoint hash table. Must be called with - * tracepoints_mutex held. - */ -static struct tracepoint_entry *add_tracepoint(const char *name) -{ - struct hlist_head *head; - struct tracepoint_entry *e; - size_t name_len = strlen(name) + 1; - u32 hash = jhash(name, name_len-1, 0); - - head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; - hlist_for_each_entry(e, head, hlist) { - if (!strcmp(name, e->name)) { - printk(KERN_NOTICE - "tracepoint %s busy\n", name); - return ERR_PTR(-EEXIST); /* Already there */ - } - } - /* - * Using kmalloc here to allocate a variable length element. Could - * cause some memory fragmentation if overused. - */ - e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL); - if (!e) - return ERR_PTR(-ENOMEM); - memcpy(&e->name[0], name, name_len); - e->funcs = NULL; - e->refcount = 0; - e->enabled = 0; - hlist_add_head(&e->hlist, head); - return e; -} + if (tp->regfunc && !static_key_enabled(&tp->key)) + tp->regfunc(); -/* - * Remove the tracepoint from the tracepoint hash table. Must be called with - * mutex_lock held. - */ -static inline void remove_tracepoint(struct tracepoint_entry *e) -{ - hlist_del(&e->hlist); - kfree(e); -} - -/* - * Sets the probe callback corresponding to one tracepoint. - */ -static void set_tracepoint(struct tracepoint_entry **entry, - struct tracepoint *elem, int active) -{ - WARN_ON(strcmp((*entry)->name, elem->name) != 0); - - if (elem->regfunc && !static_key_enabled(&elem->key) && active) - elem->regfunc(); - else if (elem->unregfunc && static_key_enabled(&elem->key) && !active) - elem->unregfunc(); + tp_funcs = tp->funcs; + old = func_add(&tp_funcs, func); + if (IS_ERR(old)) { + WARN_ON_ONCE(1); + return PTR_ERR(old); + } + release_probes(old); /* * rcu_assign_pointer has a smp_wmb() which makes sure that the new @@ -270,199 +196,163 @@ static void set_tracepoint(struct tracepoint_entry **entry, * include/linux/tracepoints.h. A matching smp_read_barrier_depends() * is used. 
*/ - rcu_assign_pointer(elem->funcs, (*entry)->funcs); - if (active && !static_key_enabled(&elem->key)) - static_key_slow_inc(&elem->key); - else if (!active && static_key_enabled(&elem->key)) - static_key_slow_dec(&elem->key); + rcu_assign_pointer(tp->funcs, tp_funcs); + if (!static_key_enabled(&tp->key)) + static_key_slow_inc(&tp->key); + return 0; } /* - * Disable a tracepoint and its probe callback. + * Remove a probe function from a tracepoint. * Note: only waiting an RCU period after setting elem->call to the empty * function insures that the original callback is not used anymore. This insured * by preempt_disable around the call site. */ -static void disable_tracepoint(struct tracepoint *elem) +static int tracepoint_remove_func(struct tracepoint *tp, + struct tracepoint_func *func) { - if (elem->unregfunc && static_key_enabled(&elem->key)) - elem->unregfunc(); - - if (static_key_enabled(&elem->key)) - static_key_slow_dec(&elem->key); - rcu_assign_pointer(elem->funcs, NULL); -} + struct tracepoint_func *old, *tp_funcs; -/** - * tracepoint_update_probe_range - Update a probe range - * @begin: beginning of the range - * @end: end of the range - * - * Updates the probe callback corresponding to a range of tracepoints. - * Called with tracepoints_mutex held. - */ -static void tracepoint_update_probe_range(struct tracepoint * const *begin, - struct tracepoint * const *end) -{ - struct tracepoint * const *iter; - struct tracepoint_entry *mark_entry; - - if (!begin) - return; - - for (iter = begin; iter < end; iter++) { - mark_entry = get_tracepoint((*iter)->name); - if (mark_entry) { - set_tracepoint(&mark_entry, *iter, - !!mark_entry->refcount); - mark_entry->enabled = !!mark_entry->refcount; - } else { - disable_tracepoint(*iter); - } + tp_funcs = tp->funcs; + old = func_remove(&tp_funcs, func); + if (IS_ERR(old)) { + WARN_ON_ONCE(1); + return PTR_ERR(old); } -} - -#ifdef CONFIG_MODULES -void module_update_tracepoints(void) -{ - struct tp_module *tp_mod; - - list_for_each_entry(tp_mod, &tracepoint_module_list, list) - tracepoint_update_probe_range(tp_mod->tracepoints_ptrs, - tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints); -} -#else /* CONFIG_MODULES */ -void module_update_tracepoints(void) -{ -} -#endif /* CONFIG_MODULES */ + release_probes(old); + if (!tp_funcs) { + /* Removed last function */ + if (tp->unregfunc && static_key_enabled(&tp->key)) + tp->unregfunc(); -/* - * Update probes, removing the faulty probes. - * Called with tracepoints_mutex held. - */ -static void tracepoint_update_probes(void) -{ - /* Core kernel tracepoints */ - tracepoint_update_probe_range(__start___tracepoints_ptrs, - __stop___tracepoints_ptrs); - /* tracepoints in modules. 
*/ - module_update_tracepoints(); -} - -static struct tracepoint_func * -tracepoint_add_probe(const char *name, void *probe, void *data) -{ - struct tracepoint_entry *entry; - struct tracepoint_func *old; - - entry = get_tracepoint(name); - if (!entry) { - entry = add_tracepoint(name); - if (IS_ERR(entry)) - return (struct tracepoint_func *)entry; + if (static_key_enabled(&tp->key)) + static_key_slow_dec(&tp->key); } - old = tracepoint_entry_add_probe(entry, probe, data); - if (IS_ERR(old) && !entry->refcount) - remove_tracepoint(entry); - return old; + rcu_assign_pointer(tp->funcs, tp_funcs); + return 0; } /** * tracepoint_probe_register - Connect a probe to a tracepoint - * @name: tracepoint name + * @tp: tracepoint * @probe: probe handler - * @data: probe private data - * - * Returns: - * - 0 if the probe was successfully registered, and tracepoint - * callsites are currently loaded for that probe, - * - -ENODEV if the probe was successfully registered, but no tracepoint - * callsite is currently loaded for that probe, - * - other negative error value on error. - * - * When tracepoint_probe_register() returns either 0 or -ENODEV, - * parameters @name, @probe, and @data may be used by the tracepoint - * infrastructure until the probe is unregistered. * - * The probe address must at least be aligned on the architecture pointer size. + * Returns 0 if ok, error value on error. + * Note: if @tp is within a module, the caller is responsible for + * unregistering the probe before the module is gone. This can be + * performed either with a tracepoint module going notifier, or from + * within module exit functions. */ -int tracepoint_probe_register(const char *name, void *probe, void *data) +int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data) { - struct tracepoint_func *old; - struct tracepoint_entry *entry; - int ret = 0; + struct tracepoint_func tp_func; + int ret; mutex_lock(&tracepoints_mutex); - old = tracepoint_add_probe(name, probe, data); - if (IS_ERR(old)) { - mutex_unlock(&tracepoints_mutex); - return PTR_ERR(old); - } - tracepoint_update_probes(); /* may update entry */ - entry = get_tracepoint(name); - /* Make sure the entry was enabled */ - if (!entry || !entry->enabled) - ret = -ENODEV; + tp_func.func = probe; + tp_func.data = data; + ret = tracepoint_add_func(tp, &tp_func); mutex_unlock(&tracepoints_mutex); - release_probes(old); return ret; } EXPORT_SYMBOL_GPL(tracepoint_probe_register); -static struct tracepoint_func * -tracepoint_remove_probe(const char *name, void *probe, void *data) -{ - struct tracepoint_entry *entry; - struct tracepoint_func *old; - - entry = get_tracepoint(name); - if (!entry) - return ERR_PTR(-ENOENT); - old = tracepoint_entry_remove_probe(entry, probe, data); - if (IS_ERR(old)) - return old; - if (!entry->refcount) - remove_tracepoint(entry); - return old; -} - /** * tracepoint_probe_unregister - Disconnect a probe from a tracepoint - * @name: tracepoint name + * @tp: tracepoint * @probe: probe function pointer - * @data: probe private data * - * We do not need to call a synchronize_sched to make sure the probes have - * finished running before doing a module unload, because the module unload - * itself uses stop_machine(), which insures that every preempt disabled section - * have finished. + * Returns 0 if ok, error value on error. 
*/ -int tracepoint_probe_unregister(const char *name, void *probe, void *data) +int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) { - struct tracepoint_func *old; + struct tracepoint_func tp_func; + int ret; mutex_lock(&tracepoints_mutex); - old = tracepoint_remove_probe(name, probe, data); - if (IS_ERR(old)) { - mutex_unlock(&tracepoints_mutex); - return PTR_ERR(old); - } - tracepoint_update_probes(); /* may update entry */ + tp_func.func = probe; + tp_func.data = data; + ret = tracepoint_remove_func(tp, &tp_func); mutex_unlock(&tracepoints_mutex); - release_probes(old); - return 0; + return ret; } EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); - #ifdef CONFIG_MODULES bool trace_module_has_bad_taint(struct module *mod) { return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); } +static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list); + +/** + * register_tracepoint_notifier - register tracepoint coming/going notifier + * @nb: notifier block + * + * Notifiers registered with this function are called on module + * coming/going with the tracepoint_module_list_mutex held. + * The notifier block callback should expect a "struct tp_module" data + * pointer. + */ +int register_tracepoint_module_notifier(struct notifier_block *nb) +{ + struct tp_module *tp_mod; + int ret; + + mutex_lock(&tracepoint_module_list_mutex); + ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb); + if (ret) + goto end; + list_for_each_entry(tp_mod, &tracepoint_module_list, list) + (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod); +end: + mutex_unlock(&tracepoint_module_list_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier); + +/** + * unregister_tracepoint_notifier - unregister tracepoint coming/going notifier + * @nb: notifier block + * + * The notifier block callback should expect a "struct tp_module" data + * pointer. + */ +int unregister_tracepoint_module_notifier(struct notifier_block *nb) +{ + struct tp_module *tp_mod; + int ret; + + mutex_lock(&tracepoint_module_list_mutex); + ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb); + if (ret) + goto end; + list_for_each_entry(tp_mod, &tracepoint_module_list, list) + (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod); +end: + mutex_unlock(&tracepoint_module_list_mutex); + return ret; + +} +EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier); + +/* + * Ensure the tracer unregistered the module's probes before the module + * teardown is performed. Prevents leaks of probe and data pointers. 
+ */ +static void tp_module_going_check_quiescent(struct tracepoint * const *begin, + struct tracepoint * const *end) +{ + struct tracepoint * const *iter; + + if (!begin) + return; + for (iter = begin; iter < end; iter++) + WARN_ON_ONCE((*iter)->funcs); +} + static int tracepoint_module_coming(struct module *mod) { struct tp_module *tp_mod; @@ -478,7 +368,7 @@ static int tracepoint_module_coming(struct module *mod) */ if (trace_module_has_bad_taint(mod)) return 0; - mutex_lock(&tracepoints_mutex); + mutex_lock(&tracepoint_module_list_mutex); tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); if (!tp_mod) { ret = -ENOMEM; @@ -487,27 +377,33 @@ static int tracepoint_module_coming(struct module *mod) tp_mod->num_tracepoints = mod->num_tracepoints; tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; list_add_tail(&tp_mod->list, &tracepoint_module_list); - tracepoint_update_probe_range(mod->tracepoints_ptrs, - mod->tracepoints_ptrs + mod->num_tracepoints); + blocking_notifier_call_chain(&tracepoint_notify_list, + MODULE_STATE_COMING, tp_mod); end: - mutex_unlock(&tracepoints_mutex); + mutex_unlock(&tracepoint_module_list_mutex); return ret; } -static int tracepoint_module_going(struct module *mod) +static void tracepoint_module_going(struct module *mod) { - struct tp_module *pos; + struct tp_module *tp_mod; if (!mod->num_tracepoints) - return 0; + return; - mutex_lock(&tracepoints_mutex); - tracepoint_update_probe_range(mod->tracepoints_ptrs, - mod->tracepoints_ptrs + mod->num_tracepoints); - list_for_each_entry(pos, &tracepoint_module_list, list) { - if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) { - list_del(&pos->list); - kfree(pos); + mutex_lock(&tracepoint_module_list_mutex); + list_for_each_entry(tp_mod, &tracepoint_module_list, list) { + if (tp_mod->tracepoints_ptrs == mod->tracepoints_ptrs) { + blocking_notifier_call_chain(&tracepoint_notify_list, + MODULE_STATE_GOING, tp_mod); + list_del(&tp_mod->list); + kfree(tp_mod); + /* + * Called the going notifier before checking for + * quiescence. + */ + tp_module_going_check_quiescent(mod->tracepoints_ptrs, + mod->tracepoints_ptrs + mod->num_tracepoints); break; } } @@ -517,12 +413,11 @@ static int tracepoint_module_going(struct module *mod) * flag on "going", in case a module taints the kernel only after being * loaded. 
*/ - mutex_unlock(&tracepoints_mutex); - return 0; + mutex_unlock(&tracepoint_module_list_mutex); } -int tracepoint_module_notify(struct notifier_block *self, - unsigned long val, void *data) +static int tracepoint_module_notify(struct notifier_block *self, + unsigned long val, void *data) { struct module *mod = data; int ret = 0; @@ -534,24 +429,58 @@ int tracepoint_module_notify(struct notifier_block *self, case MODULE_STATE_LIVE: break; case MODULE_STATE_GOING: - ret = tracepoint_module_going(mod); + tracepoint_module_going(mod); + break; + case MODULE_STATE_UNFORMED: break; } return ret; } -struct notifier_block tracepoint_module_nb = { +static struct notifier_block tracepoint_module_nb = { .notifier_call = tracepoint_module_notify, .priority = 0, }; -static int init_tracepoints(void) +static __init int init_tracepoints(void) { - return register_module_notifier(&tracepoint_module_nb); + int ret; + + ret = register_module_notifier(&tracepoint_module_nb); + if (ret) { + pr_warning("Failed to register tracepoint module enter notifier\n"); + } + return ret; } __initcall(init_tracepoints); #endif /* CONFIG_MODULES */ +static void for_each_tracepoint_range(struct tracepoint * const *begin, + struct tracepoint * const *end, + void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + struct tracepoint * const *iter; + + if (!begin) + return; + for (iter = begin; iter < end; iter++) + fct(*iter, priv); +} + +/** + * for_each_kernel_tracepoint - iteration on all kernel tracepoints + * @fct: callback + * @priv: private data + */ +void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + for_each_tracepoint_range(__start___tracepoints_ptrs, + __stop___tracepoints_ptrs, fct, priv); +} +EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint); + #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ -- cgit v1.2.3 From eb7d035c59431bb12e1aa6e69ddd3940352faddb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Tue, 8 Apr 2014 20:09:40 -0400 Subject: tracepoint: Simplify tracepoint module search Instead of copying the num_tracepoints and tracepoints_ptrs from the module structure to the tp_mod structure, which only uses it to find the module associated to tracepoints of modules that are coming and going, simply copy the pointer to the module struct to the tracepoint tp_module structure. Also removed un-needed brackets around an if statement. 
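As a usage sketch, a tracer consuming the coming/going notifier introduced by the tracepoint rework above might look roughly like the following; the callback name, the pr_debug() message and the cleanup hook are illustrative assumptions, while register_tracepoint_module_notifier(), struct tp_module and the tp_mod->mod pointer added by this patch come from the code itself:

	#include <linux/module.h>
	#include <linux/notifier.h>
	#include <linux/tracepoint.h>

	/* Hypothetical consumer of the tracepoint module notifier. */
	static int my_tracer_tp_module_cb(struct notifier_block *nb,
					  unsigned long val, void *data)
	{
		struct tp_module *tp_mod = data;

		switch (val) {
		case MODULE_STATE_COMING:
			/* tracepoints of tp_mod->mod may now be probed */
			pr_debug("tracepoints available from %s\n",
				 tp_mod->mod->name);
			break;
		case MODULE_STATE_GOING:
			/* drop any probes still attached to this module's tracepoints */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_tracer_tp_nb = {
		.notifier_call = my_tracer_tp_module_cb,
	};

	/* typically called from the tracer's init code: */
	/* register_tracepoint_module_notifier(&my_tracer_tp_nb); */
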
Link: http://lkml.kernel.org/r/20140408201705.4dad2c4a@gandalf.local.home Acked-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 01b3bd84daa1..162be198a247 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -374,8 +374,7 @@ static int tracepoint_module_coming(struct module *mod) ret = -ENOMEM; goto end; } - tp_mod->num_tracepoints = mod->num_tracepoints; - tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs; + tp_mod->mod = mod; list_add_tail(&tp_mod->list, &tracepoint_module_list); blocking_notifier_call_chain(&tracepoint_notify_list, MODULE_STATE_COMING, tp_mod); @@ -393,7 +392,7 @@ static void tracepoint_module_going(struct module *mod) mutex_lock(&tracepoint_module_list_mutex); list_for_each_entry(tp_mod, &tracepoint_module_list, list) { - if (tp_mod->tracepoints_ptrs == mod->tracepoints_ptrs) { + if (tp_mod->mod == mod) { blocking_notifier_call_chain(&tracepoint_notify_list, MODULE_STATE_GOING, tp_mod); list_del(&tp_mod->list); @@ -447,9 +446,9 @@ static __init int init_tracepoints(void) int ret; ret = register_module_notifier(&tracepoint_module_nb); - if (ret) { + if (ret) pr_warning("Failed to register tracepoint module enter notifier\n"); - } + return ret; } __initcall(init_tracepoints); -- cgit v1.2.3 From b725dfea24b89de672c055b34b22398283a3f4bc Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Wed, 9 Apr 2014 09:24:43 -0400 Subject: tracepoint: Fix sparse warnings in tracepoint.c Fix the following sparse warnings: CHECK kernel/tracepoint.c kernel/tracepoint.c:184:18: warning: incorrect type in assignment (different address spaces) kernel/tracepoint.c:184:18: expected struct tracepoint_func *tp_funcs kernel/tracepoint.c:184:18: got struct tracepoint_func [noderef] *funcs kernel/tracepoint.c:216:18: warning: incorrect type in assignment (different address spaces) kernel/tracepoint.c:216:18: expected struct tracepoint_func *tp_funcs kernel/tracepoint.c:216:18: got struct tracepoint_func [noderef] *funcs kernel/tracepoint.c:392:24: error: return expression in void function CC kernel/tracepoint.o kernel/tracepoint.c: In function tracepoint_module_going: kernel/tracepoint.c:491:6: warning: symbol 'syscall_regfunc' was not declared. Should it be static? kernel/tracepoint.c:508:6: warning: symbol 'syscall_unregfunc' was not declared. Should it be static? 
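The two hunks below silence the address-space warnings by loading tp->funcs through rcu_dereference_protected() instead of a plain assignment. A minimal sketch of the pattern, assuming the caller is on the update side and already holds tracepoints_mutex:

	struct tracepoint_func *tp_funcs;

	/* update side: no rcu_read_lock() needed, the mutex serializes writers */
	tp_funcs = rcu_dereference_protected(tp->funcs,
				lockdep_is_held(&tracepoints_mutex));

rcu_dereference_protected() strips the __rcu address-space annotation that sparse checks, and with lockdep enabled it also verifies the stated condition, documenting why a non-RCU-protected dereference is safe at this point.
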
Link: http://lkml.kernel.org/r/1397049883-28692-1-git-send-email-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/tracepoint.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 162be198a247..ca2cfe21bb8e 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -181,7 +181,8 @@ static int tracepoint_add_func(struct tracepoint *tp, if (tp->regfunc && !static_key_enabled(&tp->key)) tp->regfunc(); - tp_funcs = tp->funcs; + tp_funcs = rcu_dereference_protected(tp->funcs, + lockdep_is_held(&tracepoints_mutex)); old = func_add(&tp_funcs, func); if (IS_ERR(old)) { WARN_ON_ONCE(1); @@ -213,7 +214,8 @@ static int tracepoint_remove_func(struct tracepoint *tp, { struct tracepoint_func *old, *tp_funcs; - tp_funcs = tp->funcs; + tp_funcs = rcu_dereference_protected(tp->funcs, + lockdep_is_held(&tracepoints_mutex)); old = func_remove(&tp_funcs, func); if (IS_ERR(old)) { WARN_ON_ONCE(1); -- cgit v1.2.3 From 69cd9eba38867a493a043bb13eb9b33cad5f1a9a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 8 Apr 2014 15:30:07 -0700 Subject: futex: avoid race between requeue and wake Jan Stancek reported: "pthread_cond_broadcast/4-1.c testcase from openposix testsuite (LTP) occasionally fails, because some threads fail to wake up. Testcase creates 5 threads, which are all waiting on same condition. Main thread then calls pthread_cond_broadcast() without holding mutex, which calls: futex(uaddr1, FUTEX_CMP_REQUEUE_PRIVATE, 1, 2147483647, uaddr2, ..) This immediately wakes up single thread A, which unlocks mutex and tries to wake up another thread: futex(uaddr2, FUTEX_WAKE_PRIVATE, 1) If thread A manages to call futex_wake() before any waiters are requeued for uaddr2, no other thread is woken up" The ordering constraints for the hash bucket waiter counting are that the waiter counts have to be incremented _before_ getting the spinlock (because the spinlock acts as part of the memory barrier), but the "requeue" operation didn't honor those rules, and nobody had even thought about that case. This fairly simple patch just increments the waiter count for the target hash bucket (hb2) when requeing a futex before taking the locks. It then decrements them again after releasing the lock - the code that actually moves the futex(es) between hash buckets will do the additional required waiter count housekeeping. Reported-and-tested-by: Jan Stancek Acked-by: Davidlohr Bueso Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: stable@vger.kernel.org # 3.14 Signed-off-by: Linus Torvalds --- kernel/futex.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 67dacaf93e56..6801b3751a95 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1452,6 +1452,7 @@ retry: hb2 = hash_futex(&key2); retry_private: + hb_waiters_inc(hb2); double_lock_hb(hb1, hb2); if (likely(cmpval != NULL)) { @@ -1461,6 +1462,7 @@ retry_private: if (unlikely(ret)) { double_unlock_hb(hb1, hb2); + hb_waiters_dec(hb2); ret = get_user(curval, uaddr1); if (ret) @@ -1510,6 +1512,7 @@ retry_private: break; case -EFAULT: double_unlock_hb(hb1, hb2); + hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); ret = fault_in_user_writeable(uaddr2); @@ -1519,6 +1522,7 @@ retry_private: case -EAGAIN: /* The owner was exiting, try again. 
*/ double_unlock_hb(hb1, hb2); + hb_waiters_dec(hb2); put_futex_key(&key2); put_futex_key(&key1); cond_resched(); @@ -1594,6 +1598,7 @@ retry_private: out_unlock: double_unlock_hb(hb1, hb2); + hb_waiters_dec(hb2); /* * drop_futex_key_refs() must be called outside the spinlocks. During -- cgit v1.2.3 From abb43f6998eb6466ea392d3757e673bbdb6ae171 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Wed, 9 Apr 2014 17:06:08 -0400 Subject: tracing: Fix anonymous unions in struct ftrace_event_call gcc <= 4.5.x has significant limitations with respect to initialization of anonymous unions within structures. They need to be surrounded by brackets, _and_ they need to be initialized in the same order in which they appear in the structure declaration. Link: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=10676 Link: http://lkml.kernel.org/r/1397077568-3156-1-git-send-email-mathieu.desnoyers@efficios.com Signed-off-by: Mathieu Desnoyers Signed-off-by: Steven Rostedt --- kernel/trace/trace_export.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index ee0a5098ac43..d4ddde28a81a 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c @@ -173,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = { \ }; \ \ struct ftrace_event_call __used event_##call = { \ - .name = #call, \ - .event.type = etype, \ .class = &event_class_ftrace_##call, \ + { \ + .name = #call, \ + }, \ + .event.type = etype, \ .print_fmt = print, \ .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ }; \ -- cgit v1.2.3 From 17a280ea8111c66791c18c0353b7986aafcb24fe Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Thu, 10 Apr 2014 22:43:37 -0400 Subject: tracing: Add missing function triggers dump and cpudump to README The debugfs tracing README file lists all the function triggers except for dump and cpudump. These should be added too. Signed-off-by: Steven Rostedt --- kernel/trace/trace.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9be67c5e5b0f..e3e665685ee5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3611,6 +3611,8 @@ static const char readme_msg[] = #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif + "\t\t dump\n" + "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" -- cgit v1.2.3 From a227960fe0cafcc229a8d6bb8b454a3a0b33719d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 10 Apr 2014 16:15:59 +0200 Subject: locking/mutex: Fix debug_mutexes debug_mutex_unlock() would bail when !debug_locks and forgets to actually unlock. Reported-by: "Michael L. Semon" Reported-by: "Kirill A. 
Shutemov" Reported-by: Valdis Kletnieks Fixes: 6f008e72cd11 ("locking/mutex: Fix debug checks") Tested-by: Dave Jones Cc: Jason Low Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20140410141559.GE13658@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/locking/mutex-debug.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'kernel') diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index e1191c996c59..5cf6731b98e9 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c @@ -71,18 +71,17 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, void debug_mutex_unlock(struct mutex *lock) { - if (unlikely(!debug_locks)) - return; + if (likely(debug_locks)) { + DEBUG_LOCKS_WARN_ON(lock->magic != lock); - DEBUG_LOCKS_WARN_ON(lock->magic != lock); + if (!lock->owner) + DEBUG_LOCKS_WARN_ON(!lock->owner); + else + DEBUG_LOCKS_WARN_ON(lock->owner != current); - if (!lock->owner) - DEBUG_LOCKS_WARN_ON(!lock->owner); - else - DEBUG_LOCKS_WARN_ON(lock->owner != current); - - DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); - mutex_clear_owner(lock); + DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); + mutex_clear_owner(lock); + } /* * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug -- cgit v1.2.3 From a786c06d9f2719203c00b3d97b21f9a96980d0b5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 11 Apr 2014 12:01:03 -0400 Subject: missing bits of "splice: fix racy pipe->buffers uses" that commit has fixed only the parts of that mess in fs/splice.c itself; there had been more in several other ->splice_read() instances... Signed-off-by: Al Viro --- kernel/relay.c | 2 +- kernel/trace/trace.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/relay.c b/kernel/relay.c index 98833f664fb6..7d38607649a3 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -1251,7 +1251,7 @@ static ssize_t subbuf_splice_actor(struct file *in, subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT; pidx = (read_start / PAGE_SIZE) % subbuf_pages; poff = read_start & ~PAGE_MASK; - nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers); + nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max); for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { unsigned int this_len, this_end, private; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7511de35257f..27924caaa124 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4410,7 +4410,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. 
*/ - for (i = 0, rem = len; i < pipe->buffers && rem; i++) { + for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; @@ -5267,7 +5267,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); - for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) { + for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) { struct page *page; int r; -- cgit v1.2.3 From d7e8af1afeffb03ab250b91cd70ba8c701f0f2b7 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 9 Apr 2014 11:55:07 -0700 Subject: futex: update documentation for ordering guarantees Commits 11d4616bd07f ("futex: revert back to the explicit waiter counting code") and 69cd9eba3886 ("futex: avoid race between requeue and wake") changed some of the finer details of how we think about futexes. One was a late fix and the other a consequence of overlooking the whole requeuing logic. The first change caused our documentation to be incorrect, and the second made us aware that we need to explicitly add more details to it. Signed-off-by: Davidlohr Bueso Signed-off-by: Linus Torvalds --- kernel/futex.c | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/futex.c b/kernel/futex.c index 6801b3751a95..5f589279e462 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -70,7 +70,10 @@ #include "locking/rtmutex_common.h" /* - * Basic futex operation and ordering guarantees: + * READ this before attempting to hack on futexes! + * + * Basic futex operation and ordering guarantees + * ============================================= * * The waiter reads the futex value in user space and calls * futex_wait(). This function computes the hash bucket and acquires @@ -119,7 +122,7 @@ * sys_futex(WAIT, futex, val); * futex_wait(futex, val); * - * waiters++; + * waiters++; (a) * mb(); (A) <-- paired with -. * | * lock(hash_bucket(futex)); | @@ -135,14 +138,14 @@ * unlock(hash_bucket(futex)); * schedule(); if (waiters) * lock(hash_bucket(futex)); - * wake_waiters(futex); - * unlock(hash_bucket(futex)); + * else wake_waiters(futex); + * waiters--; (b) unlock(hash_bucket(futex)); * - * Where (A) orders the waiters increment and the futex value read -- this - * is guaranteed by the head counter in the hb spinlock; and where (B) - * orders the write to futex and the waiters read -- this is done by the - * barriers in get_futex_key_refs(), through either ihold or atomic_inc, - * depending on the futex type. + * Where (A) orders the waiters increment and the futex value read through + * atomic operations (see hb_waiters_inc) and where (B) orders the write + * to futex and the waiters read -- this is done by the barriers in + * get_futex_key_refs(), through either ihold or atomic_inc, depending on the + * futex type. * * This yields the following case (where X:=waiters, Y:=futex): * @@ -155,6 +158,17 @@ * Which guarantees that x==0 && y==0 is impossible; which translates back into * the guarantee that we cannot both miss the futex variable change and the * enqueue. + * + * Note that a new waiter is accounted for in (a) even when it is possible that + * the wait call can return error, in which case we backtrack from it in (b). + * Refer to the comment in queue_lock(). 
+ * + * Similarly, in order to account for waiters being requeued on another + * address we always increment the waiters for the destination bucket before + * acquiring the lock. It then decrements them again after releasing it - + * the code that actually moves the futex(es) between hash buckets (requeue_futex) + * will do the additional required waiter count housekeeping. This is done for + * double_lock_hb() and double_unlock_hb(), respectively. */ #ifndef CONFIG_HAVE_FUTEX_CMPXCHG -- cgit v1.2.3 From 2eac7648321f4a08aa4078504d7727af0af7173b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 14 Apr 2014 21:02:59 +0200 Subject: seccomp: fix populating a0-a5 syscall args in 32-bit x86 BPF Linus reports that on 32-bit x86 Chromium throws the following seccomp resp. audit log messages: audit: type=1326 audit(1397359304.356:28108): auid=500 uid=500 gid=500 ses=2 subj=unconfined_u:unconfined_r:chrome_sandbox_t:s0-s0:c0.c1023 pid=3677 comm="chrome" exe="/opt/google/chrome/chrome" sig=0 syscall=172 compat=0 ip=0xb2dd9852 code=0x30000 audit: type=1326 audit(1397359304.356:28109): auid=500 uid=500 gid=500 ses=2 subj=unconfined_u:unconfined_r:chrome_sandbox_t:s0-s0:c0.c1023 pid=3677 comm="chrome" exe="/opt/google/chrome/chrome" sig=0 syscall=5 compat=0 ip=0xb2dd9852 code=0x50000 These audit messages are being triggered via audit_seccomp() through __secure_computing() in seccomp mode (BPF) filter with seccomp return codes 0x30000 (== SECCOMP_RET_TRAP) and 0x50000 (== SECCOMP_RET_ERRNO) during filter runtime. Moreover, Linus reports that x86_64 Chromium seems fine. The underlying issue that explains this is that the implementation of populate_seccomp_data() is wrong. Our seccomp data structure sd that is being shared with user ABI is: struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; }; Therefore, a simple cast to 'unsigned long *' for storing the value of the syscall argument via syscall_get_arguments() is just wrong as on 32-bit x86 (or any other 32bit arch), it would result in storing a0-a5 at wrong offsets in args[] member, and thus i) could leak stack memory to user space and ii) tampers with the logic of seccomp BPF programs that read out and check for syscall arguments: syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); Tested on 32-bit x86 with Google Chrome, unfortunately only via remote test machine through slow ssh X forwarding, but it fixes the issue on my side. So fix it up by storing args in type correct variables, gcc is clever and optimizes the copy away in other cases, e.g. x86_64. Fixes: bd4cf0ed331a ("net: filter: rework/optimize internal BPF interpreter's instruction set") Reported-and-bisected-by: Linus Torvalds Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Cc: Linus Torvalds Cc: Eric Paris Cc: James Morris Cc: Kees Cook Signed-off-by: David S. Miller --- kernel/seccomp.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/seccomp.c b/kernel/seccomp.c index d8d046c0726a..590c37925084 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -69,18 +69,17 @@ static void populate_seccomp_data(struct seccomp_data *sd) { struct task_struct *task = current; struct pt_regs *regs = task_pt_regs(task); + unsigned long args[6]; sd->nr = syscall_get_nr(task, regs); sd->arch = syscall_get_arch(); - - /* Unroll syscall_get_args to help gcc on arm. 
*/ - syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]); - syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]); - syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]); - syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]); - syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]); - syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]); - + syscall_get_arguments(task, regs, 0, 6, args); + sd->args[0] = args[0]; + sd->args[1] = args[1]; + sd->args[2] = args[2]; + sd->args[3] = args[3]; + sd->args[4] = args[4]; + sd->args[5] = args[5]; sd->instruction_pointer = KSTK_EIP(task); } -- cgit v1.2.3 From e79323bd87808fdfbc68ce6c5371bd224d9672ee Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 14 Apr 2014 16:58:55 -0400 Subject: user namespace: fix incorrect memory barriers smp_read_barrier_depends() can be used if there is data dependency between the readers - i.e. if the read operation after the barrier uses address that was obtained from the read operation before the barrier. In this file, there is only control dependency, no data dependecy, so the use of smp_read_barrier_depends() is incorrect. The code could fail in the following way: * the cpu predicts that idx < entries is true and starts executing the body of the for loop * the cpu fetches map->extent[0].first and map->extent[0].count * the cpu fetches map->nr_extents * the cpu verifies that idx < extents is true, so it commits the instructions in the body of the for loop The problem is that in this scenario, the cpu read map->extent[0].first and map->nr_extents in the wrong order. We need a full read memory barrier to prevent it. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org Signed-off-by: Linus Torvalds --- kernel/user_namespace.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 0d8f6023fd8d..bf71b4b2d632 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) /* Find the matching extent */ extents = map->nr_extents; - smp_read_barrier_depends(); + smp_rmb(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].first; last = first + map->extent[idx].count - 1; @@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id) /* Find the matching extent */ extents = map->nr_extents; - smp_read_barrier_depends(); + smp_rmb(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].first; last = first + map->extent[idx].count - 1; @@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id) /* Find the matching extent */ extents = map->nr_extents; - smp_read_barrier_depends(); + smp_rmb(); for (idx = 0; idx < extents; idx++) { first = map->extent[idx].lower_first; last = first + map->extent[idx].count - 1; @@ -615,9 +615,8 @@ static ssize_t map_write(struct file *file, const char __user *buf, * were written before the count of the extents. * * To achieve this smp_wmb() is used on guarantee the write - * order and smp_read_barrier_depends() is guaranteed that we - * don't have crazy architectures returning stale data. - * + * order and smp_rmb() is guaranteed that we don't have crazy + * architectures returning stale data. 
*/ mutex_lock(&id_map_mutex); -- cgit v1.2.3 From 521c42990e9d561ed5ed9f501f07639d0512b3c9 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 15 Apr 2014 10:54:37 +0530 Subject: tick-common: Fix wrong check in tick_check_replacement() tick_check_replacement() returns if a replacement of clock_event_device is possible or not. It does this as the first check: if (tick_check_percpu(curdev, newdev, smp_processor_id())) return false; Thats wrong. tick_check_percpu() returns true when the device is useable. Check for false instead. [ tglx: Massaged changelog ] Signed-off-by: Viresh Kumar Cc: # v3.11+ Cc: linaro-kernel@lists.linaro.org Cc: fweisbec@gmail.com Cc: Arvind.Chauhan@arm.com Cc: linaro-networking@linaro.org Link: http://lkml.kernel.org/r/486a02efe0246635aaba786e24b42d316438bf3b.1397537987.git.viresh.kumar@linaro.org Signed-off-by: Thomas Gleixner --- kernel/time/tick-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 015661279b68..0a0608edeb26 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -276,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev, bool tick_check_replacement(struct clock_event_device *curdev, struct clock_event_device *newdev) { - if (tick_check_percpu(curdev, newdev, smp_processor_id())) + if (!tick_check_percpu(curdev, newdev, smp_processor_id())) return false; return tick_check_preferred(curdev, newdev); -- cgit v1.2.3 From 03e6bdc5c4d0fc166bfd5d3cf749a5a0c1b5b1bd Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 15 Apr 2014 10:54:40 +0530 Subject: tick-sched: Don't call update_wall_time() when delta is lesser than tick_period In tick_do_update_jiffies64() we are processing ticks only if delta is greater than tick_period. This is what we are supposed to do here and it broke a bit with this patch: commit 47a1b796 (tick/timekeeping: Call update_wall_time outside the jiffies lock) With above patch, we might end up calling update_wall_time() even if delta is found to be smaller that tick_period. Fix this by returning when the delta is less than tick period. [ tglx: Made it a 3 liner and massaged changelog ] Signed-off-by: Viresh Kumar Cc: linaro-kernel@lists.linaro.org Cc: fweisbec@gmail.com Cc: Arvind.Chauhan@arm.com Cc: linaro-networking@linaro.org Cc: John Stultz Cc: # v3.14+ Link: http://lkml.kernel.org/r/80afb18a494b0bd9710975bcc4de134ae323c74f.1397537987.git.viresh.kumar@linaro.org Signed-off-by: Thomas Gleixner --- kernel/time/tick-sched.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 9f8af69c67ec..e947d968c5b5 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -84,6 +84,9 @@ static void tick_do_update_jiffies64(ktime_t now) /* Keep the tick_next_period variable up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); + } else { + write_sequnlock(&jiffies_lock); + return; } write_sequnlock(&jiffies_lock); update_wall_time(); -- cgit v1.2.3 From 27630532ef5ead28b98cfe28d8f95222ef91c2b7 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Tue, 15 Apr 2014 10:54:41 +0530 Subject: tick-sched: Check tick_nohz_enabled in tick_nohz_switch_to_nohz() Since commit d689fe222 (NOHZ: Check for nohz active instead of nohz enabled) the tick_nohz_switch_to_nohz() function returns because it checks for the tick_nohz_active flag. This can't be set, because the function itself sets it. 
Undo the change in tick_nohz_switch_to_nohz(). Signed-off-by: Viresh Kumar Cc: linaro-kernel@lists.linaro.org Cc: fweisbec@gmail.com Cc: Arvind.Chauhan@arm.com Cc: linaro-networking@linaro.org Cc: # 3.13+ Link: http://lkml.kernel.org/r/40939c05f2d65d781b92b20302b02243d0654224.1397537987.git.viresh.kumar@linaro.org Signed-off-by: Thomas Gleixner --- kernel/time/tick-sched.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index e947d968c5b5..6558b7ac112d 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -970,7 +970,7 @@ static void tick_nohz_switch_to_nohz(void) struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); ktime_t next; - if (!tick_nohz_active) + if (!tick_nohz_enabled) return; local_irq_disable(); -- cgit v1.2.3 From 0acf07d240a84069c4a6651e6030cf35d30c7159 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Apr 2014 10:54:34 -0700 Subject: seccomp: fix memory leak on filter attach This sets the correct error code when final filter memory is unavailable, and frees the raw filter no matter what. unreferenced object 0xffff8800d6ea4000 (size 512): comm "sshd", pid 278, jiffies 4294898315 (age 46.653s) hex dump (first 32 bytes): 21 00 00 00 04 00 00 00 15 00 01 00 3e 00 00 c0 !...........>... 06 00 00 00 00 00 00 00 21 00 00 00 00 00 00 00 ........!....... backtrace: [] kmemleak_alloc+0x4e/0xb0 [] __kmalloc+0x280/0x320 [] prctl_set_seccomp+0x11e/0x3b0 [] SyS_prctl+0x3bb/0x4a0 [] system_call_fastpath+0x1a/0x1f [] 0xffffffffffffffff Reported-by: Masami Ichikawa Signed-off-by: Kees Cook Tested-by: Masami Ichikawa Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- kernel/seccomp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 590c37925084..b35c21503a36 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -255,6 +255,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) goto free_prog; /* Allocate a new seccomp_filter */ + ret = -ENOMEM; filter = kzalloc(sizeof(struct seccomp_filter) + sizeof(struct sock_filter_int) * new_len, GFP_KERNEL|__GFP_NOWARN); @@ -264,6 +265,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog) ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len); if (ret) goto free_filter; + kfree(fp); atomic_set(&filter->usage, 1); filter->len = new_len; -- cgit v1.2.3 From 5d6c97c55984b3b991400692f9e8568a702b93c0 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Wed, 16 Apr 2014 19:21:53 -0400 Subject: tracing: Do not try to recreate toplevel set_ftrace_* files With the restructuring of the function tracer working with instances, the "top level" buffer is a bit special, as the function tracing is mapped to the same set of filters. This is done by using a "global_ops" descriptor and having the "set_ftrace_filter" and "set_ftrace_notrace" map to it. When an instance is created, it creates the same files but it's for the local instance and not the global_ops. The issue is that the local instance creation shares some code with the global instance one and we end up trying to create the top level "set_ftrace_*" files twice, and on boot up, we get an error like this: Could not create debugfs 'set_ftrace_filter' entry Could not create debugfs 'set_ftrace_notrace' entry The reason they failed to be created was because they were created twice, and the second time gives this error as you can not create the same file twice. 
Reported-by: Borislav Petkov Signed-off-by: Steven Rostedt --- kernel/trace/trace_functions.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'kernel') diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 5b781d2be383..ffd56351b521 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -58,12 +58,16 @@ int ftrace_create_function_files(struct trace_array *tr, { int ret; - /* The top level array uses the "global_ops". */ - if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL)) { - ret = allocate_ftrace_ops(tr); - if (ret) - return ret; - } + /* + * The top level array uses the "global_ops", and the files are + * created on boot up. + */ + if (tr->flags & TRACE_ARRAY_FL_GLOBAL) + return 0; + + ret = allocate_ftrace_ops(tr); + if (ret) + return ret; ftrace_create_filter_files(tr->ops, parent); -- cgit v1.2.3 From 6ea6215fe394e320468589d9bba464a48f6d823a Mon Sep 17 00:00:00 2001 From: "zhangwei(Jovi)" Date: Thu, 17 Apr 2014 16:05:19 +0800 Subject: tracing/uprobes: Fix uprobe_cpu_buffer memory leak Forgot to free uprobe_cpu_buffer percpu page in uprobe_buffer_disable(). Link: http://lkml.kernel.org/p/534F8B3F.1090407@huawei.com Cc: stable@vger.kernel.org # v3.14+ Acked-by: Namhyung Kim Signed-off-by: zhangwei(Jovi) Signed-off-by: Steven Rostedt --- kernel/trace/trace_uprobe.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'kernel') diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 930e51462dc8..c082a7441345 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -732,9 +732,15 @@ static int uprobe_buffer_enable(void) static void uprobe_buffer_disable(void) { + int cpu; + BUG_ON(!mutex_is_locked(&event_mutex)); if (--uprobe_buffer_refcnt == 0) { + for_each_possible_cpu(cpu) + free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, + cpu)->buf); + free_percpu(uprobe_cpu_buffer); uprobe_cpu_buffer = NULL; } -- cgit v1.2.3 From 7861144b8cb217634d738e94a748deeae139a1e2 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 18 Apr 2014 15:07:12 -0700 Subject: kernel/watchdog.c:touch_softlockup_watchdog(): use raw_cpu_write() Fix: BUG: using __this_cpu_write() in preemptible [00000000] code: systemd-udevd/497 caller is __this_cpu_preempt_check+0x13/0x20 CPU: 3 PID: 497 Comm: systemd-udevd Tainted: G W 3.15.0-rc1 #9 Hardware name: Hewlett-Packard HP EliteBook 8470p/179B, BIOS 68ICF Ver. F.02 04/27/2012 Call Trace: check_preemption_disabled+0xe1/0xf0 __this_cpu_preempt_check+0x13/0x20 touch_nmi_watchdog+0x28/0x40 Reported-by: Luis Henriques Tested-by: Luis Henriques Cc: Eric Piel Cc: Robert Moore Cc: Lv Zheng Cc: "Rafael J. Wysocki" Cc: Len Brown Cc: Christoph Lameter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index e90089fd78e0..516203e665fc 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -138,7 +138,11 @@ static void __touch_watchdog(void) void touch_softlockup_watchdog(void) { - __this_cpu_write(watchdog_touch_ts, 0); + /* + * Preemption can be enabled. It doesn't matter which CPU's timestamp + * gets zeroed here, so use the raw_ operation. 
+ */ + raw_cpu_write(watchdog_touch_ts, 0); } EXPORT_SYMBOL(touch_softlockup_watchdog); -- cgit v1.2.3 From db66d756c74acb886c51f11b501c2fe622018a0a Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Fri, 18 Apr 2014 01:59:15 +0900 Subject: sched/docbook: Fix 'make htmldocs' warnings caused by missing description When 'flags' argument to sched_{set,get}attr() syscalls were added in: 6d35ab48090b ("sched: Add 'flags' argument to sched_{set,get}attr() syscalls") no description for 'flags' was added. It causes the following warnings on "make htmldocs": Warning(/kernel/sched/core.c:3645): No description found for parameter 'flags' Warning(/kernel/sched/core.c:3789): No description found for parameter 'flags' Signed-off-by: Masanari Iida Cc: peterz@infradead.org Link: http://lkml.kernel.org/r/1397753955-2914-1-git-send-email-standby24x7@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 268a45ea238c..9fe2190005cb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3639,6 +3639,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) * sys_sched_setattr - same as above, but with extended sched_attr * @pid: the pid in question. * @uattr: structure containing the extended parameters. + * @flags: for future extension. */ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, unsigned int, flags) @@ -3783,6 +3784,7 @@ err_size: * @pid: the pid in question. * @uattr: structure containing the extended parameters. * @size: sizeof(attr) for fwd/bwd comp. + * @flags: for future extension. */ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, unsigned int, size, unsigned int, flags) -- cgit v1.2.3 From 2d513868e2a33e1d5315490ef4c861ee65babd65 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 2 May 2014 23:26:24 +0200 Subject: sched: Sanitize irq accounting madness Russell reported, that irqtime_account_idle_ticks() takes ages due to: for (i = 0; i < ticks; i++) irqtime_account_process_tick(current, 0, rq); It's sad, that this code was written way _AFTER_ the NOHZ idle functionality was available. I charge myself guitly for not paying attention when that crap got merged with commit abb74cefa ("sched: Export ns irqtimes through /proc/stat") So instead of looping nr_ticks times just apply the whole thing at once. As a side note: The whole cputime_t vs. u64 business in that context wants to be cleaned up as well. There is no point in having all these back and forth conversions. Lets standardise on u64 nsec for all kernel internal accounting and be done with it. Everything else does not make sense at all for fine grained accounting. Frederic, can you please take care of that? Reported-by: Russell King Signed-off-by: Thomas Gleixner Reviewed-by: Paul E. McKenney Signed-off-by: Peter Zijlstra Cc: Venkatesh Pallipadi Cc: Shaun Ruffell Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1405022307000.6261@ionos.tec.linutronix.de Signed-off-by: Ingo Molnar --- kernel/sched/cputime.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index a95097cb4591..72fdf06ef865 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -332,50 +332,50 @@ out: * softirq as those do not count in task exec_runtime any more. 
*/ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq) + struct rq *rq, int ticks) { - cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); + cputime_t scaled = cputime_to_scaled(cputime_one_jiffy); + u64 cputime = (__force u64) cputime_one_jiffy; u64 *cpustat = kcpustat_this_cpu->cpustat; if (steal_account_process_tick()) return; + cputime *= ticks; + scaled *= ticks; + if (irqtime_account_hi_update()) { - cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; + cpustat[CPUTIME_IRQ] += cputime; } else if (irqtime_account_si_update()) { - cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; + cpustat[CPUTIME_SOFTIRQ] += cputime; } else if (this_cpu_ksoftirqd() == p) { /* * ksoftirqd time do not get accounted in cpu_softirq_time. * So, we have to handle it separately here. * Also, p->stime needs to be updated for ksoftirqd. */ - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SOFTIRQ); + __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ); } else if (user_tick) { - account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); + account_user_time(p, cputime, scaled); } else if (p == rq->idle) { - account_idle_time(cputime_one_jiffy); + account_idle_time(cputime); } else if (p->flags & PF_VCPU) { /* System time or guest time */ - account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); + account_guest_time(p, cputime, scaled); } else { - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SYSTEM); + __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM); } } static void irqtime_account_idle_ticks(int ticks) { - int i; struct rq *rq = this_rq(); - for (i = 0; i < ticks; i++) - irqtime_account_process_tick(current, 0, rq); + irqtime_account_process_tick(current, 0, rq, ticks); } #else /* CONFIG_IRQ_TIME_ACCOUNTING */ static inline void irqtime_account_idle_ticks(int ticks) {} static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq) {} + struct rq *rq, int nr_ticks) {} #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ /* @@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick) return; if (sched_clock_irqtime) { - irqtime_account_process_tick(p, user_tick, rq); + irqtime_account_process_tick(p, user_tick, rq, 1); return; } -- cgit v1.2.3 From 5bfd126e80dca70431aef8fdbc1cf14535f3c338 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Tue, 15 Apr 2014 13:49:04 +0200 Subject: sched/deadline: Fix sched_yield() behavior yield_task_dl() is broken: o it forces current to be throttled setting its runtime to zero; o it sets current's dl_se->dl_new to one, expecting that dl_task_timer() will queue it back with proper parameters at replenish time. Unfortunately, dl_task_timer() has this check at the very beginning: if (!dl_task(p) || dl_se->dl_new) goto unlock; So, it just bails out and the task is never replenished. It actually yielded forever. To fix this, introduce a new flag indicating that the task properly yielded the CPU before its current runtime expired. While this is a little overdoing at the moment, the flag would be useful in the future to discriminate between "good" jobs (of which remaining runtime could be reclaimed, i.e. recycled) and "bad" jobs (for which dl_throttled task has been set) that needed to be stopped. 
Reported-by: yjay.kim Signed-off-by: Juri Lelli Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20140429103953.e68eba1b2ac3309214e3dc5a@gmail.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 1 + kernel/sched/deadline.c | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9fe2190005cb..e62c65a12d5b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3124,6 +3124,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); dl_se->dl_throttled = 0; dl_se->dl_new = 1; + dl_se->dl_yielded = 0; } static void __setscheduler_params(struct task_struct *p, diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b08095786cb8..800e99b99075 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) sched_clock_tick(); update_rq_clock(rq); dl_se->dl_throttled = 0; + dl_se->dl_yielded = 0; if (p->on_rq) { enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); if (task_has_dl_policy(rq->curr)) @@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq) * We make the task go to sleep until its current deadline by * forcing its runtime to zero. This way, update_curr_dl() stops * it and the bandwidth timer will wake it up and will give it - * new scheduling parameters (thanks to dl_new=1). + * new scheduling parameters (thanks to dl_yielded=1). */ if (p->dl.runtime > 0) { - rq->curr->dl.dl_new = 1; + rq->curr->dl.dl_yielded = 1; p->dl.runtime = 0; } update_curr_dl(rq); -- cgit v1.2.3 From 6a7cd273dc4bc3246f37ebe874754a54ccb29141 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 17 Apr 2014 10:05:02 +0800 Subject: sched/deadline: Fix memory leak Free cpudl->free_cpus allocated in cpudl_init(). Signed-off-by: Li Zefan Acked-by: Juri Lelli Signed-off-by: Peter Zijlstra Cc: # 3.14+ Link: http://lkml.kernel.org/r/534F36CE.2000409@huawei.com Signed-off-by: Ingo Molnar --- kernel/sched/cpudeadline.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5b9bb42b2d47..ab001b5d5048 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp) */ void cpudl_cleanup(struct cpudl *cp) { - /* - * nothing to do for the moment - */ + free_cpumask_var(cp->free_cpus); } -- cgit v1.2.3 From 6227cb00cc120f9a43ce8313bb0475ddabcb7d01 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Red Hat)" Date: Sun, 13 Apr 2014 09:34:53 -0400 Subject: sched: Use CPUPRI_NR_PRIORITIES instead of MAX_RT_PRIO in cpupri check The check at the beginning of cpupri_find() makes sure that the task_pri variable does not exceed the cp->pri_to_cpu array length. But that length is CPUPRI_NR_PRIORITIES not MAX_RT_PRIO, where it will miss the last two priorities in that array. As task_pri is computed from convert_prio() which should never be bigger than CPUPRI_NR_PRIORITIES, if the check should cause a panic if it is hit. 
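To make the off-by-two concrete, assuming the convert_prio() mapping of this era: with MAX_RT_PRIO == 100, an RT task of priority p in [0, 99] is converted to index MAX_RT_PRIO - p + 1, i.e. [2, 101], with indices 0 and 1 reserved for the idle and normal classes, so pri_to_cpu[] has CPUPRI_NR_PRIORITIES == MAX_RT_PRIO + 2 == 102 valid slots. The old 'task_pri >= MAX_RT_PRIO' test therefore rejected the valid indices 100 and 101, the two highest RT priorities, which is why the check is turned into a BUG_ON() below.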
Reported-by: Mike Galbraith
Signed-off-by: Steven Rostedt
Signed-off-by: Peter Zijlstra
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1397015410.5212.13.camel@marge.simpson.net
Signed-off-by: Ingo Molnar
---
 kernel/sched/cpupri.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..3031bac8aa3e 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 	int idx = 0;
 	int task_pri = convert_prio(p->prio);

-	if (task_pri >= MAX_RT_PRIO)
-		return 0;
+	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

 	for (idx = 0; idx < task_pri; idx++) {
 		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
--
cgit v1.2.3


From 6ccdc84b81a0a6c09a7f0427761d2f8cecfc2218 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 24 Apr 2014 12:00:47 +0200
Subject: sched: Skip double execution of pick_next_task_fair()

Tim wrote:

 "The current code will call pick_next_task_fair a second time in the
  slow path if we did not pull any task in our first try. This is
  really unnecessary as we already know no task can be pulled and it
  doubles the delay for the cpu to enter idle.

  We instrumented some network workloads and saw that
  pick_next_task_fair is frequently called twice before a cpu enters
  idle. The call to pick_next_task_fair can add non-trivial latency as
  it calls load_balance which runs find_busiest_group on a hierarchy of
  sched domains spanning the cpus for a large system. For some 4-socket
  systems, we saw almost 0.25 msec spent per call of pick_next_task_fair
  before a cpu can be idled."

Optimize the second call away for the common case and document the
dependency.

Reported-by: Tim Chen
Signed-off-by: Peter Zijlstra
Cc: Linus Torvalds
Cc: Len Brown
Link: http://lkml.kernel.org/r/20140424100047.GP11096@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar
---
 kernel/sched/core.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e62c65a12d5b..28921ec91b3d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 	if (likely(prev->sched_class == class &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev);
-		if (likely(p && p != RETRY_TASK))
-			return p;
+		if (unlikely(p == RETRY_TASK))
+			goto again;
+
+		/* assumes fair_sched_class->next == idle_sched_class */
+		if (unlikely(!p))
+			p = idle_sched_class.pick_next_task(rq, prev);
+
+		return p;
 	}

 again:
--
cgit v1.2.3


From 0e5b5337f0da073e1f17aec3c322ea7826975d0d Mon Sep 17 00:00:00 2001
From: Jason Low
Date: Mon, 28 Apr 2014 15:45:54 -0700
Subject: sched: Fix updating rq->max_idle_balance_cost and rq->next_balance in idle_balance()

The following commit:

  e5fc66119ec9 ("sched: Fix race in idle_balance()")

can potentially cause rq->max_idle_balance_cost to not be updated,
even when load_balance(NEWLY_IDLE) is attempted and the per-sd max
cost value is updated.

Preeti noticed a similar issue with updating rq->next_balance.

In this patch, we fix this by making sure we still check/update those
values even if a task gets enqueued while browsing the domains.
Signed-off-by: Jason Low
Reviewed-by: Preeti U Murthy
Signed-off-by: Peter Zijlstra
Cc: morten.rasmussen@arm.com
Cc: aswin@hp.com
Cc: daniel.lezcano@linaro.org
Cc: alex.shi@linaro.org
Cc: efault@gmx.de
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/1398725155-7591-2-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
 	int this_cpu = this_rq->cpu;

 	idle_enter_fair(this_rq);
+
 	/*
 	 * We must set idle_stamp _before_ calling idle_balance(), such that we
 	 * measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)

 	raw_spin_lock(&this_rq->lock);

+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
+
 	/*
-	 * While browsing the domains, we released the rq lock.
-	 * A task could have be enqueued in the meantime
+	 * While browsing the domains, we released the rq lock, a task could
+	 * have been enqueued in the meantime. Since we're not going idle,
+	 * pretend we pulled a task.
 	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task) {
+	if (this_rq->cfs.h_nr_running && !pulled_task)
 		pulled_task = 1;
-		goto out;
-	}

 	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
 		/*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
 		this_rq->next_balance = next_balance;
 	}

-	if (curr_cost > this_rq->max_idle_balance_cost)
-		this_rq->max_idle_balance_cost = curr_cost;
-
 out:
 	/* Is there a task of a high priority class? */
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
--
cgit v1.2.3


From 2b4cfe64dee0d84506b951d81bf55d9891744d25 Mon Sep 17 00:00:00 2001
From: Jason Low
Date: Wed, 23 Apr 2014 18:30:34 -0700
Subject: sched/numa: Initialize newidle balance stats in sd_numa_init()

Also initialize the per-sd variables for newidle load balancing
in sd_numa_init().

Signed-off-by: Jason Low
Acked-by: morten.rasmussen@arm.com
Cc: daniel.lezcano@linaro.org
Cc: alex.shi@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: efault@gmx.de
Cc: vincent.guittot@linaro.org
Cc: aswin@hp.com
Cc: chegu_vinod@hp.com
Signed-off-by: Peter Zijlstra
Link: http://lkml.kernel.org/r/1398303035-18255-3-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/core.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 28921ec91b3d..13584f1cccfc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6026,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
 					,
 		.last_balance		= jiffies,
 		.balance_interval	= sd_weight,
+		.max_newidle_lb_cost	= 0,
+		.next_decay_max_lb_cost	= jiffies,
 	};
 	SD_INIT_NAME(sd, NUMA);
 	sd->private = &tl->data;
--
cgit v1.2.3
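
[Editor's note: the following is not part of any patch above. It is a minimal
userspace sketch showing the scenario that the dl_yielded fix addresses: a
SCHED_DEADLINE task that calls sched_yield() each period. On kernels without
5bfd126e80dc the task is throttled after the first yield and never replenished;
with the fix it resumes every period. The struct/wrapper names (my_sched_attr,
set_deadline_policy) are illustrative, the syscall number 314 is x86_64-specific,
and running it typically requires root.]

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sched.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

#ifndef __NR_sched_setattr
#define __NR_sched_setattr 314		/* x86_64; other architectures differ */
#endif

/* mirrors the kernel's struct sched_attr, declared locally since old
 * glibc versions do not expose it */
struct my_sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int set_deadline_policy(pid_t pid, const struct my_sched_attr *attr)
{
	/* raw syscall because glibc of that era had no sched_setattr() wrapper */
	return syscall(__NR_sched_setattr, pid, attr, 0);
}

int main(void)
{
	struct my_sched_attr attr;
	int i;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/* 10 ms runtime ... */
	attr.sched_deadline = 100 * 1000 * 1000;	/* ... per 100 ms deadline */
	attr.sched_period   = 100 * 1000 * 1000;	/* and 100 ms period */

	if (set_deadline_policy(0, &attr)) {
		perror("sched_setattr");	/* usually needs root privileges */
		return 1;
	}

	for (i = 0; i < 50; i++) {
		printf("iteration %d\n", i);
		fflush(stdout);
		sched_yield();	/* give back the rest of this period's runtime;
				 * without the dl_yielded fix the task hangs here */
	}
	printf("done\n");
	return 0;
}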