Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d1f6dc5a715d..7535ef32a75b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -196,7 +196,6 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
 	spinlock_t lock; /* for from, to */
-	struct mm_struct *mm;
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
 	unsigned long flags;
@@ -4801,8 +4800,6 @@ static void __mem_cgroup_clear_mc(void)
 
 static void mem_cgroup_clear_mc(void)
 {
-	struct mm_struct *mm = mc.mm;
-
 	/*
 	 * we must clear moving_task before waking up waiters at the end of
 	 * task migration.
@@ -4812,10 +4809,7 @@ static void mem_cgroup_clear_mc(void)
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
-	mc.mm = NULL;
 	spin_unlock(&mc.lock);
-
-	mmput(mm);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4872,7 +4866,6 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		VM_BUG_ON(mc.moved_swap);
 
 		spin_lock(&mc.lock);
-		mc.mm = mm;
 		mc.from = from;
 		mc.to = memcg;
 		mc.flags = move_flags;
@@ -4882,9 +4875,8 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		ret = mem_cgroup_precharge_mc(mm);
 		if (ret)
 			mem_cgroup_clear_mc();
-	} else {
-		mmput(mm);
 	}
+	mmput(mm);
 	return ret;
 }
 
@@ -4998,11 +4990,11 @@ put: /* get_mctgt_type() gets the page */
 	return ret;
 }
 
-static void mem_cgroup_move_charge(void)
+static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
 	struct mm_walk mem_cgroup_move_charge_walk = {
 		.pmd_entry = mem_cgroup_move_charge_pte_range,
-		.mm = mc.mm,
+		.mm = mm,
 	};
 
 	lru_add_drain_all();
@@ -5014,7 +5006,7 @@ static void mem_cgroup_move_charge(void)
 	atomic_inc(&mc.from->moving_account);
 	synchronize_rcu();
 retry:
-	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		/*
 		 * Someone who are holding the mmap_sem might be waiting in
 		 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -5031,16 +5023,23 @@ retry:
 	 * additional charge, the page walk just aborts.
 	 */
 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
-	up_read(&mc.mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }
 
-static void mem_cgroup_move_task(void)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
-	if (mc.to) {
-		mem_cgroup_move_charge();
-		mem_cgroup_clear_mc();
+	struct cgroup_subsys_state *css;
+	struct task_struct *p = cgroup_taskset_first(tset, &css);
+	struct mm_struct *mm = get_task_mm(p);
+
+	if (mm) {
+		if (mc.to)
+			mem_cgroup_move_charge(mm);
+		mmput(mm);
 	}
+	if (mc.to)
+		mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -5054,7 +5053,7 @@ static int mem_cgroup_allow_attach(struct cgroup_taskset *tset)
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(void)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
 }
 #endif
@@ -5270,7 +5269,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.attach = mem_cgroup_move_task,
 	.allow_attach = mem_cgroup_allow_attach,
-	.post_attach = mem_cgroup_move_task,
 	.bind = mem_cgroup_bind,
 	.dfl_cftypes = memory_files,
 	.legacy_cftypes = mem_cgroup_legacy_files,