diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index be810c1fbfc3..2289a0299331 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2003,6 +2003,19 @@ static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,
 	return flush;
 }
 
+static void schedule_drain_work(int cpu, struct work_struct *work)
+{
+	/*
+	 * Protect housekeeping cpumask read and work enqueue together
+	 * in the same RCU critical section so that later cpuset isolated
+	 * partition update only need to wait for an RCU GP and flush the
+	 * pending work on newly isolated CPUs.
+	 */
+	guard(rcu)();
+	if (!cpu_is_isolated(cpu))
+		schedule_work_on(cpu, work);
+}
+
 /*
  * Drains all per-CPU charge caches for given root_memcg resp. subtree
  * of the hierarchy under it.
@@ -2032,8 +2045,8 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
 					     &memcg_st->flags)) {
 			if (cpu == curcpu)
 				drain_local_memcg_stock(&memcg_st->work);
-			else if (!cpu_is_isolated(cpu))
-				schedule_work_on(cpu, &memcg_st->work);
+			else
+				schedule_drain_work(cpu, &memcg_st->work);
 		}
 
 		if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) &&
@@ -2042,8 +2055,8 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
 					     &obj_st->flags)) {
 			if (cpu == curcpu)
 				drain_local_obj_stock(&obj_st->work);
-			else if (!cpu_is_isolated(cpu))
-				schedule_work_on(cpu, &obj_st->work);
+			else
+				schedule_drain_work(cpu, &obj_st->work);
 		}
 	}
 	migrate_enable();
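
For context, the isolation-side counterpart that the new comment alludes to could look roughly like the sketch below. This is not part of the patch: the function name flush_drain_work_on_isolated(), its cpumask parameter, and the pcpu_drain_work() accessor are hypothetical stand-ins for whatever the cpuset isolated-partition update path actually uses; only synchronize_rcu(), for_each_cpu() and flush_work() are existing kernel APIs. The point is the ordering: one RCU grace period guarantees that every concurrent schedule_drain_work() has either observed the new isolation state and skipped the enqueue, or has already queued its work, so flushing afterwards catches everything.

/*
 * Hypothetical sketch of the cpuset side handling a newly isolated
 * partition; not part of this patch.
 */
static void flush_drain_work_on_isolated(const struct cpumask *newly_isolated)
{
	int cpu;

	/*
	 * After this grace period every schedule_drain_work() RCU section
	 * has finished: it either saw cpu_is_isolated() return true and
	 * skipped the enqueue, or its work is already pending below.
	 */
	synchronize_rcu();

	for_each_cpu(cpu, newly_isolated)
		flush_work(pcpu_drain_work(cpu));	/* hypothetical per-CPU drain work accessor */
}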