mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
memcg: move mem_cgroup_usage to memcontrol-v1.c
Patch series "memcg cleanups", v3. Two code moves/removals with no behavior change. This patch (of 2): Currently, mem_cgroup_usage is only used for v1, just move it to memcontrol-v1.c Link: https://lkml.kernel.org/r/20251211013019.2080004-1-chenridong@huaweicloud.com Link: https://lkml.kernel.org/r/20251211013019.2080004-2-chenridong@huaweicloud.com Signed-off-by: Chen Ridong <chenridong@huawei.com> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Shakeel Butt <shakeel.butt@linux.dev> Acked-by: Michal Koutný <mkoutny@suse.com> Cc: Axel Rasmussen <axelrasmussen@google.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Lu Jialin <lujialin4@huawei.com> Cc: Muchun Song <muchun.song@linux.dev> Cc: Qi Zheng <zhengqi.arch@bytedance.com> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Wei Xu <weixugc@google.com> Cc: Yuanchu Xie <yuanchu@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
85aa391974
commit
558605a530
3 changed files with 22 additions and 24 deletions
|
|
@ -427,6 +427,28 @@ static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
|
|||
}
|
||||
#endif
|
||||
|
||||
static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
|
||||
{
|
||||
unsigned long val;
|
||||
|
||||
if (mem_cgroup_is_root(memcg)) {
|
||||
/*
|
||||
* Approximate root's usage from global state. This isn't
|
||||
* perfect, but the root usage was always an approximation.
|
||||
*/
|
||||
val = global_node_page_state(NR_FILE_PAGES) +
|
||||
global_node_page_state(NR_ANON_MAPPED);
|
||||
if (swap)
|
||||
val += total_swap_pages - get_nr_swap_pages();
|
||||
} else {
|
||||
if (!swap)
|
||||
val = page_counter_read(&memcg->memory);
|
||||
else
|
||||
val = page_counter_read(&memcg->memsw);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
|
||||
{
|
||||
struct mem_cgroup_threshold_ary *t;
|
||||
|
|
|
|||
|
|
@ -22,8 +22,6 @@
|
|||
iter != NULL; \
|
||||
iter = mem_cgroup_iter(NULL, iter, NULL))
|
||||
|
||||
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
|
||||
|
||||
void drain_all_stock(struct mem_cgroup *root_memcg);
|
||||
|
||||
unsigned long memcg_events(struct mem_cgroup *memcg, int event);
|
||||
|
|
|
|||
|
|
@ -3272,28 +3272,6 @@ void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
|
|||
css_get_many(&__folio_memcg(folio)->css, new_refs);
|
||||
}
|
||||
|
||||
unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
|
||||
{
|
||||
unsigned long val;
|
||||
|
||||
if (mem_cgroup_is_root(memcg)) {
|
||||
/*
|
||||
* Approximate root's usage from global state. This isn't
|
||||
* perfect, but the root usage was always an approximation.
|
||||
*/
|
||||
val = global_node_page_state(NR_FILE_PAGES) +
|
||||
global_node_page_state(NR_ANON_MAPPED);
|
||||
if (swap)
|
||||
val += total_swap_pages - get_nr_swap_pages();
|
||||
} else {
|
||||
if (!swap)
|
||||
val = page_counter_read(&memcg->memory);
|
||||
else
|
||||
val = page_counter_read(&memcg->memsw);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
static int memcg_online_kmem(struct mem_cgroup *memcg)
|
||||
{
|
||||
struct obj_cgroup *objcg;
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue