mm/mincore, swap: consolidate swap cache checking for mincore

Patch series "mm/mincore: minor clean up for swap cache checking".

This series cleans up a swap cache helper that is only used by mincore,
moving it back into the mincore code.  It also separates the swap cache
related logic from the shmem / page cache logic in mincore.

With this series we have fewer lines of code and better performance.

Before this series:
mincore on a swapped out 16G anon mmap range:
Took 488220 us
mincore on 16G shmem mmap range:
Took 530272 us.

After this series:
mincore on a swapped out 16G anon mmap range:
Took 446763 us
mincore on 16G shmem mmap range:
Took 460496 us.

About 10% faster.


This patch (of 2):

The filemap_get_incore_folio (previously find_get_incore_page) helper was
introduced by commit 61ef186557 ("mm: factor find_get_incore_page out of
mincore_page") to be used by later commit f5df8635c5 ("mm: use
find_get_incore_page in memcontrol"), so memory cgroup charge move code
can be simplified.

But commit 6b611388b6 ("memcg-v1: remove charge move code") removed that
user completely; it's now only used by mincore.

So this commit basically reverts commit 61ef186557 ("mm: factor
find_get_incore_page out of mincore_page").  Move it back to mincore side
to simplify the code.

Link: https://lkml.kernel.org/r/20250811172018.48901-1-ryncsn@gmail.com
Link: https://lkml.kernel.org/r/20250811172018.48901-2-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Acked-by: Nhat Pham <nphamcs@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Kairui Song 2025-08-12 01:20:17 +08:00 committed by Andrew Morton
parent 7bca1760cd
commit 27763edac9
3 changed files with 27 additions and 50 deletions

View file

@ -64,8 +64,33 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
* any other file mapping (ie. marked !present and faulted in with
* tmpfs's .fault). So swapped out tmpfs mappings are tested here.
*/
folio = filemap_get_incore_folio(mapping, index);
if (!IS_ERR(folio)) {
if (IS_ENABLED(CONFIG_SWAP) && shmem_mapping(mapping)) {
folio = filemap_get_entry(mapping, index);
/*
* shmem/tmpfs may return swap: account for swapcache
* page too.
*/
if (xa_is_value(folio)) {
struct swap_info_struct *si;
swp_entry_t swp = radix_to_swp_entry(folio);
/* There might be swapin error entries in shmem mapping. */
if (non_swap_entry(swp))
return 0;
/* Prevent swap device to being swapoff under us */
si = get_swap_device(swp);
if (si) {
folio = filemap_get_folio(swap_address_space(swp),
swap_cache_index(swp));
put_swap_device(si);
} else {
return 0;
}
}
} else {
folio = filemap_get_folio(mapping, index);
}
if (!IS_ERR_OR_NULL(folio)) {
present = folio_test_uptodate(folio);
folio_put(folio);
}

View file

@ -64,9 +64,6 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
struct folio *swap_cache_get_folio(swp_entry_t entry,
struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
pgoff_t index);
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
struct swap_iocb **plug);
@ -178,13 +175,6 @@ static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
return NULL;
}
/*
 * Stub used when swap support is compiled out (presumably the
 * !CONFIG_SWAP branch — the preprocessor guard is outside this hunk):
 * with no swap cache to consult, only the page cache can hold the
 * folio, so this degenerates to a plain page cache lookup.
 */
static inline
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	struct folio *folio;

	folio = filemap_get_folio(mapping, index);
	return folio;
}
static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
return NULL;

View file

@ -323,44 +323,6 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
return folio;
}
/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache: a shmem mapping may hold a swap entry where a
 * folio would otherwise be, in which case the lookup is redirected to the
 * swap cache.
 *
 * Return: The found folio, or an ERR_PTR() on failure.  Note that every
 * failure path here returns ERR_PTR(-ENOENT), not %NULL (a plain
 * filemap_get_folio() miss is presumably also an ERR_PTR — confirm
 * against its definition).
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	/* Nothing at this index at all. */
	if (!folio)
		return ERR_PTR(-ENOENT);
	/* A real folio was found in the page cache — done. */
	if (!xa_is_value(folio))
		return folio;
	/* Only shmem mappings store swap entries as xarray values. */
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/*
	 * Prevent swapoff from happening to us: the device reference
	 * must be held across the swap cache lookup below.
	 */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	/* Redirect the lookup into the swap cache for this entry. */
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
bool skip_if_exists)