fs: hugetlbfs: cleanup folio in adjust_range_hwpoison()

Let's clean up and simplify the function a bit.

Link: https://lkml.kernel.org/r/20250901150359.867252-17-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit a638ee7f19 (parent 06d42cf49e)
Author: David Hildenbrand, 2025-09-01 17:03:37 +02:00
Committer: Andrew Morton

@@ -192,37 +192,25 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
  * Returns the maximum number of bytes one can read without touching the 1st raw
  * HWPOISON page.
- *
- * The implementation borrows the iteration logic from copy_page_to_iter*.
  */
 static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
 		size_t bytes)
 {
-	struct page *page;
-	size_t n = 0;
-	size_t res = 0;
+	struct page *page = folio_page(folio, offset / PAGE_SIZE);
+	size_t safe_bytes;
 
-	/* First page to start the loop. */
-	page = folio_page(folio, offset / PAGE_SIZE);
-	offset %= PAGE_SIZE;
-	while (1) {
-		if (is_raw_hwpoison_page_in_hugepage(page))
-			break;
+	if (is_raw_hwpoison_page_in_hugepage(page))
+		return 0;
 
-		/* Safe to read n bytes without touching HWPOISON subpage. */
-		n = min(bytes, (size_t)PAGE_SIZE - offset);
-		res += n;
-		bytes -= n;
-		if (!bytes || !n)
-			break;
-		offset += n;
-		if (offset == PAGE_SIZE) {
-			page++;
-			offset = 0;
-		}
-	}
+	/* Safe to read the remaining bytes in this page. */
+	safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
+	page++;
 
-	return res;
+	/* Check each remaining page as long as we are not done yet. */
+	for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, page++)
+		if (is_raw_hwpoison_page_in_hugepage(page))
+			break;
+	return min(safe_bytes, bytes);
 }
 
 /*
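
For illustration only, here is a minimal userspace sketch of the reworked logic, not kernel code: the hugetlb folio is modeled as an array of per-page poison flags, page_is_poisoned() is a hypothetical stand-in for is_raw_hwpoison_page_in_hugepage(), and PAGE_SIZE is assumed to be 4 KiB.

/*
 * Userspace sketch of the simplified adjust_range_hwpoison() flow.
 * Assumptions: the folio is an array of poison flags, page_is_poisoned()
 * replaces is_raw_hwpoison_page_in_hugepage(), PAGE_SIZE is 4096.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool page_is_poisoned(const bool *poisoned, size_t idx)
{
	return poisoned[idx];
}

/* Maximum number of bytes readable from @offset before the first poisoned page. */
static size_t adjust_range_hwpoison_sketch(const bool *poisoned, size_t offset,
		size_t bytes)
{
	size_t idx = offset / PAGE_SIZE;
	size_t safe_bytes;

	if (page_is_poisoned(poisoned, idx))
		return 0;

	/* Safe to read the remaining bytes in the first page. */
	safe_bytes = PAGE_SIZE - (offset % PAGE_SIZE);
	idx++;

	/* Extend page by page until we hit poison or cover the request. */
	for (; safe_bytes < bytes; safe_bytes += PAGE_SIZE, idx++)
		if (page_is_poisoned(poisoned, idx))
			break;

	return safe_bytes < bytes ? safe_bytes : bytes;
}

int main(void)
{
	/* Four pages; page 2 is poisoned. */
	bool poisoned[4] = { false, false, true, false };

	/* Read three pages' worth starting 100 bytes into page 0. */
	printf("%zu\n", adjust_range_hwpoison_sketch(poisoned, 100, 3 * PAGE_SIZE));
	/* Prints 8092: (4096 - 100) from page 0 plus all of page 1. */
	return 0;
}

The key simplification in the patch is that the result is no longer accumulated page by page in res: the first page's remainder seeds safe_bytes, the loop only extends it across clean pages, and the return value is simply min(safe_bytes, bytes).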