drivers/nvdimm: Use local kmaps

Replace the now deprecated kmap_atomic() with kmap_local_page().

Optimizing nvdimm/pmem for highmem makes no sense, as this hardware is
64-bit only, and the mapped regions in both btt and pmem do not require
disabling preemption or pagefaults. Specifically, kmap_local_page() does
not care about the caller's atomic context (such as reads done while
holding the btt arena spinlock) or about the NVDIMM_IO_ATOMIC semantics
used to avoid error handling when accessing the btt arena. The same
applies to the memcpy cases. kmap_local temporary mappings remain valid
across context switches.
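
For reference, below is a minimal sketch (not part of this patch) of the
local-kmap pattern the conversion follows; the copy_into_page() helper
name is made up for illustration:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/*
	 * Copy 'len' bytes into 'page' at 'off' through a short-lived local
	 * mapping. Unlike kmap_atomic(), kmap_local_page() does not disable
	 * preemption or pagefaults, and the mapping remains valid for the
	 * owning task even if it is scheduled out and back in.
	 */
	static void copy_into_page(struct page *page, unsigned int off,
				   const void *src, size_t len)
	{
		void *mem = kmap_local_page(page);

		memcpy(mem + off, src, len);
		kunmap_local(mem);
	}

As with kmap_atomic(), local mappings nest stack-like per task, so unmaps
must be issued in reverse order of the maps.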

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://patch.msgid.link/20251128212303.2170933-1-dave@stgolabs.net
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Author: Davidlohr Bueso, 2025-11-28 13:23:03 -08:00 (committed by Ira Weiny)
Commit: 9c0fc1d37f, parent 63804fed14
2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1104,10 +1104,10 @@ static int btt_data_read(struct arena_info *arena, struct page *page,
 {
 	int ret;
 	u64 nsoff = to_namespace_offset(arena, lba);
-	void *mem = kmap_atomic(page);
+	void *mem = kmap_local_page(page);
 
 	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
-	kunmap_atomic(mem);
+	kunmap_local(mem);
 
 	return ret;
 }
@@ -1117,20 +1117,20 @@ static int btt_data_write(struct arena_info *arena, u32 lba,
 {
 	int ret;
 	u64 nsoff = to_namespace_offset(arena, lba);
-	void *mem = kmap_atomic(page);
+	void *mem = kmap_local_page(page);
 
 	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
-	kunmap_atomic(mem);
+	kunmap_local(mem);
 
 	return ret;
 }
 
 static void zero_fill_data(struct page *page, unsigned int off, u32 len)
 {
-	void *mem = kmap_atomic(page);
+	void *mem = kmap_local_page(page);
 
 	memset(mem + off, 0, len);
-	kunmap_atomic(mem);
+	kunmap_local(mem);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY

diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -128,10 +128,10 @@ static void write_pmem(void *pmem_addr, struct page *page,
 	void *mem;
 
 	while (len) {
-		mem = kmap_atomic(page);
+		mem = kmap_local_page(page);
 		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
 		memcpy_flushcache(pmem_addr, mem + off, chunk);
-		kunmap_atomic(mem);
+		kunmap_local(mem);
 		len -= chunk;
 		off = 0;
 		page++;
@@ -147,10 +147,10 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
 	void *mem;
 
 	while (len) {
-		mem = kmap_atomic(page);
+		mem = kmap_local_page(page);
 		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
 		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
-		kunmap_atomic(mem);
+		kunmap_local(mem);
 		if (rem)
 			return BLK_STS_IOERR;
 		len -= chunk;