dma-mapping fixes for Linux 6.19

- important fix for ARM 32-bit based systems using cma= kernel parameter
   (Oreoluwa Babatunde)
 - a fix for the corner case of the DMA atomic pool based allocations
   (Sai Sree Kartheek Adivi)
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQSrngzkoBtlA8uaaJ+Jp1EFxbsSRAUCaXyw5wAKCRCJp1EFxbsS
 RBN7AP9rEfEEB3JBOglcZG3TTrLoCKLnw+16uroyKuD95RLWrQD/bWeJnRYcEZB5
 ox1peKYBA4SsDB3bCUWFDfW4I0OZFQQ=
 =9Nx1
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-6.19-2026-01-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

 - important fix for ARM 32-bit based systems using cma= kernel
   parameter (Oreoluwa Babatunde)

 - a fix for a corner case in DMA atomic pool based allocations
   (Sai Sree Kartheek Adivi)

* tag 'dma-mapping-6.19-2026-01-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  dma/pool: distinguish between missing and exhausted atomic pools
  of: reserved_mem: Allow reserved_mem framework detect "cma=" kernel param
This commit is contained in:
Linus Torvalds 2026-01-30 13:15:04 -08:00
commit 2b54ac9e0c
4 changed files with 42 additions and 9 deletions

View file

@ -157,13 +157,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
phys_addr_t base, size;
int i, len;
const __be32 *prop;
bool nomap;
bool nomap, default_cma;
prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
if (!prop)
return -ENOENT;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
if (default_cma && cma_skip_dt_default_reserved_mem()) {
pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
return -EINVAL;
}
for (i = 0; i < len; i++) {
u64 b, s;
@ -248,10 +254,13 @@ void __init fdt_scan_reserved_mem_reg_nodes(void)
fdt_for_each_subnode(child, fdt, node) {
const char *uname;
bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
u64 b, s;
if (!of_fdt_device_is_available(fdt, child))
continue;
if (default_cma && cma_skip_dt_default_reserved_mem())
continue;
if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
continue;
@ -389,7 +398,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
phys_addr_t base = 0, align = 0, size;
int i, len;
const __be32 *prop;
bool nomap;
bool nomap, default_cma;
int ret;
prop = of_get_flat_dt_prop(node, "size", &len);
@ -413,6 +422,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
if (default_cma && cma_skip_dt_default_reserved_mem()) {
pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
return -EINVAL;
}
/* Need adjust the alignment to satisfy the CMA requirement */
if (IS_ENABLED(CONFIG_CMA)

View file

@ -57,6 +57,15 @@ extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long e
extern void cma_reserve_pages_on_error(struct cma *cma);
#ifdef CONFIG_DMA_CMA
extern bool cma_skip_dt_default_reserved_mem(void);
#else
/*
 * !CONFIG_DMA_CMA stub: without DMA CMA there is no "cma=" cmdline
 * override to honour, so the devicetree default region is never skipped.
 */
static inline bool cma_skip_dt_default_reserved_mem(void)
{
return false;
}
#endif
#ifdef CONFIG_CMA
struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
bool cma_free_folio(struct cma *cma, const struct folio *folio);

View file

@ -91,6 +91,16 @@ static int __init early_cma(char *p)
}
early_param("cma", early_cma);
/*
 * cma_skip_dt_default_reserved_mem() - should the devicetree default
 * CMA region ("linux,cma-default") be ignored?
 *
 * Called from the reserved_mem framework. Returns true when the "cma="
 * kernel parameter was given: early_cma() moves size_cmdline off its -1
 * sentinel, and the cmdline-specified area then takes precedence over
 * the devicetree-declared default CMA region.
 */
bool __init cma_skip_dt_default_reserved_mem(void)
{
return size_cmdline != -1;
}
#ifdef CONFIG_DMA_NUMA_CMA
static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
@ -470,12 +480,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
struct cma *cma;
int err;
if (size_cmdline != -1 && default_cma) {
pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
rmem->name);
return -EBUSY;
}
if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;

View file

@ -277,15 +277,20 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size,
{
struct gen_pool *pool = NULL;
struct page *page;
bool pool_found = false;
while ((pool = dma_guess_pool(pool, gfp))) {
pool_found = true;
page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
phys_addr_ok);
if (page)
return page;
}
WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
if (pool_found)
WARN(!(gfp & __GFP_NOWARN), "DMA pool exhausted for %s\n", dev_name(dev));
else
WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
return NULL;
}