iommu/pages: Remove the order argument to iommu_free_pages()

Now that we have a folio under the allocation, iommu_free_pages() can know
the order of the original allocation and do the correct thing to free it.

The next patch will rename iommu_free_page() to iommu_free_pages() so we
have naming consistency with iommu_alloc_pages_node().

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/5-v4-c8663abbb606+3f7-iommu_pages_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
Jason Gunthorpe 2025-04-08 13:53:53 -03:00 committed by Joerg Roedel
parent c11a1a4792
commit 4316ba4a50
13 changed files with 36 additions and 47 deletions

View file

@ -644,8 +644,7 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
iommu_free_pages(pci_seg->dev_table,
get_order(pci_seg->dev_table_size));
iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
@ -662,8 +661,7 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
{
iommu_free_pages(pci_seg->rlookup_table,
get_order(pci_seg->rlookup_table_size));
iommu_free_pages(pci_seg->rlookup_table);
pci_seg->rlookup_table = NULL;
}
@ -682,8 +680,7 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
{
kmemleak_free(pci_seg->irq_lookup_table);
iommu_free_pages(pci_seg->irq_lookup_table,
get_order(pci_seg->rlookup_table_size));
iommu_free_pages(pci_seg->irq_lookup_table);
pci_seg->irq_lookup_table = NULL;
}
@ -707,8 +704,7 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
{
iommu_free_pages(pci_seg->alias_table,
get_order(pci_seg->alias_table_size));
iommu_free_pages(pci_seg->alias_table);
pci_seg->alias_table = NULL;
}
@ -817,7 +813,7 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
static void __init free_command_buffer(struct amd_iommu *iommu)
{
iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
iommu_free_pages(iommu->cmd_buf);
}
void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
@ -829,7 +825,7 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
if (buf &&
check_feature(FEATURE_SNP) &&
set_memory_4k((unsigned long)buf, (1 << order))) {
iommu_free_pages(buf, order);
iommu_free_pages(buf);
buf = NULL;
}
@ -873,14 +869,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
static void __init free_event_buffer(struct amd_iommu *iommu)
{
iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
iommu_free_pages(iommu->evt_buf);
}
static void free_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
iommu_free_pages(iommu->ga_log_tail, get_order(8));
iommu_free_pages(iommu->ga_log);
iommu_free_pages(iommu->ga_log_tail);
#endif
}
@ -2789,8 +2785,7 @@ static void early_enable_iommus(void)
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
iommu_free_pages(pci_seg->old_dev_tbl_cpy,
get_order(pci_seg->dev_table_size));
iommu_free_pages(pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@ -2803,8 +2798,7 @@ static void early_enable_iommus(void)
pr_info("Copied DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
iommu_free_pages(pci_seg->dev_table,
get_order(pci_seg->dev_table_size));
iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
}

View file

@ -3246,7 +3246,7 @@ out_unlock:
spin_unlock_irqrestore(&iommu_table_lock, flags);
if (new_table) {
iommu_free_pages(new_table->table, order);
iommu_free_pages(new_table->table);
kfree(new_table);
}
return table;

View file

@ -48,7 +48,7 @@ void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
{
iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
iommu_free_pages(iommu->ppr_log);
}
/*

View file

@ -932,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
return &domain->domain;
err_lv2ent:
iommu_free_pages(domain->lv2entcnt, 1);
iommu_free_pages(domain->lv2entcnt);
err_counter:
iommu_free_pages(domain->pgtable, 2);
iommu_free_pages(domain->pgtable);
err_pgtable:
kfree(domain);
return NULL;
@ -975,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
phys_to_virt(base));
}
iommu_free_pages(domain->pgtable, 2);
iommu_free_pages(domain->lv2entcnt, 1);
iommu_free_pages(domain->pgtable);
iommu_free_pages(domain->lv2entcnt);
kfree(domain);
}

View file

@ -612,7 +612,7 @@ out_free_fwnode:
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
iommu_free_pages(ir_table_base);
out_free_table:
kfree(ir_table);
@ -633,7 +633,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
irq_domain_free_fwnode(fn);
iommu->ir_domain = NULL;
}
iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
iommu_free_pages(iommu->ir_table->base);
bitmap_free(iommu->ir_table->bitmap);
kfree(iommu->ir_table);
iommu->ir_table = NULL;

View file

@ -67,7 +67,6 @@ int intel_pasid_alloc_table(struct device *dev)
}
pasid_table->table = dir;
pasid_table->order = order;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
@ -100,7 +99,7 @@ void intel_pasid_free_table(struct device *dev)
iommu_free_page(table);
}
iommu_free_pages(pasid_table->table, pasid_table->order);
iommu_free_pages(pasid_table->table);
kfree(pasid_table);
}

View file

@ -47,7 +47,6 @@ struct pasid_entry {
/* The representative of a PASID table */
struct pasid_table {
void *table; /* pasid table pointer */
int order; /* page order of pasid table */
u32 max_pasid; /* max pasid */
};

View file

@ -340,7 +340,7 @@ free_hwirq:
dmar_free_hwirq(irq);
iommu->pr_irq = 0;
free_prq:
iommu_free_pages(iommu->prq, PRQ_ORDER);
iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return ret;
@ -363,7 +363,7 @@ int intel_iommu_finish_prq(struct intel_iommu *iommu)
iommu->iopf_queue = NULL;
}
iommu_free_pages(iommu->prq, PRQ_ORDER);
iommu_free_pages(iommu->prq);
iommu->prq = NULL;
return 0;

View file

@ -300,7 +300,7 @@ out_free:
if (cfg->free)
cfg->free(cookie, pages, size);
else
iommu_free_pages(pages, order);
iommu_free_pages(pages);
return NULL;
}
@ -316,7 +316,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
if (cfg->free)
cfg->free(cookie, pages, size);
else
iommu_free_pages(pages, get_order(size));
iommu_free_pages(pages);
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,

View file

@ -262,7 +262,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
pte = dart_install_table(cptep, ptep, 0, data);
if (pte)
iommu_free_pages(cptep, get_order(tblsz));
iommu_free_pages(cptep);
/* L2 table is present (now) */
pte = READ_ONCE(*ptep);
@ -423,8 +423,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
out_free_data:
while (--i >= 0) {
iommu_free_pages(data->pgd[i],
get_order(DART_GRANULE(data)));
iommu_free_pages(data->pgd[i]);
}
kfree(data);
return NULL;
@ -433,7 +432,6 @@ out_free_data:
static void apple_dart_free_pgtable(struct io_pgtable *iop)
{
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
int order = get_order(DART_GRANULE(data));
dart_iopte *ptep, *end;
int i;
@ -445,9 +443,9 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
dart_iopte pte = *ptep++;
if (pte)
iommu_free_pages(iopte_deref(pte, data), order);
iommu_free_pages(iopte_deref(pte, data));
}
iommu_free_pages(data->pgd[i], order);
iommu_free_pages(data->pgd[i]);
}
kfree(data);

View file

@ -105,11 +105,12 @@ static inline void *iommu_alloc_page(gfp_t gfp)
}
/**
* iommu_free_pages - free page of a given order
* iommu_free_pages - free pages
* @virt: virtual address of the page to be freed.
* @order: page order
*
* The page must have been allocated by iommu_alloc_pages_node()
*/
static inline void iommu_free_pages(void *virt, int order)
static inline void iommu_free_pages(void *virt)
{
struct page *page;
@ -127,7 +128,7 @@ static inline void iommu_free_pages(void *virt, int order)
*/
static inline void iommu_free_page(void *virt)
{
iommu_free_pages(virt, 0);
iommu_free_pages(virt);
}
/**

View file

@ -48,14 +48,13 @@ static DEFINE_IDA(riscv_iommu_pscids);
/* Device resource-managed allocations */
struct riscv_iommu_devres {
void *addr;
int order;
};
static void riscv_iommu_devres_pages_release(struct device *dev, void *res)
{
struct riscv_iommu_devres *devres = res;
iommu_free_pages(devres->addr, devres->order);
iommu_free_pages(devres->addr);
}
static int riscv_iommu_devres_pages_match(struct device *dev, void *res, void *p)
@ -80,12 +79,11 @@ static void *riscv_iommu_get_pages(struct riscv_iommu_device *iommu, int order)
sizeof(struct riscv_iommu_devres), GFP_KERNEL);
if (unlikely(!devres)) {
iommu_free_pages(addr, order);
iommu_free_pages(addr);
return NULL;
}
devres->addr = addr;
devres->order = order;
devres_add(iommu->dev, devres);

View file

@ -713,7 +713,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
iommu_free_pages(sun50i_domain->dt);
sun50i_domain->dt = NULL;
kfree(sun50i_domain);