mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:24:47 +01:00
ioremap_prot() currently accepts pgprot_val parameter as an unsigned long, thus implicitly assuming that pgprot_val and pgprot_t could never be bigger than unsigned long. But this assumption soon will not be true on arm64 when using D128 pgtables. In 128 bit page table configuration, unsigned long is 64 bit, but pgprot_t is 128 bit. Passing platform abstracted pgprot_t argument is better as compared to size based data types. Let's change the parameter to directly pass pgprot_t like another similar helper generic_ioremap_prot(). Without this change in place, D128 configuration does not work on arm64 as the top 64 bits gets silently stripped when passing the protection value to this function. Link: https://lkml.kernel.org/r/20250218101954.415331-1-anshuman.khandual@arm.com Signed-off-by: Ryan Roberts <ryan.roberts@arm.com> Co-developed-by: Anshuman Khandual <anshuman.khandual@arm.com> Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com> Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64] Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
72 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
|
|
|
|
#include <linux/io.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/mmzone.h>
|
|
#include <linux/vmalloc.h>
|
|
#include <asm/io-workarounds.h>
|
|
|
|
unsigned long ioremap_bot;
|
|
EXPORT_SYMBOL(ioremap_bot);
|
|
|
|
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
|
|
{
|
|
pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
|
|
void *caller = __builtin_return_address(0);
|
|
|
|
if (iowa_is_active())
|
|
return iowa_ioremap(addr, size, prot, caller);
|
|
return __ioremap_caller(addr, size, prot, caller);
|
|
}
|
|
EXPORT_SYMBOL(ioremap);
|
|
|
|
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
|
|
{
|
|
pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
|
|
void *caller = __builtin_return_address(0);
|
|
|
|
if (iowa_is_active())
|
|
return iowa_ioremap(addr, size, prot, caller);
|
|
return __ioremap_caller(addr, size, prot, caller);
|
|
}
|
|
EXPORT_SYMBOL(ioremap_wc);
|
|
|
|
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
|
|
{
|
|
pgprot_t prot = pgprot_cached(PAGE_KERNEL);
|
|
void *caller = __builtin_return_address(0);
|
|
|
|
if (iowa_is_active())
|
|
return iowa_ioremap(addr, size, prot, caller);
|
|
return __ioremap_caller(addr, size, prot, caller);
|
|
}
|
|
|
|
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot)
|
|
{
|
|
pte_t pte = __pte(pgprot_val(prot));
|
|
void *caller = __builtin_return_address(0);
|
|
|
|
/* writeable implies dirty for kernel addresses */
|
|
if (pte_write(pte))
|
|
pte = pte_mkdirty(pte);
|
|
|
|
if (iowa_is_active())
|
|
return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
|
|
return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
|
|
}
|
|
EXPORT_SYMBOL(ioremap_prot);
|
|
|
|
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
|
|
unsigned long size, pgprot_t prot)
|
|
{
|
|
unsigned long i;
|
|
|
|
for (i = 0; i < size; i += PAGE_SIZE) {
|
|
int err = map_kernel_page(ea + i, pa + i, pgprot_nx(prot));
|
|
|
|
if (WARN_ON_ONCE(err)) /* Should clean up */
|
|
return err;
|
|
}
|
|
|
|
return 0;
|
|
}
|