mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
alpha: Convert mapping routine to rely on physical address
Alpha doesn't need struct page * and can perform mapping based on physical addresses, so convert it to implement the new .map_phys callback. As part of this change, remove the useless BUG_ON(), since the DMA mapping layer ensures that the right direction is provided. Tested-by: Magnus Lindholm <linmag7@gmail.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com> Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-7-3bbfe3a25cdf@kernel.org
This commit is contained in:
parent
14cb413af0
commit
6aaecdf0d8
1 changed file with 21 additions and 27 deletions
|
|
@ -224,28 +224,26 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
|
|||
until either pci_unmap_single or pci_dma_sync_single is performed. */
|
||||
|
||||
static dma_addr_t
|
||||
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
|
||||
pci_map_single_1(struct pci_dev *pdev, phys_addr_t paddr, size_t size,
|
||||
int dac_allowed)
|
||||
{
|
||||
struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
|
||||
dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
|
||||
unsigned long offset = offset_in_page(paddr);
|
||||
struct pci_iommu_arena *arena;
|
||||
long npages, dma_ofs, i;
|
||||
unsigned long paddr;
|
||||
dma_addr_t ret;
|
||||
unsigned int align = 0;
|
||||
struct device *dev = pdev ? &pdev->dev : NULL;
|
||||
|
||||
paddr = __pa(cpu_addr);
|
||||
|
||||
#if !DEBUG_NODIRECT
|
||||
/* First check to see if we can use the direct map window. */
|
||||
if (paddr + size + __direct_map_base - 1 <= max_dma
|
||||
&& paddr + size <= __direct_map_size) {
|
||||
ret = paddr + __direct_map_base;
|
||||
|
||||
DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
|
||||
cpu_addr, size, ret, __builtin_return_address(0));
|
||||
DBGA2("pci_map_single: [%pa,%zx] -> direct %llx from %ps\n",
|
||||
&paddr, size, ret, __builtin_return_address(0));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -255,8 +253,8 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
|
|||
if (dac_allowed) {
|
||||
ret = paddr + alpha_mv.pci_dac_offset;
|
||||
|
||||
DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
|
||||
cpu_addr, size, ret, __builtin_return_address(0));
|
||||
DBGA2("pci_map_single: [%pa,%zx] -> DAC %llx from %ps\n",
|
||||
&paddr, size, ret, __builtin_return_address(0));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -290,10 +288,10 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
|
|||
arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
|
||||
|
||||
ret = arena->dma_base + dma_ofs * PAGE_SIZE;
|
||||
ret += (unsigned long)cpu_addr & ~PAGE_MASK;
|
||||
ret += offset;
|
||||
|
||||
DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
|
||||
cpu_addr, size, npages, ret, __builtin_return_address(0));
|
||||
DBGA2("pci_map_single: [%pa,%zx] np %ld -> sg %llx from %ps\n",
|
||||
&paddr, size, npages, ret, __builtin_return_address(0));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -322,19 +320,18 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size,
|
||||
enum dma_data_direction dir,
|
||||
static dma_addr_t alpha_pci_map_phys(struct device *dev, phys_addr_t phys,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
|
||||
int dac_allowed;
|
||||
|
||||
BUG_ON(dir == DMA_NONE);
|
||||
if (unlikely(attrs & DMA_ATTR_MMIO))
|
||||
return DMA_MAPPING_ERROR;
|
||||
|
||||
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
|
||||
return pci_map_single_1(pdev, (char *)page_address(page) + offset,
|
||||
size, dac_allowed);
|
||||
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
|
||||
return pci_map_single_1(pdev, phys, size, dac_allowed);
|
||||
}
|
||||
|
||||
/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
|
||||
|
|
@ -343,7 +340,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
|
|||
the cpu to the buffer are guaranteed to see whatever the device
|
||||
wrote there. */
|
||||
|
||||
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
|
||||
static void alpha_pci_unmap_phys(struct device *dev, dma_addr_t dma_addr,
|
||||
size_t size, enum dma_data_direction dir,
|
||||
unsigned long attrs)
|
||||
{
|
||||
|
|
@ -353,8 +350,6 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
|
|||
struct pci_iommu_arena *arena;
|
||||
long dma_ofs, npages;
|
||||
|
||||
BUG_ON(dir == DMA_NONE);
|
||||
|
||||
if (dma_addr >= __direct_map_base
|
||||
&& dma_addr < __direct_map_base + __direct_map_size) {
|
||||
/* Nothing to do. */
|
||||
|
|
@ -429,7 +424,7 @@ try_again:
|
|||
}
|
||||
memset(cpu_addr, 0, size);
|
||||
|
||||
*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
|
||||
*dma_addrp = pci_map_single_1(pdev, virt_to_phys(cpu_addr), size, 0);
|
||||
if (*dma_addrp == DMA_MAPPING_ERROR) {
|
||||
free_pages((unsigned long)cpu_addr, order);
|
||||
if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
|
||||
|
|
@ -643,9 +638,8 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
/* Fast path single entry scatterlists. */
|
||||
if (nents == 1) {
|
||||
sg->dma_length = sg->length;
|
||||
sg->dma_address
|
||||
= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
|
||||
sg->length, dac_allowed);
|
||||
sg->dma_address = pci_map_single_1(pdev, sg_phys(sg),
|
||||
sg->length, dac_allowed);
|
||||
if (sg->dma_address == DMA_MAPPING_ERROR)
|
||||
return -EIO;
|
||||
return 1;
|
||||
|
|
@ -917,8 +911,8 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
|
|||
const struct dma_map_ops alpha_pci_ops = {
|
||||
.alloc = alpha_pci_alloc_coherent,
|
||||
.free = alpha_pci_free_coherent,
|
||||
.map_page = alpha_pci_map_page,
|
||||
.unmap_page = alpha_pci_unmap_page,
|
||||
.map_phys = alpha_pci_map_phys,
|
||||
.unmap_phys = alpha_pci_unmap_phys,
|
||||
.map_sg = alpha_pci_map_sg,
|
||||
.unmap_sg = alpha_pci_unmap_sg,
|
||||
.dma_supported = alpha_pci_supported,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue