* Reuse common phys_vec, phase out dma_buf_phys_vec

Signed-off-by: Alex Williamson <alex@shazbot.org>
 -----BEGIN PGP SIGNATURE-----
 
 iQJFBAABCgAvFiEEQvbATlQL0amee4qQI5ubbjuwiyIFAmluaLsRHGFsZXhAc2hh
 emJvdC5vcmcACgkQI5ubbjuwiyIZow/+N/BbZYkRub0lFbufNQ/+M/N80kRzfjjQ
 dE2/AN+YDfLdFxNvmq/P1fYI2B/Oc8msuGwT+C+eJMSVMzZIdCDvnhxH+aPH9roo
 5zmE7fxLjWp/k1lOwb8TupOIR3Jl334AiC/t9PvqraIUMGvt3Uv+/7KoCx/xDxb7
 ID6NbDKiXCZpvlQN1dukf8TCzDVFJnWOOMKLiDnez14okfd5rIqiFpOWMjtAhwlY
 VrwIy/eVqX6YktniwunqU3f4/BeurlHdCS29LdDnHdZzL6HER5onvMwIO8qMeuFZ
 yS8Bf8KJTYtqEedMtFUi5a/ipYu4vuK0KEqIE6USKZLuhQCqLVjFDKs5zUGRQ48X
 qLs59BBmP1WgOnM63OGXzBAAvelLNoh/D5KVzzXmQyNkBn6mFy1MWeR38Ozgl0FA
 +GjK+iwV/GRo+CgDa6Vz+eVwvCV2RhcYlT4cK6BodIQbwd9SWAMEcRxI/IEvOxfC
 YY/1U2JRhOSaQb9j65xgwylEbwoi8BMVbFWE3DydYMr+9PVaOyTKLcJLKrYmhwLn
 cuPetgLaK3UtxdcfhnZyrwzpmtvA56SAReQYg9s+TXFGFurQjNlGVlcKk4dB45nX
 JcOtWHm/6+3D8qoN6FY8Vj5QPePn48urSw1R1/D0LP7951gxknILiQpI7aqEPHyU
 rjAZH6nH6bI=
 =c1Gn
 -----END PGP SIGNATURE-----

Merge tag 'common_phys_vec_via_vfio' into v6.20/vfio/next

 * Reuse common phys_vec, phase out dma_buf_phys_vec

Signed-off-by: Alex Williamson <alex@shazbot.org>
This commit is contained in:
Alex Williamson 2026-01-19 10:25:24 -07:00
commit fab06e956f
13 changed files with 33 additions and 41 deletions

View file

@@ -6,11 +6,6 @@
#include <linux/blk-mq-dma.h>
#include "blk.h"
struct phys_vec {
phys_addr_t paddr;
u32 len;
};
static bool __blk_map_iter_next(struct blk_map_iter *iter)
{
if (iter->iter.bi_size)
@@ -112,8 +107,8 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
struct phys_vec *vec)
{
enum dma_data_direction dir = rq_dma_dir(req);
unsigned int mapped = 0;
unsigned int attrs = 0;
size_t mapped = 0;
int error;
iter->addr = state->addr;
@@ -297,6 +292,8 @@ int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
blk_rq_map_iter_init(rq, &iter);
while (blk_map_iter_next(rq, &iter, &vec)) {
*last_sg = blk_next_sg(last_sg, sglist);
WARN_ON_ONCE(overflows_type(vec.len, unsigned int));
sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
offset_in_page(vec.paddr));
nsegs++;
@@ -417,6 +414,8 @@ int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
while (blk_map_iter_next(rq, &iter, &vec)) {
sg = blk_next_sg(&sg, sglist);
WARN_ON_ONCE(overflows_type(vec.len, unsigned int));
sg_set_page(sg, phys_to_page(vec.paddr), vec.len,
offset_in_page(vec.paddr));
segments++;

View file

@@ -33,8 +33,8 @@ static struct scatterlist *fill_sg_entry(struct scatterlist *sgl, size_t length,
}
static unsigned int calc_sg_nents(struct dma_iova_state *state,
struct dma_buf_phys_vec *phys_vec,
size_t nr_ranges, size_t size)
struct phys_vec *phys_vec, size_t nr_ranges,
size_t size)
{
unsigned int nents = 0;
size_t i;
@@ -91,7 +91,7 @@ struct dma_buf_dma {
*/
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
struct dma_buf_phys_vec *phys_vec,
struct phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir)
{

View file

@@ -202,7 +202,7 @@ struct iopt_pages_dmabuf_track {
struct iopt_pages_dmabuf {
struct dma_buf_attachment *attach;
struct dma_buf_phys_vec phys;
struct phys_vec phys;
/* Always PAGE_SIZE aligned */
unsigned long start;
struct list_head tracker;

View file

@@ -20,7 +20,6 @@ struct iommu_group;
struct iommu_option;
struct iommufd_device;
struct dma_buf_attachment;
struct dma_buf_phys_vec;
struct iommufd_sw_msi_map {
struct list_head sw_msi_item;
@@ -718,7 +717,7 @@ int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
struct dma_buf_phys_vec *phys);
struct phys_vec *phys);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id,
@@ -742,7 +741,7 @@ static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
}
static inline int
iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
struct dma_buf_phys_vec *phys)
struct phys_vec *phys)
{
return -EOPNOTSUPP;
}

View file

@@ -1077,7 +1077,7 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
}
struct pfn_reader_dmabuf {
struct dma_buf_phys_vec phys;
struct phys_vec phys;
unsigned long start_offset;
};
@@ -1460,7 +1460,7 @@ static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
*/
static int
sym_vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
struct dma_buf_phys_vec *phys)
struct phys_vec *phys)
{
typeof(&vfio_pci_dma_buf_iommufd_map) fn;
int rc;

View file

@@ -2002,7 +2002,7 @@ static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
};
int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
struct dma_buf_phys_vec *phys)
struct phys_vec *phys)
{
struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;

View file

@@ -290,14 +290,14 @@ struct nvme_iod {
u8 flags;
u8 nr_descriptors;
unsigned int total_len;
size_t total_len;
struct dma_iova_state dma_state;
void *descriptors[NVME_MAX_NR_DESCRIPTORS];
struct nvme_dma_vec *dma_vecs;
unsigned int nr_dma_vecs;
dma_addr_t meta_dma;
unsigned int meta_total_len;
size_t meta_total_len;
struct dma_iova_state meta_dma_state;
struct nvme_sgl_desc *meta_descriptor;
};

View file

@@ -889,7 +889,7 @@ nvgrace_gpu_write(struct vfio_device *core_vdev,
static int nvgrace_get_dmabuf_phys(struct vfio_pci_core_device *core_vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
struct dma_buf_phys_vec *phys_vec,
struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges)
{

View file

@@ -14,7 +14,7 @@ struct vfio_pci_dma_buf {
struct vfio_pci_core_device *vdev;
struct list_head dmabufs_elm;
size_t size;
struct dma_buf_phys_vec *phys_vec;
struct phys_vec *phys_vec;
struct p2pdma_provider *provider;
u32 nr_ranges;
u8 revoked : 1;
@@ -94,7 +94,7 @@ static const struct dma_buf_ops vfio_pci_dmabuf_ops = {
* will fail if it is currently revoked
*/
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
struct dma_buf_phys_vec *phys)
struct phys_vec *phys)
{
struct vfio_pci_dma_buf *priv;
@@ -116,7 +116,7 @@ int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
}
EXPORT_SYMBOL_FOR_MODULES(vfio_pci_dma_buf_iommufd_map, "iommufd");
int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len)
@@ -148,7 +148,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_fill_phys_vec);
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
struct dma_buf_phys_vec *phys_vec,
struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges)
{

View file

@@ -9,7 +9,7 @@
struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
struct p2pdma_provider *provider,
struct dma_buf_phys_vec *phys_vec,
struct phys_vec *phys_vec,
size_t nr_ranges, size_t size,
enum dma_data_direction dir);
void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,

View file

@@ -531,16 +531,6 @@ struct dma_buf_export_info {
void *priv;
};
/**
* struct dma_buf_phys_vec - describe continuous chunk of memory
* @paddr: physical address of that chunk
* @len: Length of this chunk
*/
struct dma_buf_phys_vec {
phys_addr_t paddr;
size_t len;
};
/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name

View file

@@ -171,6 +171,11 @@ typedef u64 phys_addr_t;
typedef u32 phys_addr_t;
#endif
struct phys_vec {
phys_addr_t paddr;
size_t len;
};
typedef phys_addr_t resource_size_t;
/*

View file

@@ -28,7 +28,6 @@
struct vfio_pci_core_device;
struct vfio_pci_region;
struct p2pdma_provider;
struct dma_buf_phys_vec;
struct dma_buf_attachment;
struct vfio_pci_eventfd {
@@ -62,25 +61,25 @@ struct vfio_pci_device_ops {
int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
struct dma_buf_phys_vec *phys_vec,
struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
};
#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len);
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
struct p2pdma_provider **provider,
unsigned int region_index,
struct dma_buf_phys_vec *phys_vec,
struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges);
#else
static inline int
vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
phys_addr_t len)
@@ -89,7 +88,7 @@ vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
}
static inline int vfio_pci_core_get_dmabuf_phys(
struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
unsigned int region_index, struct phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
{
return -EOPNOTSUPP;
@@ -236,6 +235,6 @@ static inline bool is_aligned_for_order(struct vm_area_struct *vma,
}
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
struct dma_buf_phys_vec *phys);
struct phys_vec *phys);
#endif /* VFIO_PCI_CORE_H */