mirror of
https://github.com/torvalds/linux.git
synced 2026-03-13 23:46:14 +01:00
virtio_ring: switch to use vring_virtqueue for virtqueue_add variants
Those variants are used internally so let's switch to use vring_virtqueue as parameter to be consistent with other internal virtqueue helpers. Acked-by: Eugenio Pérez <eperezma@redhat.com> Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Message-Id: <20251230064649.55597-7-jasowang@redhat.com>
This commit is contained in:
parent
8b8590b708
commit
4a0fa90b10
1 changed file with 19 additions and 20 deletions
|
|
@ -476,7 +476,7 @@ out:
|
|||
return extra->next;
|
||||
}
|
||||
|
||||
static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
|
||||
static struct vring_desc *alloc_indirect_split(struct vring_virtqueue *vq,
|
||||
unsigned int total_sg,
|
||||
gfp_t gfp)
|
||||
{
|
||||
|
|
@ -505,7 +505,7 @@ static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
|
|||
return desc;
|
||||
}
|
||||
|
||||
static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
|
||||
static inline unsigned int virtqueue_add_desc_split(struct vring_virtqueue *vq,
|
||||
struct vring_desc *desc,
|
||||
struct vring_desc_extra *extra,
|
||||
unsigned int i,
|
||||
|
|
@ -513,11 +513,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
|
|||
unsigned int len,
|
||||
u16 flags, bool premapped)
|
||||
{
|
||||
struct virtio_device *vdev = vq->vq.vdev;
|
||||
u16 next;
|
||||
|
||||
desc[i].flags = cpu_to_virtio16(vq->vdev, flags);
|
||||
desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
|
||||
desc[i].len = cpu_to_virtio32(vq->vdev, len);
|
||||
desc[i].flags = cpu_to_virtio16(vdev, flags);
|
||||
desc[i].addr = cpu_to_virtio64(vdev, addr);
|
||||
desc[i].len = cpu_to_virtio32(vdev, len);
|
||||
|
||||
extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
|
||||
extra[i].len = len;
|
||||
|
|
@ -525,12 +526,12 @@ static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq,
|
|||
|
||||
next = extra[i].next;
|
||||
|
||||
desc[i].next = cpu_to_virtio16(vq->vdev, next);
|
||||
desc[i].next = cpu_to_virtio16(vdev, next);
|
||||
|
||||
return next;
|
||||
}
|
||||
|
||||
static inline int virtqueue_add_split(struct virtqueue *_vq,
|
||||
static inline int virtqueue_add_split(struct vring_virtqueue *vq,
|
||||
struct scatterlist *sgs[],
|
||||
unsigned int total_sg,
|
||||
unsigned int out_sgs,
|
||||
|
|
@ -540,7 +541,6 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
bool premapped,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
struct vring_desc_extra *extra;
|
||||
struct scatterlist *sg;
|
||||
struct vring_desc *desc;
|
||||
|
|
@ -565,7 +565,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
head = vq->free_head;
|
||||
|
||||
if (virtqueue_use_indirect(vq, total_sg))
|
||||
desc = alloc_indirect_split(_vq, total_sg, gfp);
|
||||
desc = alloc_indirect_split(vq, total_sg, gfp);
|
||||
else {
|
||||
desc = NULL;
|
||||
WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
|
||||
|
|
@ -612,7 +612,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
/* Note that we trust indirect descriptor
|
||||
* table since it use stream DMA mapping.
|
||||
*/
|
||||
i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
|
||||
i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
|
||||
VRING_DESC_F_NEXT,
|
||||
premapped);
|
||||
}
|
||||
|
|
@ -629,14 +629,14 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
/* Note that we trust indirect descriptor
|
||||
* table since it use stream DMA mapping.
|
||||
*/
|
||||
i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
|
||||
i = virtqueue_add_desc_split(vq, desc, extra, i, addr, len,
|
||||
VRING_DESC_F_NEXT |
|
||||
VRING_DESC_F_WRITE,
|
||||
premapped);
|
||||
}
|
||||
}
|
||||
/* Last one doesn't continue. */
|
||||
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
|
||||
desc[prev].flags &= cpu_to_virtio16(vq->vq.vdev, ~VRING_DESC_F_NEXT);
|
||||
if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
|
||||
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
|
||||
~VRING_DESC_F_NEXT;
|
||||
|
|
@ -649,7 +649,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
if (vring_mapping_error(vq, addr))
|
||||
goto unmap_release;
|
||||
|
||||
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
|
||||
virtqueue_add_desc_split(vq, vq->split.vring.desc,
|
||||
vq->split.desc_extra,
|
||||
head, addr,
|
||||
total_sg * sizeof(struct vring_desc),
|
||||
|
|
@ -675,13 +675,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
/* Put entry in available array (but don't update avail->idx until they
|
||||
* do sync). */
|
||||
avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
|
||||
vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
|
||||
vq->split.vring.avail->ring[avail] = cpu_to_virtio16(vq->vq.vdev, head);
|
||||
|
||||
/* Descriptors and available array need to be set before we expose the
|
||||
* new available array entries. */
|
||||
virtio_wmb(vq->weak_barriers);
|
||||
vq->split.avail_idx_shadow++;
|
||||
vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
|
||||
vq->split.vring.avail->idx = cpu_to_virtio16(vq->vq.vdev,
|
||||
vq->split.avail_idx_shadow);
|
||||
vq->num_added++;
|
||||
|
||||
|
|
@ -691,7 +691,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
|
|||
/* This is very unlikely, but theoretically possible. Kick
|
||||
* just in case. */
|
||||
if (unlikely(vq->num_added == (1 << 16) - 1))
|
||||
virtqueue_kick(_vq);
|
||||
virtqueue_kick(&vq->vq);
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
@ -1439,7 +1439,7 @@ unmap_release:
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static inline int virtqueue_add_packed(struct virtqueue *_vq,
|
||||
static inline int virtqueue_add_packed(struct vring_virtqueue *vq,
|
||||
struct scatterlist *sgs[],
|
||||
unsigned int total_sg,
|
||||
unsigned int out_sgs,
|
||||
|
|
@ -1449,7 +1449,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
|
|||
bool premapped,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
struct vring_packed_desc *desc;
|
||||
struct scatterlist *sg;
|
||||
unsigned int i, n, c, descs_used, err_idx, len;
|
||||
|
|
@ -2261,9 +2260,9 @@ static inline int virtqueue_add(struct virtqueue *_vq,
|
|||
{
|
||||
struct vring_virtqueue *vq = to_vvq(_vq);
|
||||
|
||||
return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
|
||||
return vq->packed_ring ? virtqueue_add_packed(vq, sgs, total_sg,
|
||||
out_sgs, in_sgs, data, ctx, premapped, gfp) :
|
||||
virtqueue_add_split(_vq, sgs, total_sg,
|
||||
virtqueue_add_split(vq, sgs, total_sg,
|
||||
out_sgs, in_sgs, data, ctx, premapped, gfp);
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue