io_uring-7.0-20260216

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmmTrLsQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgplYaEACgWcIcGa9/nWq1x02uN7Zi9vHWpDJqgEhq
 JCLpMLdn3ZG6Ksn8RAfI4dKAKZKS7MuXDrpoXgchQ8LQjpssN6kTj2TlKdZR8Je3
 NNWfkPnLUp/t3MN/V0vZiX5NQaJVCNblbcnauDzlN+6WkWku5p1wkwYwy3I7NPJ4
 P7HHqFJAOwhyBpk/Nr3sQEDnKIn/vOiedyOuO+3HB6rlmnSmjY1cQ+FUSaOI+rNQ
 D3i9TMEojHYhMDt76ql2YdKcksBu6HaZQ6JNpIiN9iqNB+96e+X2bcLPyfwkuHwC
 N7G1IMfyTsuV7JWktcZP+AT8WK4Qf45fuUN/1EkKEL9MWF2TUMob8toQ0GXRCb22
 NqSC1JyeVJ/sSnKzb2Z4wY4+BgRMo83ME3l6hi6QckWXfFyTAQe70JyUnu4w11qn
 62astpZXVRSfvbH3vT76BWTa+5HUZExQgLRgor19BTeVY4ihh+muaoMH6An6jf6i
 ZnqUSsn7nFB20MEudVqhgiKTvqVic2Atsl6JD4wjwWs5nEP9wzmmCSEGd3Nkrrji
 HPWN4zu+1qczDZxmCJAj3w29cRO/vZCNpFARlSCMcXNOQsZaFWVaaQlzt26ZMhTi
 AyMav25X8fNCERvGP++uo7cKzDGCuhhIR6y5GlXZ6yTHsGTcSgooW/NNz6Ik2jUW
 Bwa5GBK36A==
 =TgoD
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-7.0-20260216' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull more io_uring updates from Jens Axboe:
 "This is a mix of cleanups and fixes. No major fixes in here, just a
  bunch of little fixes. Some of them are marked for stable as they fix
  behavioral issues

   - Fix an issue with SOCKET_URING_OP_SETSOCKOPT for netlink sockets,
     due to a too restrictive check on it having an ioctl handler

   - Remove a redundant SQPOLL check in ring creation

   - Kill dead accounting for zero-copy send, which doesn't use ->buf
     or ->len post the initial setup

   - Fix missing clamp of the allocation hint, which could cause
     allocations to fall outside of the range the application asked
     for. Still within the allowed limits.

   - Fix for IORING_OP_PIPE's handling of direct descriptors

   - Tweak to the API for the newly added BPF filters, making them
     more future proof in terms of how applications deal with them

   - A few fixes for zcrx, fixing a few error handling conditions

   - Fix for zcrx request flag checking

   - Add support for querying the zcrx page size

   - Improve the NO_SQARRAY static branch inc/dec, avoiding busy
     conditions causing too much traffic

   - Various little cleanups"

* tag 'io_uring-7.0-20260216' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  io_uring/bpf_filter: pass in expected filter payload size
  io_uring/bpf_filter: move filter size and populate helper into struct
  io_uring/cancel: de-unionize file and user_data in struct io_cancel_data
  io_uring/rsrc: improve regbuf iov validation
  io_uring: remove unneeded io_send_zc accounting
  io_uring/cmd_net: fix too strict requirement on ioctl
  io_uring: delay sqarray static branch disablement
  io_uring/query: add query.h copyright notice
  io_uring/query: return support for custom rx page size
  io_uring/zcrx: check unsupported flags on import
  io_uring/zcrx: fix post open error handling
  io_uring/zcrx: fix sgtable leak on mapping failures
  io_uring: use the right type for creds iteration
  io_uring/openclose: fix io_pipe_fixed() slot tracking for specific slots
  io_uring/filetable: clamp alloc_hint to the configured alloc range
  io_uring/rsrc: replace reg buffer bit field with flags
  io_uring/zcrx: improve types for size calculation
  io_uring/tctx: avoid modifying loop variable in io_ring_add_registered_file
  io_uring: simplify IORING_SETUP_DEFER_TASKRUN && !SQPOLL check
This commit is contained in:
Linus Torvalds 2026-02-17 08:33:49 -08:00
commit 7b751b01ad
18 changed files with 151 additions and 92 deletions

View file

@ -26,6 +26,8 @@ static const struct io_bpf_filter dummy_filter;
static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
struct io_kiocb *req)
{
const struct io_issue_def *def = &io_issue_defs[req->opcode];
bctx->opcode = req->opcode;
bctx->sqe_flags = (__force int) req->flags & SQE_VALID_FLAGS;
bctx->user_data = req->cqe.user_data;
@ -34,19 +36,12 @@ static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
sizeof(*bctx) - offsetof(struct io_uring_bpf_ctx, pdu_size));
/*
* Opcodes can provide a handler fo populating more data into bctx,
* Opcodes can provide a handler for populating more data into bctx,
* for filters to use.
*/
switch (req->opcode) {
case IORING_OP_SOCKET:
bctx->pdu_size = sizeof(bctx->socket);
io_socket_bpf_populate(bctx, req);
break;
case IORING_OP_OPENAT:
case IORING_OP_OPENAT2:
bctx->pdu_size = sizeof(bctx->open);
io_openat_bpf_populate(bctx, req);
break;
if (def->filter_pdu_size) {
bctx->pdu_size = def->filter_pdu_size;
def->filter_populate(bctx, req);
}
}
@ -313,7 +308,54 @@ err:
return ERR_PTR(-EBUSY);
}
#define IO_URING_BPF_FILTER_FLAGS IO_URING_BPF_FILTER_DENY_REST
#define IO_URING_BPF_FILTER_FLAGS (IO_URING_BPF_FILTER_DENY_REST | \
IO_URING_BPF_FILTER_SZ_STRICT)
static int io_bpf_filter_import(struct io_uring_bpf *reg,
struct io_uring_bpf __user *arg)
{
const struct io_issue_def *def;
int ret;
if (copy_from_user(reg, arg, sizeof(*reg)))
return -EFAULT;
if (reg->cmd_type != IO_URING_BPF_CMD_FILTER)
return -EINVAL;
if (reg->cmd_flags || reg->resv)
return -EINVAL;
if (reg->filter.opcode >= IORING_OP_LAST)
return -EINVAL;
if (reg->filter.flags & ~IO_URING_BPF_FILTER_FLAGS)
return -EINVAL;
if (!mem_is_zero(reg->filter.resv, sizeof(reg->filter.resv)))
return -EINVAL;
if (!mem_is_zero(reg->filter.resv2, sizeof(reg->filter.resv2)))
return -EINVAL;
if (!reg->filter.filter_len || reg->filter.filter_len > BPF_MAXINSNS)
return -EINVAL;
/* Verify filter size */
def = &io_issue_defs[array_index_nospec(reg->filter.opcode, IORING_OP_LAST)];
/* same size, always ok */
ret = 0;
if (reg->filter.pdu_size == def->filter_pdu_size)
;
/* size differs, fail in strict mode */
else if (reg->filter.flags & IO_URING_BPF_FILTER_SZ_STRICT)
ret = -EMSGSIZE;
/* userspace filter is bigger, always disallow */
else if (reg->filter.pdu_size > def->filter_pdu_size)
ret = -EMSGSIZE;
/* copy back kernel filter size */
reg->filter.pdu_size = def->filter_pdu_size;
if (copy_to_user(&arg->filter, &reg->filter, sizeof(reg->filter)))
return -EFAULT;
return ret;
}
int io_register_bpf_filter(struct io_restriction *res,
struct io_uring_bpf __user *arg)
@ -325,23 +367,9 @@ int io_register_bpf_filter(struct io_restriction *res,
struct sock_fprog fprog;
int ret;
if (copy_from_user(&reg, arg, sizeof(reg)))
return -EFAULT;
if (reg.cmd_type != IO_URING_BPF_CMD_FILTER)
return -EINVAL;
if (reg.cmd_flags || reg.resv)
return -EINVAL;
if (reg.filter.opcode >= IORING_OP_LAST)
return -EINVAL;
if (reg.filter.flags & ~IO_URING_BPF_FILTER_FLAGS)
return -EINVAL;
if (reg.filter.resv)
return -EINVAL;
if (!mem_is_zero(reg.filter.resv2, sizeof(reg.filter.resv2)))
return -EINVAL;
if (!reg.filter.filter_len || reg.filter.filter_len > BPF_MAXINSNS)
return -EINVAL;
ret = io_bpf_filter_import(&reg, arg);
if (ret)
return ret;
fprog.len = reg.filter.filter_len;
fprog.filter = u64_to_user_ptr(reg.filter.filter_ptr);

View file

@ -6,10 +6,8 @@
struct io_cancel_data {
struct io_ring_ctx *ctx;
union {
u64 data;
struct file *file;
};
u64 data;
struct file *file;
u8 opcode;
u32 flags;
int seq;

View file

@ -160,16 +160,19 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
struct proto *prot = READ_ONCE(sk->sk_prot);
int ret, arg = 0;
if (!prot || !prot->ioctl)
return -EOPNOTSUPP;
switch (cmd->cmd_op) {
case SOCKET_URING_OP_SIOCINQ:
if (!prot || !prot->ioctl)
return -EOPNOTSUPP;
ret = prot->ioctl(sk, SIOCINQ, &arg);
if (ret)
return ret;
return arg;
case SOCKET_URING_OP_SIOCOUTQ:
if (!prot || !prot->ioctl)
return -EOPNOTSUPP;
ret = prot->ioctl(sk, SIOCOUTQ, &arg);
if (ret)
return ret;

View file

@ -22,6 +22,10 @@ static int io_file_bitmap_get(struct io_ring_ctx *ctx)
if (!table->bitmap)
return -ENFILE;
if (table->alloc_hint < ctx->file_alloc_start ||
table->alloc_hint >= ctx->file_alloc_end)
table->alloc_hint = ctx->file_alloc_start;
do {
ret = find_next_zero_bit(table->bitmap, nr, table->alloc_hint);
if (ret != nr)

View file

@ -119,7 +119,7 @@
static void io_queue_sqe(struct io_kiocb *req, unsigned int extra_flags);
static void __io_req_caches_free(struct io_ring_ctx *ctx);
static __read_mostly DEFINE_STATIC_KEY_FALSE(io_key_has_sqarray);
static __read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(io_key_has_sqarray, HZ);
struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;
@ -1978,7 +1978,7 @@ static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
unsigned mask = ctx->sq_entries - 1;
unsigned head = ctx->cached_sq_head++ & mask;
if (static_branch_unlikely(&io_key_has_sqarray) &&
if (static_branch_unlikely(&io_key_has_sqarray.key) &&
(!(ctx->flags & IORING_SETUP_NO_SQARRAY))) {
head = READ_ONCE(ctx->sq_array[head]);
if (unlikely(head >= ctx->sq_entries)) {
@ -2173,7 +2173,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_rings_free(ctx);
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
static_branch_dec(&io_key_has_sqarray);
static_branch_slow_dec_deferred(&io_key_has_sqarray);
percpu_ref_exit(&ctx->refs);
free_uid(ctx->user);
@ -2398,7 +2398,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
unsigned long index;
struct creds *creds;
struct cred *creds;
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
@ -2946,11 +2946,10 @@ static __cold int io_uring_create(struct io_ctx_config *config)
ctx->clock_offset = 0;
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
static_branch_inc(&io_key_has_sqarray);
static_branch_deferred_inc(&io_key_has_sqarray);
if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
!(ctx->flags & IORING_SETUP_IOPOLL) &&
!(ctx->flags & IORING_SETUP_SQPOLL))
!(ctx->flags & IORING_SETUP_IOPOLL))
ctx->task_complete = true;
if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))

View file

@ -1493,8 +1493,6 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
return -EAGAIN;
if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
zc->len -= ret;
zc->buf += ret;
zc->done_io += ret;
return -EAGAIN;
}

View file

@ -221,8 +221,10 @@ const struct io_issue_def io_issue_defs[] = {
.issue = io_fallocate,
},
[IORING_OP_OPENAT] = {
.filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, open),
.prep = io_openat_prep,
.issue = io_openat,
.filter_populate = io_openat_bpf_populate,
},
[IORING_OP_CLOSE] = {
.prep = io_close_prep,
@ -309,8 +311,10 @@ const struct io_issue_def io_issue_defs[] = {
#endif
},
[IORING_OP_OPENAT2] = {
.filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, open),
.prep = io_openat2_prep,
.issue = io_openat2,
.filter_populate = io_openat_bpf_populate,
},
[IORING_OP_EPOLL_CTL] = {
.unbound_nonreg_file = 1,
@ -406,8 +410,10 @@ const struct io_issue_def io_issue_defs[] = {
[IORING_OP_SOCKET] = {
.audit_skip = 1,
#if defined(CONFIG_NET)
.filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, socket),
.prep = io_socket_prep,
.issue = io_socket,
.filter_populate = io_socket_bpf_populate,
#else
.prep = io_eopnotsupp_prep,
#endif

View file

@ -2,6 +2,8 @@
#ifndef IOU_OP_DEF_H
#define IOU_OP_DEF_H
struct io_uring_bpf_ctx;
struct io_issue_def {
/* needs req->file assigned */
unsigned needs_file : 1;
@ -33,8 +35,12 @@ struct io_issue_def {
/* size of async data needed, if any */
unsigned short async_size;
/* bpf filter pdu size, if any */
unsigned short filter_pdu_size;
int (*issue)(struct io_kiocb *, unsigned int);
int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
void (*filter_populate)(struct io_uring_bpf_ctx *, struct io_kiocb *);
};
struct io_cold_def {

View file

@ -345,31 +345,34 @@ static int io_pipe_fixed(struct io_kiocb *req, struct file **files,
{
struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
struct io_ring_ctx *ctx = req->ctx;
bool alloc_slot;
int ret, fds[2] = { -1, -1 };
int slot = p->file_slot;
if (p->flags & O_CLOEXEC)
return -EINVAL;
alloc_slot = slot == IORING_FILE_INDEX_ALLOC;
io_ring_submit_lock(ctx, issue_flags);
ret = __io_fixed_fd_install(ctx, files[0], slot);
if (ret < 0)
goto err;
fds[0] = ret;
fds[0] = alloc_slot ? ret : slot - 1;
files[0] = NULL;
/*
* If a specific slot is given, next one will be used for
* the write side.
*/
if (slot != IORING_FILE_INDEX_ALLOC)
if (!alloc_slot)
slot++;
ret = __io_fixed_fd_install(ctx, files[1], slot);
if (ret < 0)
goto err;
fds[1] = ret;
fds[1] = alloc_slot ? ret : slot - 1;
files[1] = NULL;
io_ring_submit_unlock(ctx, issue_flags);

View file

@ -39,7 +39,7 @@ static ssize_t io_query_zcrx(union io_query_data *data)
e->nr_ctrl_opcodes = __ZCRX_CTRL_LAST;
e->rq_hdr_size = sizeof(struct io_uring);
e->rq_hdr_alignment = L1_CACHE_BYTES;
e->__resv1 = 0;
e->features = ZCRX_FEATURE_RX_PAGE_SIZE;
e->__resv2 = 0;
return sizeof(*e);
}

View file

@ -96,20 +96,6 @@ int io_validate_user_buf_range(u64 uaddr, u64 ulen)
return 0;
}
static int io_buffer_validate(struct iovec *iov)
{
/*
* Don't impose further limits on the size and buffer
* constraints here, we'll -EINVAL later when IO is
* submitted if they are wrong.
*/
if (!iov->iov_base)
return iov->iov_len ? -EFAULT : 0;
return io_validate_user_buf_range((unsigned long)iov->iov_base,
iov->iov_len);
}
static void io_release_ubuf(void *priv)
{
struct io_mapped_ubuf *imu = priv;
@ -319,9 +305,6 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
err = -EFAULT;
break;
}
err = io_buffer_validate(iov);
if (err)
break;
node = io_sqe_buffer_register(ctx, iov, &last_hpage);
if (IS_ERR(node)) {
err = PTR_ERR(node);
@ -790,8 +773,17 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
struct io_imu_folio_data data;
bool coalesced = false;
if (!iov->iov_base)
if (!iov->iov_base) {
if (iov->iov_len)
return ERR_PTR(-EFAULT);
/* remove the buffer without installing a new one */
return NULL;
}
ret = io_validate_user_buf_range((unsigned long)iov->iov_base,
iov->iov_len);
if (ret)
return ERR_PTR(ret);
node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
if (!node)
@ -828,7 +820,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
imu->folio_shift = PAGE_SHIFT;
imu->release = io_release_ubuf;
imu->priv = imu;
imu->is_kbuf = false;
imu->flags = 0;
imu->dir = IO_IMU_DEST | IO_IMU_SOURCE;
if (coalesced)
imu->folio_shift = data.folio_shift;
@ -897,9 +889,6 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
ret = PTR_ERR(iov);
break;
}
ret = io_buffer_validate(iov);
if (ret)
break;
if (ctx->compat)
arg += sizeof(struct compat_iovec);
else
@ -985,7 +974,7 @@ int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
refcount_set(&imu->refs, 1);
imu->release = release;
imu->priv = rq;
imu->is_kbuf = true;
imu->flags = IO_REGBUF_F_KBUF;
imu->dir = 1 << rq_data_dir(rq);
rq_for_each_bvec(bv, rq, rq_iter)
@ -1020,7 +1009,7 @@ int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
ret = -EINVAL;
goto unlock;
}
if (!node->buf->is_kbuf) {
if (!(node->buf->flags & IO_REGBUF_F_KBUF)) {
ret = -EBUSY;
goto unlock;
}
@ -1076,7 +1065,7 @@ static int io_import_fixed(int ddir, struct iov_iter *iter,
offset = buf_addr - imu->ubuf;
if (imu->is_kbuf)
if (imu->flags & IO_REGBUF_F_KBUF)
return io_import_kbuf(ddir, iter, imu, len, offset);
/*
@ -1496,7 +1485,7 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
iovec_off = vec->nr - nr_iovs;
iov = vec->iovec + iovec_off;
if (imu->is_kbuf) {
if (imu->flags & IO_REGBUF_F_KBUF) {
int ret = io_kern_bvec_size(iov, nr_iovs, imu, &nr_segs);
if (unlikely(ret))
@ -1534,7 +1523,7 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
req->flags |= REQ_F_NEED_CLEANUP;
}
if (imu->is_kbuf)
if (imu->flags & IO_REGBUF_F_KBUF)
return io_vec_fill_kern_bvec(ddir, iter, imu, iov, nr_iovs, vec);
return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);

View file

@ -28,6 +28,10 @@ enum {
IO_IMU_SOURCE = 1 << ITER_SOURCE,
};
enum {
IO_REGBUF_F_KBUF = 1,
};
struct io_mapped_ubuf {
u64 ubuf;
unsigned int len;
@ -37,7 +41,7 @@ struct io_mapped_ubuf {
unsigned long acct_pages;
void (*release)(void *);
void *priv;
bool is_kbuf;
u8 flags;
u8 dir;
struct bio_vec bvec[] __counted_by(nr_bvecs);
};

View file

@ -702,7 +702,8 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
if ((kiocb->ki_flags & IOCB_NOWAIT) &&
!(kiocb->ki_filp->f_flags & O_NONBLOCK))
return -EAGAIN;
if ((req->flags & REQ_F_BUF_NODE) && req->buf_node->buf->is_kbuf)
if ((req->flags & REQ_F_BUF_NODE) &&
(req->buf_node->buf->flags & IO_REGBUF_F_KBUF))
return -EFAULT;
ppos = io_kiocb_ppos(kiocb);

View file

@ -240,14 +240,14 @@ void io_uring_unreg_ringfd(void)
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
int start, int end)
{
int offset;
int offset, idx;
for (offset = start; offset < end; offset++) {
offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
if (tctx->registered_rings[offset])
idx = array_index_nospec(offset, IO_RINGFD_REG_MAX);
if (tctx->registered_rings[idx])
continue;
tctx->registered_rings[offset] = file;
return offset;
tctx->registered_rings[idx] = file;
return idx;
}
return -EBUSY;
}

View file

@ -205,7 +205,7 @@ static int io_import_umem(struct io_zcrx_ifq *ifq,
return PTR_ERR(pages);
ret = sg_alloc_table_from_pages(&mem->page_sg_table, pages, nr_pages,
0, nr_pages << PAGE_SHIFT,
0, (unsigned long)nr_pages << PAGE_SHIFT,
GFP_KERNEL_ACCOUNT);
if (ret) {
unpin_user_pages(pages, nr_pages);
@ -300,6 +300,9 @@ static int io_zcrx_map_area(struct io_zcrx_ifq *ifq, struct io_zcrx_area *area)
}
ret = io_populate_area_dma(ifq, area);
if (ret && !area->mem.is_dmabuf)
dma_unmap_sgtable(ifq->dev, &area->mem.page_sg_table,
DMA_FROM_DEVICE, IO_DMA_ATTR);
if (ret == 0)
area->is_mapped = true;
return ret;
@ -538,9 +541,6 @@ static void io_close_queue(struct io_zcrx_ifq *ifq)
.mp_priv = ifq,
};
if (ifq->if_rxq == -1)
return;
scoped_guard(mutex, &ifq->pp_lock) {
netdev = ifq->netdev;
netdev_tracker = ifq->netdev_tracker;
@ -548,7 +548,8 @@ static void io_close_queue(struct io_zcrx_ifq *ifq)
}
if (netdev) {
net_mp_close_rxq(netdev, ifq->if_rxq, &p);
if (ifq->if_rxq != -1)
net_mp_close_rxq(netdev, ifq->if_rxq, &p);
netdev_put(netdev, &netdev_tracker);
}
ifq->if_rxq = -1;
@ -702,6 +703,8 @@ static int import_zcrx(struct io_ring_ctx *ctx,
return -EINVAL;
if (reg->if_rxq || reg->rq_entries || reg->area_ptr || reg->region_ptr)
return -EINVAL;
if (reg->flags & ~ZCRX_REG_IMPORT)
return -EINVAL;
fd = reg->if_idx;
CLASS(fd, f)(fd);
@ -858,13 +861,12 @@ int io_register_zcrx_ifq(struct io_ring_ctx *ctx,
}
return 0;
netdev_put_unlock:
netdev_put(ifq->netdev, &ifq->netdev_tracker);
netdev_unlock(ifq->netdev);
err:
scoped_guard(mutex, &ctx->mmap_lock)
xa_erase(&ctx->zcrx_ctxs, id);
ifq_free:
io_zcrx_ifq_free(ifq);
zcrx_unregister(ifq);
return ret;
}