mirror of
https://github.com/torvalds/linux.git
synced 2026-03-13 23:46:14 +01:00
ublk: handle UBLK_U_IO_COMMIT_IO_CMDS
Handle UBLK_U_IO_COMMIT_IO_CMDS by walking the uring_cmd fixed buffer:

- read each element into one temp buffer in batch style
- parse and apply each element for committing io result

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
b256795b36
commit
1e500e106d
2 changed files with 109 additions and 2 deletions
|
|
@ -2267,7 +2267,7 @@ static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ublk_handle_auto_buf_reg(struct ublk_io *io,
|
||||
static void ublk_clear_auto_buf_reg(struct ublk_io *io,
|
||||
struct io_uring_cmd *cmd,
|
||||
u16 *buf_idx)
|
||||
{
|
||||
|
|
@ -2287,7 +2287,13 @@ static int ublk_handle_auto_buf_reg(struct ublk_io *io,
|
|||
if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
|
||||
*buf_idx = io->buf.auto_reg.index;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Refresh auto buffer registration for @io from @cmd: first tear down any
 * previously registered buffer (its index, if one was registered in this
 * uring context, is reported back through @buf_idx so the caller can
 * unregister the bvec), then install the new registration.
 *
 * Returns 0 on success or a negative errno from ublk_set_auto_buf_reg().
 */
static int ublk_handle_auto_buf_reg(struct ublk_io *io,
				    struct io_uring_cmd *cmd,
				    u16 *buf_idx)
{
	ublk_clear_auto_buf_reg(io, cmd, buf_idx);
	return ublk_set_auto_buf_reg(io, cmd);
}
|
||||
|
||||
|
|
@ -2720,6 +2726,17 @@ static inline __u64 ublk_batch_buf_addr(const struct ublk_batch_io *uc,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Extract the zone append LBA carried by a batch element.
 *
 * Element layout (per flags in @uc): the fixed struct ublk_elem_header is
 * followed by an optional 8-byte buffer address (UBLK_BATCH_F_HAS_BUF_ADDR)
 * and then an optional 8-byte zone LBA (UBLK_BATCH_F_HAS_ZONE_LBA), so the
 * LBA offset is sizeof(*elem) plus 8 iff the buffer address is present.
 *
 * Returns the LBA when UBLK_BATCH_F_HAS_ZONE_LBA is set, otherwise -1
 * (all-ones sentinel, since the return type is unsigned).
 */
static inline __u64 ublk_batch_zone_lba(const struct ublk_batch_io *uc,
					const struct ublk_elem_header *elem)
{
	const void *buf = elem;

	if (uc->flags & UBLK_BATCH_F_HAS_ZONE_LBA)
		return *(const __u64 *)(buf + sizeof(*elem) +
				8 * !!(uc->flags & UBLK_BATCH_F_HAS_BUF_ADDR));
	return -1;
}
|
||||
|
||||
static struct ublk_auto_buf_reg
|
||||
ublk_batch_auto_buf_reg(const struct ublk_batch_io *uc,
|
||||
const struct ublk_elem_header *elem)
|
||||
|
|
@ -2875,6 +2892,84 @@ static int ublk_handle_batch_prep_cmd(const struct ublk_batch_io_data *data)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int ublk_batch_commit_io_check(const struct ublk_queue *ubq,
|
||||
struct ublk_io *io,
|
||||
union ublk_io_buf *buf)
|
||||
{
|
||||
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
|
||||
return -EBUSY;
|
||||
|
||||
/* BATCH_IO doesn't support UBLK_F_NEED_GET_DATA */
|
||||
if (ublk_need_map_io(ubq) && !buf->addr)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Commit one io result parsed from a batch command buffer element.
 *
 * Decodes the per-element buffer info, validates and records the server's
 * completion under the io lock, then — strictly after dropping the lock —
 * performs the work that must not race with io state (buffer unregister,
 * zone append LBA, request completion).
 *
 * Returns 0 on success or a negative errno when the commit is rejected.
 */
static int ublk_batch_commit_io(struct ublk_queue *ubq,
				const struct ublk_batch_io_data *data,
				const struct ublk_elem_header *elem)
{
	struct ublk_io *io = &ubq->ios[elem->tag];
	const struct ublk_batch_io *uc = &data->header;
	u16 buf_idx = UBLK_INVALID_BUF_IDX;
	union ublk_io_buf buf = { 0 };
	struct request *req = NULL;
	bool auto_reg = false;
	bool compl = false;
	int ret;

	/*
	 * Per device feature flags, the element carries either an auto
	 * buffer registration descriptor or a user buffer address.
	 */
	if (ublk_dev_support_auto_buf_reg(data->ub)) {
		buf.auto_reg = ublk_batch_auto_buf_reg(uc, elem);
		auto_reg = true;
	} else if (ublk_dev_need_map_io(data->ub))
		buf.addr = ublk_batch_buf_addr(uc, elem);

	ublk_io_lock(io);
	ret = ublk_batch_commit_io_check(ubq, io, &buf);
	if (!ret) {
		/* Record the committed result and new buffer info */
		io->res = elem->result;
		io->buf = buf;
		req = ublk_fill_io_cmd(io, data->cmd);

		/*
		 * Clear any previously auto-registered buffer; buf_idx is
		 * set iff a bvec needs unregistering after unlock.
		 */
		if (auto_reg)
			ublk_clear_auto_buf_reg(io, data->cmd, &buf_idx);
		compl = ublk_need_complete_req(data->ub, io);
	}
	ublk_io_unlock(io);

	if (unlikely(ret)) {
		pr_warn_ratelimited("%s: dev %u queue %u io %u: commit failure %d\n",
				__func__, data->ub->dev_info.dev_id, ubq->q_id,
				elem->tag, ret);
		return ret;
	}

	/* can't touch 'ublk_io' any more */
	if (buf_idx != UBLK_INVALID_BUF_IDX)
		io_buffer_unregister_bvec(data->cmd, buf_idx, data->issue_flags);
	if (req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = ublk_batch_zone_lba(uc, elem);
	if (compl)
		__ublk_complete_rq(req, io, ublk_dev_need_map_io(data->ub));
	return 0;
}
|
||||
|
||||
static int ublk_handle_batch_commit_cmd(const struct ublk_batch_io_data *data)
|
||||
{
|
||||
const struct ublk_batch_io *uc = &data->header;
|
||||
struct io_uring_cmd *cmd = data->cmd;
|
||||
struct ublk_batch_io_iter iter = {
|
||||
.uaddr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)),
|
||||
.total = uc->nr_elem * uc->elem_bytes,
|
||||
.elem_bytes = uc->elem_bytes,
|
||||
};
|
||||
int ret;
|
||||
|
||||
ret = ublk_walk_cmd_buf(&iter, data, ublk_batch_commit_io);
|
||||
|
||||
return iter.done == 0 ? ret : iter.done;
|
||||
}
|
||||
|
||||
static int ublk_check_batch_cmd_flags(const struct ublk_batch_io *uc)
|
||||
{
|
||||
unsigned elem_bytes = sizeof(struct ublk_elem_header);
|
||||
|
|
@ -2950,7 +3045,7 @@ static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
|
|||
ret = ublk_check_batch_cmd(&data);
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = -EOPNOTSUPP;
|
||||
ret = ublk_handle_batch_commit_cmd(&data);
|
||||
break;
|
||||
default:
|
||||
ret = -EOPNOTSUPP;
|
||||
|
|
@ -3659,6 +3754,10 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
|
|||
UBLK_F_AUTO_BUF_REG))
|
||||
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
|
||||
|
||||
/* UBLK_F_BATCH_IO doesn't support GET_DATA */
|
||||
if (ublk_dev_support_batch_io(ub))
|
||||
ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
|
||||
|
||||
/*
|
||||
* Zoned storage support requires reuse `ublksrv_io_cmd->addr` for
|
||||
* returning write_append_lba, which is only allowed in case of
|
||||
|
|
|
|||
|
|
@ -110,6 +110,14 @@
|
|||
*/
|
||||
#define UBLK_U_IO_PREP_IO_CMDS \
|
||||
_IOWR('u', 0x25, struct ublk_batch_io)
|
||||
/*
|
||||
* If failure code is returned, nothing in the command buffer is handled.
|
||||
* Otherwise, the returned value means how many bytes in command buffer
|
||||
* are handled actually, then number of handled IOs can be calculated with
|
||||
* `elem_bytes` for each IO. IOs in the remained bytes are not committed,
|
||||
* userspace has to check return value for dealing with partial committing
|
||||
* correctly.
|
||||
*/
|
||||
#define UBLK_U_IO_COMMIT_IO_CMDS \
|
||||
_IOWR('u', 0x26, struct ublk_batch_io)
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue