block-6.19-20260205

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmmFEd8QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpvaYD/4jQ5jb3h4ytaf5P36+5jxW9BL/JJI6n87J
 /KU+a7x8AzvgyJu6woKy3LlBSrOLLgootKz7bjwKRvyxNYtYngdmCIHQPXYnABhT
 rJEQpiYBPjMVEllhlEECknbrl8u5NwuUpbG/LGf8NR8SSqMBGJdjwpvcF0bd7V3V
 BpS4bEla3tkEiVZQLYNxyFLNleBbRW+rZB8jaUvrDuILZe2W22dW5cDXLx/jo0JI
 +RQch0fXa26dNmIJMWpmPq+PTwFWtxoZUdPYxsNN2UAcR3W0fLOeWioSRQJqunwo
 rGemiqL0UiC20mxOXWhJUENm9GZtJIJuOVvQd4gMZVwdS9gVMmmPck90G5XNOTH4
 BT1qQY+OSCd4xDNo/MozC6qSC/01mR525T278Y0cpwUvZDGK1Eb5dKyG/NncSASL
 zlKwBfC86M9J+nrDUSxBMXxYEfu6LnH4yiJuEWMmLxNBA98P7rsOUutPhgSfEKfy
 jJPuZNx4Mnmh5tu4c2C+IHUxpd0l5K3XZ4i1m/WF2JcMkWDZcLl9oHlbZY0f3GMb
 7Lhc9xdHr/I491dzQj2mK5ix3PP6eCEhUnbND8gzL2WGy1DULfhaFdYRWO9EAlQu
 z7TxrKhvThBgA1P38GL6ALFLhkjkKjE4kV17OGjRPinx4HLmCAPtWEjr3A4wFWAH
 cXhh1gWzZQ==
 =0oAC
 -----END PGP SIGNATURE-----

Merge tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - Revert of a change for loop, which caused regressions for some users
   (Actually revert of two commits, where one is just an existing fix
   for the offending commit)

 - NVMe pull via Keith:
      - Fix NULL pointer access setting up dma mappings
      - Fix invalid memory access from malformed TCP PDU

* tag 'block-6.19-20260205' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  loop: revert exclusive opener loop status change
  nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec
  nvme-pci: handle changing device dma map requirements
This commit is contained in:
Linus Torvalds 2026-02-05 15:00:53 -08:00
commit 06bc4e2631
3 changed files with 59 additions and 48 deletions

View file

@@ -1225,28 +1225,16 @@ static int loop_clr_fd(struct loop_device *lo)
}
static int
loop_set_status(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev, const struct loop_info64 *info)
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
bool partscan = false;
bool size_changed = false;
unsigned int memflags;
/*
* If we don't hold exclusive handle for the device, upgrade to it
* here to avoid changing device under exclusive owner.
*/
if (!(mode & BLK_OPEN_EXCL)) {
err = bd_prepare_to_claim(bdev, loop_set_status, NULL);
if (err)
goto out_reread_partitions;
}
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
goto out_abort_claiming;
return err;
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1285,10 +1273,6 @@ out_unfreeze:
}
out_unlock:
mutex_unlock(&lo->lo_mutex);
out_abort_claiming:
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, loop_set_status);
out_reread_partitions:
if (partscan)
loop_reread_partitions(lo);
@@ -1368,9 +1352,7 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
}
static int
loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct loop_info __user *arg)
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
struct loop_info info;
struct loop_info64 info64;
@@ -1378,19 +1360,17 @@ loop_set_status_old(struct loop_device *lo, blk_mode_t mode,
if (copy_from_user(&info, arg, sizeof (struct loop_info)))
return -EFAULT;
loop_info64_from_old(&info, &info64);
return loop_set_status(lo, mode, bdev, &info64);
return loop_set_status(lo, &info64);
}
static int
loop_set_status64(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct loop_info64 __user *arg)
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
struct loop_info64 info64;
if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
return -EFAULT;
return loop_set_status(lo, mode, bdev, &info64);
return loop_set_status(lo, &info64);
}
static int
@@ -1569,14 +1549,14 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
case LOOP_SET_STATUS:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_status_old(lo, mode, bdev, argp);
err = loop_set_status_old(lo, argp);
break;
case LOOP_GET_STATUS:
return loop_get_status_old(lo, argp);
case LOOP_SET_STATUS64:
err = -EPERM;
if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_status64(lo, mode, bdev, argp);
err = loop_set_status64(lo, argp);
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
@@ -1670,9 +1650,8 @@ loop_info64_to_compat(const struct loop_info64 *info64,
}
static int
loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
struct block_device *bdev,
const struct compat_loop_info __user *arg)
loop_set_status_compat(struct loop_device *lo,
const struct compat_loop_info __user *arg)
{
struct loop_info64 info64;
int ret;
@@ -1680,7 +1659,7 @@ loop_set_status_compat(struct loop_device *lo, blk_mode_t mode,
ret = loop_info64_from_compat(arg, &info64);
if (ret < 0)
return ret;
return loop_set_status(lo, mode, bdev, &info64);
return loop_set_status(lo, &info64);
}
static int
@@ -1706,7 +1685,7 @@ static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
err = loop_set_status_compat(lo, mode, bdev,
err = loop_set_status_compat(lo,
(const struct compat_loop_info __user *)arg);
break;
case LOOP_GET_STATUS:

View file

@@ -816,6 +816,32 @@ static void nvme_unmap_data(struct request *req)
nvme_free_descriptors(req);
}
static bool nvme_pci_prp_save_mapping(struct request *req,
struct device *dma_dev,
struct blk_dma_iter *iter)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
if (dma_use_iova(&iod->dma_state) || !dma_need_unmap(dma_dev))
return true;
if (!iod->nr_dma_vecs) {
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
GFP_ATOMIC);
if (!iod->dma_vecs) {
iter->status = BLK_STS_RESOURCE;
return false;
}
}
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
iod->nr_dma_vecs++;
return true;
}
static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
struct blk_dma_iter *iter)
{
@@ -825,12 +851,7 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
return true;
if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
return false;
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
iod->nr_dma_vecs++;
}
return true;
return nvme_pci_prp_save_mapping(req, dma_dev, iter);
}
static blk_status_t nvme_pci_setup_data_prp(struct request *req,
@@ -843,15 +864,8 @@ static blk_status_t nvme_pci_setup_data_prp(struct request *req,
unsigned int prp_len, i;
__le64 *prp_list;
if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
GFP_ATOMIC);
if (!iod->dma_vecs)
return BLK_STS_RESOURCE;
iod->dma_vecs[0].addr = iter->addr;
iod->dma_vecs[0].len = iter->len;
iod->nr_dma_vecs = 1;
}
if (!nvme_pci_prp_save_mapping(req, nvmeq->dev->dev, iter))
return iter->status;
/*
* PRP1 always points to the start of the DMA transfers.
@@ -1219,6 +1233,7 @@ static blk_status_t nvme_prep_rq(struct request *req)
iod->nr_descriptors = 0;
iod->total_len = 0;
iod->meta_total_len = 0;
iod->nr_dma_vecs = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)

View file

@@ -349,11 +349,14 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
cmd->req.sg = NULL;
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
u32 length, offset, sg_offset;
unsigned int sg_remaining;
int nr_pages;
length = cmd->pdu_len;
@@ -361,9 +364,22 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
nvmet_tcp_fatal_error(cmd->queue);
return;
}
sg = &cmd->req.sg[cmd->sg_idx];
sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
while (length) {
if (!sg_remaining) {
nvmet_tcp_fatal_error(cmd->queue);
return;
}
if (!sg->length || sg->length <= sg_offset) {
nvmet_tcp_fatal_error(cmd->queue);
return;
}
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
bvec_set_page(iov, sg_page(sg), iov_len,
@@ -371,6 +387,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length -= iov_len;
sg = sg_next(sg);
sg_remaining--;
iov++;
sg_offset = 0;
}