mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 04:04:43 +01:00
rnbd-clt: pass queue_limits to blk_mq_alloc_disk
Pass the limits rnbd-clt imposes directly to blk_mq_alloc_disk instead of setting them one at a time. While at it don't set an explicit number of discard segments, as 1 is the default (which most drivers rely on). Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Jack Wang <jinpu.wang@ionos.com> Link: https://lore.kernel.org/r/20240215070300.2200308-9-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
24f30b770c
commit
e6ed9892f1
1 changed file with 25 additions and 39 deletions
|
|
@ -1329,43 +1329,6 @@ static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
|
|||
}
|
||||
}
|
||||
|
||||
static void setup_request_queue(struct rnbd_clt_dev *dev,
|
||||
struct rnbd_msg_open_rsp *rsp)
|
||||
{
|
||||
blk_queue_logical_block_size(dev->queue,
|
||||
le16_to_cpu(rsp->logical_block_size));
|
||||
blk_queue_physical_block_size(dev->queue,
|
||||
le16_to_cpu(rsp->physical_block_size));
|
||||
blk_queue_max_hw_sectors(dev->queue,
|
||||
dev->sess->max_io_size / SECTOR_SIZE);
|
||||
|
||||
/*
|
||||
* we don't support discards to "discontiguous" segments
|
||||
* in on request
|
||||
*/
|
||||
blk_queue_max_discard_segments(dev->queue, 1);
|
||||
|
||||
blk_queue_max_discard_sectors(dev->queue,
|
||||
le32_to_cpu(rsp->max_discard_sectors));
|
||||
dev->queue->limits.discard_granularity =
|
||||
le32_to_cpu(rsp->discard_granularity);
|
||||
dev->queue->limits.discard_alignment =
|
||||
le32_to_cpu(rsp->discard_alignment);
|
||||
if (le16_to_cpu(rsp->secure_discard))
|
||||
blk_queue_max_secure_erase_sectors(dev->queue,
|
||||
le32_to_cpu(rsp->max_discard_sectors));
|
||||
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
|
||||
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
|
||||
blk_queue_max_segments(dev->queue, dev->sess->max_segments);
|
||||
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
|
||||
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
|
||||
blk_queue_write_cache(dev->queue,
|
||||
!!(rsp->cache_policy & RNBD_WRITEBACK),
|
||||
!!(rsp->cache_policy & RNBD_FUA));
|
||||
blk_queue_max_write_zeroes_sectors(dev->queue,
|
||||
le32_to_cpu(rsp->max_write_zeroes_sectors));
|
||||
}
|
||||
|
||||
static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
|
||||
struct rnbd_msg_open_rsp *rsp, int idx)
|
||||
{
|
||||
|
|
@ -1403,18 +1366,41 @@ static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev,
|
|||
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev,
|
||||
struct rnbd_msg_open_rsp *rsp)
|
||||
{
|
||||
struct queue_limits lim = {
|
||||
.logical_block_size = le16_to_cpu(rsp->logical_block_size),
|
||||
.physical_block_size = le16_to_cpu(rsp->physical_block_size),
|
||||
.io_opt = dev->sess->max_io_size,
|
||||
.max_hw_sectors = dev->sess->max_io_size / SECTOR_SIZE,
|
||||
.max_hw_discard_sectors = le32_to_cpu(rsp->max_discard_sectors),
|
||||
.discard_granularity = le32_to_cpu(rsp->discard_granularity),
|
||||
.discard_alignment = le32_to_cpu(rsp->discard_alignment),
|
||||
.max_segments = dev->sess->max_segments,
|
||||
.virt_boundary_mask = SZ_4K - 1,
|
||||
.max_write_zeroes_sectors =
|
||||
le32_to_cpu(rsp->max_write_zeroes_sectors),
|
||||
};
|
||||
int idx = dev->clt_device_id;
|
||||
|
||||
dev->size = le64_to_cpu(rsp->nsectors) *
|
||||
le16_to_cpu(rsp->logical_block_size);
|
||||
|
||||
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, NULL, dev);
|
||||
if (rsp->secure_discard) {
|
||||
lim.max_secure_erase_sectors =
|
||||
le32_to_cpu(rsp->max_discard_sectors);
|
||||
}
|
||||
|
||||
dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, &lim, dev);
|
||||
if (IS_ERR(dev->gd))
|
||||
return PTR_ERR(dev->gd);
|
||||
dev->queue = dev->gd->queue;
|
||||
rnbd_init_mq_hw_queues(dev);
|
||||
|
||||
setup_request_queue(dev, rsp);
|
||||
blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
|
||||
blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
|
||||
blk_queue_write_cache(dev->queue,
|
||||
!!(rsp->cache_policy & RNBD_WRITEBACK),
|
||||
!!(rsp->cache_policy & RNBD_FUA));
|
||||
|
||||
return rnbd_clt_setup_gen_disk(dev, rsp, idx);
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue