blk-mq: add a new queue sysfs attribute async_depth

Add a new field async_depth to struct request_queue, along with the
related APIs. It is not used yet; following patches will convert the
elevators to use it instead of their internal async_depth.

Signed-off-by: Yu Kuai <yukuai@fnnas.com>
Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-core.c       |  1 +
 block/blk-mq.c         |  6 ++++++
 block/blk-sysfs.c      | 42 ++++++++++++++++++++++++++++++++++++++++++
 block/elevator.c       |  1 +
 include/linux/blkdev.h |  1 +
 5 files changed, 51 insertions(+)

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -463,6 +463,7 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 	fs_reclaim_release(GFP_KERNEL);
 
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
+	q->async_depth = BLKDEV_DEFAULT_RQ;
 
 	return q;

diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4662,6 +4662,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	spin_lock_init(&q->requeue_lock);
 
 	q->nr_requests = set->queue_depth;
+	q->async_depth = set->queue_depth;
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_map_swqueue(q);
@@ -5028,6 +5029,11 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
 		q->elevator->et = et;
 	}
 
+	/*
+	 * Preserve the relative value; both nr and async_depth are at most
+	 * 16-bit values, so there is no need to worry about overflow.
+	 */
+	q->async_depth = max(q->async_depth * nr / q->nr_requests, 1);
 	q->nr_requests = nr;
 	if (q->elevator && q->elevator->type->ops.depth_updated)
 		q->elevator->type->ops.depth_updated(q);
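
The rescale above keeps async_depth at the same fraction of the queue depth
when nr_requests changes. A standalone sketch of that arithmetic with
hypothetical numbers (not part of the patch; max() is a simplified stand-in
for the kernel macro):

    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))  /* simplified stand-in */

    int main(void)
    {
        unsigned int nr_requests = 64;   /* current queue depth */
        unsigned int async_depth = 48;   /* current limit, 75% of 64 */
        unsigned int nr = 128;           /* new queue depth being applied */

        /* Same expression as in blk_mq_update_nr_requests() above:
         * 48 * 128 / 64 = 96, so the 3/4 ratio survives the resize,
         * and max(..., 1) guarantees at least one async slot.
         */
        async_depth = max(async_depth * nr / nr_requests, 1);

        printf("new async_depth = %u\n", async_depth);  /* prints 96 */
        return 0;
    }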

diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -127,6 +127,46 @@ unlock:
 	return ret;
 }
 
+static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
+{
+	guard(mutex)(&disk->queue->elevator_lock);
+
+	return queue_var_show(disk->queue->async_depth, page);
+}
+
+static ssize_t
+queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
+{
+	struct request_queue *q = disk->queue;
+	unsigned int memflags;
+	unsigned long nr;
+	int ret;
+
+	if (!queue_is_mq(q))
+		return -EINVAL;
+
+	ret = queue_var_store(&nr, page, count);
+	if (ret < 0)
+		return ret;
+
+	if (nr == 0)
+		return -EINVAL;
+
+	memflags = blk_mq_freeze_queue(q);
+	scoped_guard(mutex, &q->elevator_lock) {
+		if (q->elevator) {
+			q->async_depth = min(q->nr_requests, nr);
+			if (q->elevator->type->ops.depth_updated)
+				q->elevator->type->ops.depth_updated(q);
+		} else {
+			ret = -EINVAL;
+		}
+	}
+	blk_mq_unfreeze_queue(q, memflags);
+
+	return ret;
+}
+
 static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 {
 	ssize_t ret;
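
With the handlers above wired up, async_depth is an ordinary sysfs file under
the disk's queue directory. A minimal userspace sketch, assuming a disk named
sda (the device name and the value 32 are hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/block/sda/queue/async_depth";
        char buf[32];
        ssize_t n;
        int fd;

        /* Set a new async depth; per the store handler above, the kernel
         * clamps it to nr_requests and fails with EINVAL if no elevator
         * is attached or the value is 0.
         */
        fd = open(path, O_WRONLY);
        if (fd < 0 || write(fd, "32\n", 3) < 0)
            perror("write async_depth");
        if (fd >= 0)
            close(fd);

        /* Read the (possibly clamped) value back. */
        fd = open(path, O_RDONLY);
        if (fd >= 0) {
            n = read(fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                buf[n] = '\0';
                printf("async_depth = %s", buf);
            }
            close(fd);
        }
        return 0;
    }
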
@@ -532,6 +572,7 @@ static struct queue_sysfs_entry _prefix##_entry = {	\
 }
 
 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
 QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
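
For reference, QUEUE_RW_ENTRY is the stamping macro whose tail is visible in
the hunk header above; the new line should expand to roughly the following (a
sketch inferred from that shape, not copied verbatim from the tree):

    static struct queue_sysfs_entry queue_async_depth_entry = {
        .attr	= { .name = "async_depth", .mode = 0644 },
        .show	= queue_async_depth_show,
        .store	= queue_async_depth_store,
    };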
@@ -719,6 +760,7 @@ static struct attribute *blk_mq_queue_attrs[] = {
 	 */
 	&elv_iosched_entry.attr,
 	&queue_requests_entry.attr,
+	&queue_async_depth_entry.attr,
 #ifdef CONFIG_BLK_WBT
 	&queue_wb_lat_entry.attr,
 #endif

diff --git a/block/elevator.c b/block/elevator.c
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -589,6 +589,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
 		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
 		q->elevator = NULL;
 		q->nr_requests = q->tag_set->queue_depth;
+		q->async_depth = q->tag_set->queue_depth;
 	}
 
 	blk_add_trace_msg(q, "elv switch: %s", ctx->name);

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -551,6 +551,7 @@ struct request_queue {
 	 * queue settings
 	 */
 	unsigned int		nr_requests;	/* Max # of requests */
+	unsigned int		async_depth;	/* Max # of async requests */
 
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;