nvme-multipath: Add visibility for queue-depth io-policy
This patch helps add nvme native multipath visibility for the queue-depth io-policy. It adds a new attribute file named "queue_depth" under the namespace device path node which prints the number of active/in-flight I/O requests currently queued for the given path.

For instance, if we have a shared namespace accessible from two different controllers/paths, then accessing the head block node of the shared namespace would show the following output:

$ ls -l /sys/block/nvme1n1/multipath/
nvme1c1n1 -> ../../../../../pci052e:78/052e:78:00.0/nvme/nvme1/nvme1c1n1
nvme1c3n1 -> ../../../../../pci058e:78/058e:78:00.0/nvme/nvme3/nvme1c3n1

In the above example, nvme1n1 is the head gendisk node created for a shared namespace, and the namespace is accessible from the nvme1c1n1 and nvme1c3n1 paths. For the queue-depth io-policy we can then refer to the "queue_depth" attribute file created under each namespace path:

$ cat /sys/block/nvme1n1/multipath/nvme1c1n1/queue_depth
518

$ cat /sys/block/nvme1n1/multipath/nvme1c3n1/queue_depth
504

From the above output we can infer that the I/O workload targeted at nvme1n1 uses two paths, nvme1c1n1 and nvme1c3n1, and that the current queue depth of each path is 518 and 504 respectively. Reading the "queue_depth" file when the configured io-policy is anything but queue-depth shows no output.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
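A minimal usage sketch, assuming the shared namespace belongs to a subsystem exposed as /sys/class/nvme-subsystem/nvme-subsys1 (the subsystem name here is illustrative, not taken from the commit): select the queue-depth policy through the subsystem's iopolicy attribute, then sample the queue_depth file of each path.

$ echo queue-depth > /sys/class/nvme-subsystem/nvme-subsys1/iopolicy   # select the queue-depth io-policy (assumed subsystem name)
$ cat /sys/class/nvme-subsystem/nvme-subsys1/iopolicy
queue-depth
$ for p in /sys/block/nvme1n1/multipath/*/queue_depth; do echo "$p: $(cat "$p")"; done   # print the in-flight request count per path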
This commit is contained in:
parent
6546cc4a56
commit
7cbafa3ff0
3 changed files with 15 additions and 1 deletion
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -976,6 +976,18 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
 }
 DEVICE_ATTR_RO(ana_state);
 
+static ssize_t queue_depth_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+	if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD)
+		return 0;
+
+	return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active));
+}
+DEVICE_ATTR_RO(queue_depth);
+
 static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr,
 		char *buf)
 {

--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -984,6 +984,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
 extern bool multipath;
 extern struct device_attribute dev_attr_ana_grpid;
 extern struct device_attribute dev_attr_ana_state;
+extern struct device_attribute dev_attr_queue_depth;
 extern struct device_attribute dev_attr_numa_nodes;
 extern struct device_attribute subsys_attr_iopolicy;
 

--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -258,6 +258,7 @@ static struct attribute *nvme_ns_attrs[] = {
 #ifdef CONFIG_NVME_MULTIPATH
 	&dev_attr_ana_grpid.attr,
 	&dev_attr_ana_state.attr,
+	&dev_attr_queue_depth.attr,
 	&dev_attr_numa_nodes.attr,
 #endif
 	&dev_attr_io_passthru_err_log_enabled.attr,
@@ -291,7 +292,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
 		if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl))
 			return 0;
 	}
-	if (a == &dev_attr_numa_nodes.attr) {
+	if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) {
 		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
 			return 0;
 	}