From f947d9e77b26238b821b5227afb4fee8c7ea0d5a Mon Sep 17 00:00:00 2001 From: Wilfred Mallawa Date: Mon, 12 Jan 2026 09:39:03 +1000 Subject: [PATCH 01/19] nvme/host: fixup some typos Fix up some minor typos in the nvme host driver and a comment style to conform to the standard kernel style. Signed-off-by: Wilfred Mallawa Reviewed-by: Damien Le Moal Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/tcp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 69cb04406b47..74cbbf48a981 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -25,7 +25,8 @@ struct nvme_tcp_queue; -/* Define the socket priority to use for connections were it is desirable +/* + * Define the socket priority to use for connections where it is desirable * that the NIC consider performing optimized packet processing or filtering. * A non-zero value being sufficient to indicate general consideration of any * possible optimization. Making it a module param allows for alternative @@ -926,7 +927,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, req->curr_bio = req->curr_bio->bi_next; /* - * If we don`t have any bios it means that controller + * If we don't have any bios it means the controller * sent more data than we requested, hence error */ if (!req->curr_bio) { From ddfb8b322bbd8ae996f4ac0192f0190feb0a01ce Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Mon, 3 Nov 2025 15:44:06 +0100 Subject: [PATCH 02/19] nvme: expose active quirks in sysfs Currently, there is no straightforward way for a user to inspect which quirks are active for a given device from userspace. Add a new "quirks" sysfs attribute to the nvme controller device. Reading this file will display a human-readable list of all active quirks, with each quirk name on a new line. If no quirks are active, it will display "none". 
Tested-by: John Meneghini Reviewed-by: John Meneghini Reviewed-by: Sagi Grimberg Reviewed-by: Martin K. Petersen Reviewed-by: Chaitanya Kulkarni Signed-off-by: Maurizio Lombardi Signed-off-by: Keith Busch --- drivers/nvme/host/nvme.h | 54 +++++++++++++++++++++++++++++++++++++++ drivers/nvme/host/sysfs.c | 23 +++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9a5f28c5103c..523015ae2add 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -180,6 +180,60 @@ enum nvme_quirks { NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22), }; +static inline char *nvme_quirk_name(enum nvme_quirks q) +{ + switch (q) { + case NVME_QUIRK_STRIPE_SIZE: + return "stripe_size"; + case NVME_QUIRK_IDENTIFY_CNS: + return "identify_cns"; + case NVME_QUIRK_DEALLOCATE_ZEROES: + return "deallocate_zeroes"; + case NVME_QUIRK_DELAY_BEFORE_CHK_RDY: + return "delay_before_chk_rdy"; + case NVME_QUIRK_NO_APST: + return "no_apst"; + case NVME_QUIRK_NO_DEEPEST_PS: + return "no_deepest_ps"; + case NVME_QUIRK_QDEPTH_ONE: + return "qdepth_one"; + case NVME_QUIRK_MEDIUM_PRIO_SQ: + return "medium_prio_sq"; + case NVME_QUIRK_IGNORE_DEV_SUBNQN: + return "ignore_dev_subnqn"; + case NVME_QUIRK_DISABLE_WRITE_ZEROES: + return "disable_write_zeroes"; + case NVME_QUIRK_SIMPLE_SUSPEND: + return "simple_suspend"; + case NVME_QUIRK_SINGLE_VECTOR: + return "single_vector"; + case NVME_QUIRK_128_BYTES_SQES: + return "128_bytes_sqes"; + case NVME_QUIRK_SHARED_TAGS: + return "shared_tags"; + case NVME_QUIRK_NO_TEMP_THRESH_CHANGE: + return "no_temp_thresh_change"; + case NVME_QUIRK_NO_NS_DESC_LIST: + return "no_ns_desc_list"; + case NVME_QUIRK_DMA_ADDRESS_BITS_48: + return "dma_address_bits_48"; + case NVME_QUIRK_SKIP_CID_GEN: + return "skip_cid_gen"; + case NVME_QUIRK_BOGUS_NID: + return "bogus_nid"; + case NVME_QUIRK_NO_SECONDARY_TEMP_THRESH: + return "no_secondary_temp_thresh"; + case NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND: + return 
"force_no_simple_suspend"; + case NVME_QUIRK_BROKEN_MSI: + return "broken_msi"; + case NVME_QUIRK_DMAPOOL_ALIGN_512: + return "dmapool_align_512"; + } + + return "unknown"; +} + /* * Common request structure for NVMe passthrough. All drivers must have * this structure as the first member of their request-private data. diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index 29430949ce2f..16c6fea4b2db 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -601,6 +601,28 @@ static ssize_t dctype_show(struct device *dev, } static DEVICE_ATTR_RO(dctype); +static ssize_t quirks_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int count = 0, i; + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + unsigned long quirks = ctrl->quirks; + + if (!quirks) + return sysfs_emit(buf, "none\n"); + + for (i = 0; quirks; ++i) { + if (quirks & 1) { + count += sysfs_emit_at(buf, count, "%s\n", + nvme_quirk_name(BIT(i))); + } + quirks >>= 1; + } + + return count; +} +static DEVICE_ATTR_RO(quirks); + #ifdef CONFIG_NVME_HOST_AUTH static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -742,6 +764,7 @@ static struct attribute *nvme_dev_attrs[] = { &dev_attr_kato.attr, &dev_attr_cntrltype.attr, &dev_attr_dctype.attr, + &dev_attr_quirks.attr, #ifdef CONFIG_NVME_HOST_AUTH &dev_attr_dhchap_secret.attr, &dev_attr_dhchap_ctrl_secret.attr, From ac30cd304347f2daeece6998bb5f0ae2db64e397 Mon Sep 17 00:00:00 2001 From: John Garry Date: Wed, 28 Jan 2026 08:26:23 +0000 Subject: [PATCH 03/19] nvme: stop using AWUPF As described at [0], much of the atomic write parts of the specification are lacking. For now, there is nothing which we can do in software about the lack of a dedicated NVMe write atomic command. As for reading the atomic write limits, it is felt that the per-namespace values are mostly properly specified and it is assumed that they are properly implemented. 
The specification of NAWUPF is quite clear. However the specification of NABSPF is less clear. The lack of clarity in NABSPF comes from deciding whether NABSPF applies when NSABP is 0 - it is assumed that NABSPF does not apply when NSABP is 0. As for the per-controller AWUPF, how this value applies to shared namespaces is missing in the specification. Furthermore, the value is in terms of logical blocks, which is an NS entity. Since AWUPF is so poorly defined, stop using it altogether. Hopefully this will force vendors to implement NAWUPF support always. Note that AWUPF not only affects atomic write support, but also the physical block size reported for the device. To help users know this restriction, log an info message per NS. [0] https://lore.kernel.org/linux-nvme/20250707141834.GA30198@lst.de/ Tested-by: Nilay Shroff Reviewed-by: Nilay Shroff Reviewed-by: Martin K. Petersen Reviewed-by: Christoph Hellwig Signed-off-by: John Garry Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 14 +++++--------- drivers/nvme/host/nvme.h | 3 ++- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 7bf228df6001..83efc88ac0f9 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2045,14 +2045,10 @@ static u32 nvme_configure_atomic_write(struct nvme_ns *ns, if (id->nabspf) boundary = (le16_to_cpu(id->nabspf) + 1) * bs; } else { - /* - * Use the controller wide atomic write unit. This sucks - * because the limit is defined in terms of logical blocks while - * namespaces can have different formats, and because there is - * no clear language in the specification prohibiting different - * values for different controllers in the subsystem. 
- */ - atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + if (ns->ctrl->awupf) + dev_info_once(ns->ctrl->device, + "AWUPF ignored, only NAWUPF accepted\n"); + atomic_bs = bs; } lim->atomic_write_hw_max = atomic_bs; @@ -3221,7 +3217,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) memcpy(subsys->model, id->mn, sizeof(subsys->model)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; - subsys->awupf = le16_to_cpu(id->awupf); /* Versions prior to 1.4 don't necessarily report a valid type */ if (id->cntrltype == NVME_CTRL_DISC || @@ -3654,6 +3649,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) dev_pm_qos_expose_latency_tolerance(ctrl->device); else if (!ctrl->apst_enabled && prev_apst_enabled) dev_pm_qos_hide_latency_tolerance(ctrl->device); + ctrl->awupf = le16_to_cpu(id->awupf); out_free: kfree(id); return ret; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 523015ae2add..9971045dbc05 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -464,6 +464,8 @@ struct nvme_ctrl { enum nvme_ctrl_type cntrltype; enum nvme_dctype dctype; + + u16 awupf; /* 0's based value. */ }; static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) @@ -496,7 +498,6 @@ struct nvme_subsystem { u8 cmic; enum nvme_subsys_type subtype; u16 vendor_id; - u16 awupf; /* 0's based value. */ struct ida ns_ida; #ifdef CONFIG_NVME_MULTIPATH enum nvme_iopolicy iopolicy; From 0a1fc2f301529ac75aec0ce80d5ab9d9e4dc4b16 Mon Sep 17 00:00:00 2001 From: Daniel Hodges Date: Sat, 31 Jan 2026 19:08:40 -0800 Subject: [PATCH 04/19] nvme-fabrics: use kfree_sensitive() for DHCHAP secrets The DHCHAP secrets (dhchap_secret and dhchap_ctrl_secret) contain authentication key material for NVMe-oF. Use kfree_sensitive() instead of kfree() in nvmf_free_options() to ensure secrets are zeroed before the memory is freed, preventing recovery from freed pages. 
Reviewed-by: Christoph Hellwig Signed-off-by: Daniel Hodges Signed-off-by: Keith Busch --- drivers/nvme/host/fabrics.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 55a8afd2efd5..d37cb140d832 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -1290,8 +1290,8 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts) kfree(opts->subsysnqn); kfree(opts->host_traddr); kfree(opts->host_iface); - kfree(opts->dhchap_secret); - kfree(opts->dhchap_ctrl_secret); + kfree_sensitive(opts->dhchap_secret); + kfree_sensitive(opts->dhchap_ctrl_secret); kfree(opts); } EXPORT_SYMBOL_GPL(nvmf_free_options); From b84bb7bd913d8ca2f976ee6faf4a174f91c02b8d Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sat, 31 Jan 2026 22:48:08 +0800 Subject: [PATCH 05/19] nvme: fix admin queue leak on controller reset When nvme_alloc_admin_tag_set() is called during a controller reset, a previous admin queue may still exist. Release it properly before allocating a new one to avoid orphaning the old queue. This fixes a regression introduced by commit 03b3bcd319b3 ("nvme: fix admin request_queue lifetime"). Cc: Keith Busch Fixes: 03b3bcd319b3 ("nvme: fix admin request_queue lifetime") Reported-and-tested-by: Yi Zhang Closes: https://lore.kernel.org/linux-block/CAHj4cs9wv3SdPo+N01Fw2SHBYDs9tj2M_e1-GdQOkRy=DsBB1w@mail.gmail.com/ Signed-off-by: Ming Lei Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 83efc88ac0f9..c12986495e71 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4860,6 +4860,13 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, if (ret) return ret; + /* + * If a previous admin queue exists (e.g., from before a reset), + * put it now before allocating a new one to avoid orphaning it. 
+ */ + if (ctrl->admin_q) + blk_put_queue(ctrl->admin_q); + ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->admin_q)) { ret = PTR_ERR(ctrl->admin_q); From 7bb8c40f5ad88392bbabb719ebfd5e3354ce0428 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Wed, 4 Feb 2026 11:55:55 +0100 Subject: [PATCH 06/19] nvme: add support for dynamic quirk configuration via module parameter Introduce support for enabling or disabling specific NVMe quirks at module load time through the `quirks` module parameter. This mechanism allows users to apply known quirks dynamically based on the device's PCI vendor and device IDs, without needing to add hardcoded entries in the driver and recompile the kernel. While the generic PCI new_id sysfs interface exists for dynamic configuration, it is insufficient for scenarios where the system fails to boot (for example, this has been reported to happen because of the bogus_nid quirk). The new_id attribute is writable only after the system has booted and sysfs is mounted. The `quirks` parameter accepts a list of quirk specifications separated by a '-' character in the following format: VendorID:ProductID:quirk_names[-VendorID:ProductID:quirk_names-..] Each quirk is represented by its name and can be prefixed with `^` to indicate that the quirk should be disabled; quirk names are separated by a ',' character. 
Example: enable BOGUS_NID and BROKEN_MSI, disable DEALLOCATE_ZEROES: $ modprobe nvme quirks=7170:2210:bogus_nid,broken_msi,^deallocate_zeroes Tested-by: Daniel Wagner Reviewed-by: Christoph Hellwig Signed-off-by: Maurizio Lombardi Signed-off-by: Daniel Wagner Signed-off-by: Keith Busch --- .../admin-guide/kernel-parameters.txt | 13 ++ drivers/nvme/host/pci.c | 162 ++++++++++++++++++ 2 files changed, 175 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index a8d0afde7f85..f0b286c2dfc1 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -74,6 +74,7 @@ TPM TPM drivers are enabled. UMS USB Mass Storage support is enabled. USB USB support is enabled. + NVME NVMe support is enabled USBHID USB Human Interface Device support is enabled. V4L Video For Linux support is enabled. VGA The VGA console has been enabled. @@ -4671,6 +4672,18 @@ Kernel parameters This can be set from sysctl after boot. See Documentation/admin-guide/sysctl/vm.rst for details. + nvme.quirks= [NVME] A list of quirk entries to augment the built-in + nvme quirk list. List entries are separated by a + '-' character. + Each entry has the form VendorID:ProductID:quirk_names. + The IDs are 4-digits hex numbers and quirk_names is a + list of quirk names separated by commas. A quirk name + can be prefixed by '^', meaning that the specified + quirk must be disabled. + + Example: + nvme.quirks=7710:2267:bogus_nid,^identify_cns-9900:7711:broken_msi + ohci1394_dma=early [HW,EARLY] enable debugging via the ohci1394 driver. See Documentation/core-api/debugging-via-ohci1394.rst for more info. 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9fc4a60280a0..bd884e294600 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -72,6 +72,13 @@ static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <= (1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE)); +struct quirk_entry { + u16 vendor_id; + u16 dev_id; + u32 enabled_quirks; + u32 disabled_quirks; +}; + static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -102,6 +109,142 @@ static unsigned int io_queue_depth = 1024; module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); +static struct quirk_entry *nvme_pci_quirk_list; +static unsigned int nvme_pci_quirk_count; + +/* Helper to parse individual quirk names */ +static int nvme_parse_quirk_names(char *quirk_str, struct quirk_entry *entry) +{ + int i; + size_t field_len; + bool disabled, found; + char *p = quirk_str, *field; + + while ((field = strsep(&p, ",")) && *field) { + disabled = false; + found = false; + + if (*field == '^') { + /* Skip the '^' character */ + disabled = true; + field++; + } + + field_len = strlen(field); + for (i = 0; i < 32; i++) { + unsigned int bit = 1U << i; + char *q_name = nvme_quirk_name(bit); + size_t q_len = strlen(q_name); + + if (!strcmp(q_name, "unknown")) + break; + + if (!strcmp(q_name, field) && + q_len == field_len) { + if (disabled) + entry->disabled_quirks |= bit; + else + entry->enabled_quirks |= bit; + found = true; + break; + } + } + + if (!found) { + pr_err("nvme: unrecognized quirk %s\n", field); + return -EINVAL; + } + } + return 0; +} + +/* Helper to parse a single VID:DID:quirk_names entry */ +static int nvme_parse_quirk_entry(char *s, struct quirk_entry *entry) +{ + char *field; + + field = strsep(&s, ":"); + if (!field || kstrtou16(field, 16, &entry->vendor_id)) + return -EINVAL; + + field = strsep(&s, ":"); + if (!field || kstrtou16(field, 
16, &entry->dev_id)) + return -EINVAL; + + field = strsep(&s, ":"); + if (!field) + return -EINVAL; + + return nvme_parse_quirk_names(field, entry); +} + +static int quirks_param_set(const char *value, const struct kernel_param *kp) +{ + int count, err, i; + struct quirk_entry *qlist; + char *field, *val, *sep_ptr; + + err = param_set_copystring(value, kp); + if (err) + return err; + + val = kstrdup(value, GFP_KERNEL); + if (!val) + return -ENOMEM; + + if (!*val) + goto out_free_val; + + count = 1; + for (i = 0; val[i]; i++) { + if (val[i] == '-') + count++; + } + + qlist = kcalloc(count, sizeof(*qlist), GFP_KERNEL); + if (!qlist) { + err = -ENOMEM; + goto out_free_val; + } + + i = 0; + sep_ptr = val; + while ((field = strsep(&sep_ptr, "-"))) { + if (nvme_parse_quirk_entry(field, &qlist[i])) { + pr_err("nvme: failed to parse quirk string %s\n", + value); + goto out_free_qlist; + } + + i++; + } + + nvme_pci_quirk_count = count; + nvme_pci_quirk_list = qlist; + goto out_free_val; + +out_free_qlist: + kfree(qlist); +out_free_val: + kfree(val); + return err; +} + +static char quirks_param[128]; +static const struct kernel_param_ops quirks_param_ops = { + .set = quirks_param_set, + .get = param_get_string, +}; + +static struct kparam_string quirks_param_string = { + .maxlen = sizeof(quirks_param), + .string = quirks_param, +}; + +module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0444); +MODULE_PARM_DESC(quirks, "Enable/disable NVMe quirks by specifying " + "quirks=VID:DID:quirk_names"); + static int io_queue_count_set(const char *val, const struct kernel_param *kp) { unsigned int n; @@ -3439,12 +3582,25 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) return 0; } +static struct quirk_entry *detect_dynamic_quirks(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < nvme_pci_quirk_count; i++) + if (pdev->vendor == nvme_pci_quirk_list[i].vendor_id && + pdev->device == nvme_pci_quirk_list[i].dev_id) + return &nvme_pci_quirk_list[i]; 
+ + return NULL; + } + static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned long quirks = id->driver_data; int node = dev_to_node(&pdev->dev); struct nvme_dev *dev; + struct quirk_entry *qentry; int ret = -ENOMEM; dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids), @@ -3476,6 +3632,11 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, "platform quirk: setting simple suspend\n"); quirks |= NVME_QUIRK_SIMPLE_SUSPEND; } + qentry = detect_dynamic_quirks(pdev); + if (qentry) { + quirks |= qentry->enabled_quirks; + quirks &= ~qentry->disabled_quirks; + } ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, quirks); if (ret) @@ -4074,6 +4235,7 @@ static int __init nvme_init(void) static void __exit nvme_exit(void) { + kfree(nvme_pci_quirk_list); pci_unregister_driver(&nvme_driver); flush_workqueue(nvme_wq); } From 63059500b124254c2630b2f8c46cb3555e726f52 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 5 Feb 2026 17:11:15 +0000 Subject: [PATCH 07/19] nvme: stop setting namespace gendisk device driver data Since commit 1f4137e882c6 ("nvme: move passthrough logging attribute to head"), we stopped using the namespace to hold the passthrough logging enabled attribute. There is now nowhere which looks up the gendisk dev driver data, so stop setting it. Incidentally, it would have been better to set this before adding the disk. 
Reviewed-by: Christoph Hellwig Signed-off-by: John Garry Signed-off-by: Keith Busch --- drivers/nvme/host/core.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c12986495e71..3a2126584a23 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4181,13 +4181,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) nvme_mpath_add_disk(ns, info->anagrpid); nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); - /* - * Set ns->disk->device->driver_data to ns so we can access - * ns->head->passthru_err_log_enabled in - * nvme_io_passthru_err_log_enabled_[store | show](). - */ - dev_set_drvdata(disk_to_dev(ns->disk), ns); - return; out_cleanup_ns_from_list: From 3ddfbfbc78ac1d3d9e95098fb6a32b57b8a0dcae Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 5 Feb 2026 17:11:14 +0000 Subject: [PATCH 08/19] nvme: correct comment about nvme_ns_remove() The comment in nvme_mpath_remove_disk() references nvme_remove_ns(), which should be nvme_ns_remove(). Reviewed-by: Christoph Hellwig Signed-off-by: John Garry Signed-off-by: Keith Busch --- drivers/nvme/host/multipath.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 174027d1cc19..bfcc5904e6a2 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -1300,7 +1300,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) mutex_lock(&head->subsys->lock); /* * We are called when all paths have been removed, and at that point - * head->list is expected to be empty. However, nvme_remove_ns() and + * head->list is expected to be empty. However, nvme_ns_remove() and * nvme_init_ns_head() can run concurrently and so if head->delayed_ * removal_secs is configured, it is possible that by the time we reach * this point, head->list may no longer be empty. 
Therefore, we recheck From bbdaa8c17cae18b977f4509911d7a390aa8a6597 Mon Sep 17 00:00:00 2001 From: Maurizio Lombardi Date: Wed, 11 Feb 2026 17:30:28 +0100 Subject: [PATCH 09/19] nvme: fix memory leak in quirks_param_set() When loading the nvme module, if the 'quirks' parameter is specified via both the kernel command line (e.g., nvme.quirks=...) and the modprobe command line (e.g., modprobe nvme quirks=...), the quirks_param_set() callback is invoked twice. Currently, in the double-invocation scenario, the second call overwrites the nvme_pci_quirk_list pointer, causing the memory allocated in the first call to leak. Fix this by freeing the existing list before assigning the new one. Fixes: b4247c8317c5 ("nvme: add support for dynamic quirk configuration via module parameter") Reviewed-by: Daniel Wagner Reviewed-by: Christoph Hellwig Signed-off-by: Maurizio Lombardi Signed-off-by: Keith Busch --- drivers/nvme/host/pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bd884e294600..c0f2104326ab 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -219,6 +219,7 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp) i++; } + kfree(nvme_pci_quirk_list); nvme_pci_quirk_count = count; nvme_pci_quirk_list = qlist; goto out_free_val; From 166e31d7dbf6aa44829b98aa446bda5c9580f12a Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 10 Feb 2026 09:26:54 -0800 Subject: [PATCH 10/19] nvme-pci: ensure we're polling a polled queue A user can change the polled queue count at run time. There's a brief window during a reset where a hipri task may try to poll that queue before the block layer has updated the queue maps, which would race with the now interrupt driven queue and may cause double completions. 
Reviewed-by: Christoph Hellwig Reviewed-by: Kanchan Joshi Signed-off-by: Keith Busch --- drivers/nvme/host/pci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c0f2104326ab..4ee4d7ead5a9 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1627,7 +1627,8 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) struct nvme_queue *nvmeq = hctx->driver_data; bool found; - if (!nvme_cqe_pending(nvmeq)) + if (!test_bit(NVMEQ_POLLED, &nvmeq->flags) || + !nvme_cqe_pending(nvmeq)) return 0; spin_lock(&nvmeq->cq_poll_lock); From 4735b510a00fb2d4ac9e8d21a8c9552cb281f585 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 10 Feb 2026 11:00:12 -0800 Subject: [PATCH 11/19] nvme-pci: cap queue creation to used queues If the user reduces the special queue count at runtime and resets the controller, we need to reduce the number of queues and interrupts requested accordingly rather than start with the pre-allocated queue count. Tested-by: Kanchan Joshi Reviewed-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/pci.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 4ee4d7ead5a9..c63efa49132f 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2902,7 +2902,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) dev->nr_write_queues = write_queues; dev->nr_poll_queues = poll_queues; - nr_io_queues = dev->nr_allocated_queues - 1; + /* + * The initial number of allocated queue slots may be too large if the + * user reduced the special queue parameters. Cap the value to the + * number we need for this round. 
+ */ + nr_io_queues = min(nvme_max_io_queues(dev), + dev->nr_allocated_queues - 1); result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); if (result < 0) return result; From baa47c4f89eb2e7614e6a06dd5957632e38c4e74 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 10 Feb 2026 10:38:09 -0800 Subject: [PATCH 12/19] nvme-pci: do not try to add queue maps at runtime The block layer allocates the set's maps once. We can't add special purpose queues at runtime if they weren't allocated at initialization time. Tested-by: Kanchan Joshi Reviewed-by: Kanchan Joshi Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/pci.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c63efa49132f..0c1a8d7aa1c0 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2902,6 +2902,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) dev->nr_write_queues = write_queues; dev->nr_poll_queues = poll_queues; + if (dev->ctrl.tagset) { + /* + * The set's maps are allocated only once at initialization + * time. We can't add special queues later if their mq_map + * wasn't preallocated. + */ + if (dev->ctrl.tagset->nr_maps < 3) + dev->nr_poll_queues = 0; + if (dev->ctrl.tagset->nr_maps < 2) + dev->nr_write_queues = 0; + } + /* * The initial number of allocated queue slots may be too large if the * user reduced the special queue parameters. Cap the value to the From dd677d0598387ea623820ab2bd0e029c377445a3 Mon Sep 17 00:00:00 2001 From: Justin Tee Date: Thu, 4 Dec 2025 12:26:13 -0800 Subject: [PATCH 13/19] nvmet-fcloop: Check remoteport port_state before calling done callback In nvme_fc_handle_ls_rqst_work, the lsrsp->done callback is only set when remoteport->port_state is FC_OBJSTATE_ONLINE. 
Otherwise, the nvme_fc_xmt_ls_rsp's LLDD call to lport->ops->xmt_ls_rsp is expected to fail and the nvme-fc transport layer itself will directly call nvme_fc_xmt_ls_rsp_free instead of relying on LLDD's done callback to free the lsrsp resources. Update the fcloop_t2h_xmt_ls_rsp routine to check remoteport->port_state. If online, then lsrsp->done callback will free the lsrsp. Else, return -ENODEV to signal the nvme-fc transport to handle freeing lsrsp. Cc: Ewan D. Milne Tested-by: Aristeu Rozanski Acked-by: Aristeu Rozanski Reviewed-by: Daniel Wagner Closes: https://lore.kernel.org/linux-nvme/21255200-a271-4fa0-b099-97755c8acd4c@work/ Fixes: 10c165af35d2 ("nvmet-fcloop: call done callback even when remote port is gone") Signed-off-by: Justin Tee Signed-off-by: Keith Busch --- drivers/nvme/target/fcloop.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index c30e9a3e014f..38bd2db3d6bb 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -491,6 +491,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, struct fcloop_rport *rport = remoteport->private; struct nvmet_fc_target_port *targetport = rport->targetport; struct fcloop_tport *tport; + int ret = 0; if (!targetport) { /* @@ -500,12 +501,18 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, * We end up here from delete association exchange: * nvmet_fc_xmt_disconnect_assoc sends an async request. * - * Return success because this is what LLDDs do; silently - * drop the response. + * Return success when remoteport is still online because this + * is what LLDDs do and silently drop the response. Otherwise, + * return with error to signal upper layer to perform the lsrsp + * resource cleanup. 
*/ - lsrsp->done(lsrsp); + if (remoteport->port_state == FC_OBJSTATE_ONLINE) + lsrsp->done(lsrsp); + else + ret = -ENODEV; + kmem_cache_free(lsreq_cache, tls_req); - return 0; + return ret; } memcpy(lsreq->rspaddr, lsrsp->rspbuf, From 0f5197ea9a73a4c406c75e6d8af3a13f7f96ae89 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Wed, 25 Feb 2026 11:38:05 -0800 Subject: [PATCH 14/19] nvme-multipath: fix leak on try_module_get failure We need to fall back to the synchronous removal if we can't get a reference on the module needed for the deferred removal. Fixes: 62188639ec16 ("nvme-multipath: introduce delayed removal of the multipath head node") Reviewed-by: Nilay Shroff Reviewed-by: John Garry Reviewed-by: Christoph Hellwig Signed-off-by: Keith Busch --- drivers/nvme/host/multipath.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index bfcc5904e6a2..fc6800a9f7f9 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -1310,13 +1310,11 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) if (!list_empty(&head->list)) goto out; - if (head->delayed_removal_secs) { - /* - * Ensure that no one could remove this module while the head - * remove work is pending. - */ - if (!try_module_get(THIS_MODULE)) - goto out; + /* + * Ensure that no one could remove this module while the head + * remove work is pending. 
+ */ + if (head->delayed_removal_secs && try_module_get(THIS_MODULE)) { mod_delayed_work(nvme_wq, &head->remove_work, head->delayed_removal_secs * HZ); } else { From da46b5dfef48658d03347cda21532bcdbb521e67 Mon Sep 17 00:00:00 2001 From: Chaitanya Kulkarni Date: Sun, 1 Mar 2026 16:22:07 -0800 Subject: [PATCH 15/19] blktrace: fix __this_cpu_read/write in preemptible context tracing_record_cmdline() internally uses __this_cpu_read() and __this_cpu_write() on the per-CPU variable trace_cmdline_save, and trace_save_cmdline() explicitly asserts preemption is disabled via lockdep_assert_preemption_disabled(). These operations are only safe when preemption is off, as they were designed to be called from the scheduler context (probe_wakeup_sched_switch() / probe_wakeup()). __blk_add_trace() was calling tracing_record_cmdline(current) early in the blk_tracer path, before ring buffer reservation, from process context where preemption is fully enabled. This triggers the following using blktests/blktrace/002: blktrace/002 (blktrace ftrace corruption with sysfs trace) [failed] runtime 0.367s ... 0.437s something found in dmesg: [ 81.211018] run blktests blktrace/002 at 2026-02-25 22:24:33 [ 81.239580] null_blk: disk nullb1 created [ 81.357294] BUG: using __this_cpu_read() in preemptible [00000000] code: dd/2516 [ 81.362842] caller is tracing_record_cmdline+0x10/0x40 [ 81.362872] CPU: 16 UID: 0 PID: 2516 Comm: dd Tainted: G N 7.0.0-rc1lblk+ #84 PREEMPT(full) [ 81.362877] Tainted: [N]=TEST [ 81.362878] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014 [ 81.362881] Call Trace: [ 81.362884] [ 81.362886] dump_stack_lvl+0x8d/0xb0 ... 
(See '/mnt/sda/blktests/results/nodev/blktrace/002.dmesg' for the entire message) [ 81.211018] run blktests blktrace/002 at 2026-02-25 22:24:33 [ 81.239580] null_blk: disk nullb1 created [ 81.357294] BUG: using __this_cpu_read() in preemptible [00000000] code: dd/2516 [ 81.362842] caller is tracing_record_cmdline+0x10/0x40 [ 81.362872] CPU: 16 UID: 0 PID: 2516 Comm: dd Tainted: G N 7.0.0-rc1lblk+ #84 PREEMPT(full) [ 81.362877] Tainted: [N]=TEST [ 81.362878] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014 [ 81.362881] Call Trace: [ 81.362884] [ 81.362886] dump_stack_lvl+0x8d/0xb0 [ 81.362895] check_preemption_disabled+0xce/0xe0 [ 81.362902] tracing_record_cmdline+0x10/0x40 [ 81.362923] __blk_add_trace+0x307/0x5d0 [ 81.362934] ? lock_acquire+0xe0/0x300 [ 81.362940] ? iov_iter_extract_pages+0x101/0xa30 [ 81.362959] blk_add_trace_bio+0x106/0x1e0 [ 81.362968] submit_bio_noacct_nocheck+0x24b/0x3a0 [ 81.362979] ? lockdep_init_map_type+0x58/0x260 [ 81.362988] submit_bio_wait+0x56/0x90 [ 81.363009] __blkdev_direct_IO_simple+0x16c/0x250 [ 81.363026] ? __pfx_submit_bio_wait_endio+0x10/0x10 [ 81.363038] ? 
rcu_read_lock_any_held+0x73/0xa0 [ 81.363051] blkdev_read_iter+0xc1/0x140 [ 81.363059] vfs_read+0x20b/0x330 [ 81.363083] ksys_read+0x67/0xe0 [ 81.363090] do_syscall_64+0xbf/0xf00 [ 81.363102] entry_SYSCALL_64_after_hwframe+0x76/0x7e [ 81.363106] RIP: 0033:0x7f281906029d [ 81.363111] Code: 31 c0 e9 c6 fe ff ff 50 48 8d 3d 66 63 0a 00 e8 59 ff 01 00 66 0f 1f 84 00 00 00 00 00 80 3d 41 33 0e 00 00 74 17 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 5b c3 66 2e 0f 1f 84 00 00 00 00 00 48 83 ec [ 81.363113] RSP: 002b:00007ffca127dd48 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 [ 81.363120] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f281906029d [ 81.363122] RDX: 0000000000001000 RSI: 0000559f8bfae000 RDI: 0000000000000000 [ 81.363123] RBP: 0000000000001000 R08: 0000002863a10a81 R09: 00007f281915f000 [ 81.363124] R10: 00007f2818f77b60 R11: 0000000000000246 R12: 0000559f8bfae000 [ 81.363126] R13: 0000000000000000 R14: 0000000000000000 R15: 000000000000000a [ 81.363142] The same BUG fires from blk_add_trace_plug(), blk_add_trace_unplug(), and blk_add_trace_rq() paths as well. The purpose of tracing_record_cmdline() is to cache the task->comm for a given PID so that the trace can later resolve it. It is only meaningful when a trace event is actually being recorded. Ring buffer reservation via ring_buffer_lock_reserve() disables preemption, and preemption remains disabled until the event is committed :- __blk_add_trace() __trace_buffer_lock_reserve() __trace_buffer_lock_reserve() ring_buffer_lock_reserve() preempt_disable_notrace(); <--- With this fix blktests for blktrace pass: blktests (master) # ./check blktrace blktrace/001 (blktrace zone management command tracing) [passed] runtime 3.650s ... 3.647s blktrace/002 (blktrace ftrace corruption with sysfs trace) [passed] runtime 0.411s ... 
0.384s Fixes: 7ffbd48d5cab ("tracing: Cache comms only after an event occurred") Reported-by: Shinichiro Kawasaki Suggested-by: Steven Rostedt Signed-off-by: Chaitanya Kulkarni Reviewed-by: Steven Rostedt (Google) Signed-off-by: Jens Axboe --- kernel/trace/blktrace.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index e6988929ead2..18a9b84cc768 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -383,8 +383,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, cpu = raw_smp_processor_id(); if (blk_tracer) { - tracing_record_cmdline(current); - buffer = blk_tr->array_buffer.buffer; trace_ctx = tracing_gen_ctx_flags(0); switch (bt->version) { @@ -419,6 +417,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, if (!event) return; + tracing_record_cmdline(current); switch (bt->version) { case 1: record_blktrace_event(ring_buffer_event_data(event), From 539d1b47e935e8384977dd7e5cec370c08b7a644 Mon Sep 17 00:00:00 2001 From: Nilay Shroff Date: Sun, 1 Mar 2026 18:29:43 +0530 Subject: [PATCH 16/19] block: break pcpu_alloc_mutex dependency on freeze_lock While nr_hw_queue update allocates tagset tags it acquires ->pcpu_alloc_mutex after ->freeze_lock is acquired or queue is frozen. This potentially creates a circular dependency involving ->fs_reclaim if reclaim is triggered simultaneously in a code path which first acquires ->pcpu_alloc_mutex. As the queue is already frozen while nr_hw_queue update allocates tagsets, the reclaim can't forward progress and thus it could cause a potential deadlock as reported in lockdep splat[1]. Fix this by pre-allocating tagset tags before we freeze queue during nr_hw_queue update. Later the allocated tagset tags could be safely installed and used after queue is frozen.
Reported-by: Yi Zhang Closes: https://lore.kernel.org/all/CAHj4cs8F=OV9s3La2kEQ34YndgfZP-B5PHS4Z8_b9euKG6J4mw@mail.gmail.com/ [1] Signed-off-by: Nilay Shroff Reviewed-by: Ming Lei Tested-by: Yi Zhang Reviewed-by: Yu Kuai [axboe: fix brace style issue] Signed-off-by: Jens Axboe --- block/blk-mq.c | 45 ++++++++++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index d5602ff62c7c..0b182013016a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4793,38 +4793,45 @@ static void blk_mq_update_queue_map(struct blk_mq_tag_set *set) } } -static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, - int new_nr_hw_queues) +static struct blk_mq_tags **blk_mq_prealloc_tag_set_tags( + struct blk_mq_tag_set *set, + int new_nr_hw_queues) { struct blk_mq_tags **new_tags; int i; if (set->nr_hw_queues >= new_nr_hw_queues) - goto done; + return NULL; new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!new_tags) - return -ENOMEM; + return ERR_PTR(-ENOMEM); if (set->tags) memcpy(new_tags, set->tags, set->nr_hw_queues * sizeof(*set->tags)); - kfree(set->tags); - set->tags = new_tags; for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) { - if (!__blk_mq_alloc_map_and_rqs(set, i)) { - while (--i >= set->nr_hw_queues) - __blk_mq_free_map_and_rqs(set, i); - return -ENOMEM; + if (blk_mq_is_shared_tags(set->flags)) { + new_tags[i] = set->shared_tags; + } else { + new_tags[i] = blk_mq_alloc_map_and_rqs(set, i, + set->queue_depth); + if (!new_tags[i]) + goto out_unwind; } cond_resched(); } -done: - set->nr_hw_queues = new_nr_hw_queues; - return 0; + return new_tags; +out_unwind: + while (--i >= set->nr_hw_queues) { + if (!blk_mq_is_shared_tags(set->flags)) + blk_mq_free_map_and_rqs(set, new_tags[i], i); + } + kfree(new_tags); + return ERR_PTR(-ENOMEM); } /* @@ -5113,6 +5120,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 
unsigned int memflags; int i; struct xarray elv_tbl; + struct blk_mq_tags **new_tags; bool queues_frozen = false; lockdep_assert_held(&set->tag_list_lock); @@ -5147,11 +5155,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, if (blk_mq_elv_switch_none(q, &elv_tbl)) goto switch_back; + new_tags = blk_mq_prealloc_tag_set_tags(set, nr_hw_queues); + if (IS_ERR(new_tags)) + goto switch_back; + list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_freeze_queue_nomemsave(q); queues_frozen = true; - if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) - goto switch_back; + if (new_tags) { + kfree(set->tags); + set->tags = new_tags; + } + set->nr_hw_queues = nr_hw_queues; fallback: blk_mq_update_queue_map(set); From 8da8df43124128e0478beadb58faa9cab56a3f13 Mon Sep 17 00:00:00 2001 From: Yang Xiuwei Date: Wed, 4 Mar 2026 12:51:19 +0800 Subject: [PATCH 17/19] block: use __bio_add_page in bio_copy_kern Since the bio is allocated with the exact number of pages needed via blk_rq_map_bio_alloc(), and the loop iterates exactly that many times, bio_add_page() cannot fail due to insufficient space. Switch to __bio_add_page() and remove the dead error handling code. 
Suggested-by: Christoph Hellwig Reviewed-by: Chaitanya Kulkarni Signed-off-by: Yang Xiuwei Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-map.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/block/blk-map.c b/block/blk-map.c index 4533094d9458..c6fa2238d578 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -398,8 +398,7 @@ static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int le if (op_is_write(op)) memcpy(page_address(page), p, bytes); - if (bio_add_page(bio, page, bytes, 0) < bytes) - break; + __bio_add_page(bio, page, bytes, 0); len -= bytes; p += bytes; From c3320153769f05fd7fe9d840cb555dd3080ae424 Mon Sep 17 00:00:00 2001 From: Sungwoo Kim Date: Fri, 27 Feb 2026 19:19:28 -0500 Subject: [PATCH 18/19] nvme: fix memory allocation in nvme_pr_read_keys() nvme_pr_read_keys() takes num_keys from userspace and uses it to calculate the allocation size for rse via struct_size(). The upper limit is PR_KEYS_MAX (64K). A malicious or buggy userspace can pass a large num_keys value that results in a 4MB allocation attempt at most, causing a warning in the page allocator when the order exceeds MAX_PAGE_ORDER. To fix this, use kvzalloc() instead of kzalloc(). 
This bug has the same reasoning and fix with the patch below: https://lore.kernel.org/linux-block/20251212013510.3576091-1-kartikey406@gmail.com/ Warning log: WARNING: mm/page_alloc.c:5216 at __alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216, CPU#1: syz-executor117/272 Modules linked in: CPU: 1 UID: 0 PID: 272 Comm: syz-executor117 Not tainted 6.19.0 #1 PREEMPT(voluntary) Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014 RIP: 0010:__alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216 Code: ff 83 bd a8 fe ff ff 0a 0f 86 69 fb ff ff 0f b6 1d f9 f9 c4 04 80 fb 01 0f 87 3b 76 30 ff 83 e3 01 75 09 c6 05 e4 f9 c4 04 01 <0f> 0b 48 c7 85 70 fe ff ff 00 00 00 00 e9 8f fd ff ff 31 c0 e9 0d RSP: 0018:ffffc90000fcf450 EFLAGS: 00010246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 1ffff920001f9ea0 RDX: 0000000000000000 RSI: 000000000000000b RDI: 0000000000040dc0 RBP: ffffc90000fcf648 R08: ffff88800b6c3380 R09: 0000000000000001 R10: ffffc90000fcf840 R11: ffff88807ffad280 R12: 0000000000000000 R13: 0000000000040dc0 R14: 0000000000000001 R15: ffffc90000fcf620 FS: 0000555565db33c0(0000) GS:ffff8880be26c000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000002000000c CR3: 0000000003b72000 CR4: 00000000000006f0 Call Trace: alloc_pages_mpol+0x236/0x4d0 mm/mempolicy.c:2486 alloc_frozen_pages_noprof+0x149/0x180 mm/mempolicy.c:2557 ___kmalloc_large_node+0x10c/0x140 mm/slub.c:5598 __kmalloc_large_node_noprof+0x25/0xc0 mm/slub.c:5629 __do_kmalloc_node mm/slub.c:5645 [inline] __kmalloc_noprof+0x483/0x6f0 mm/slub.c:5669 kmalloc_noprof include/linux/slab.h:961 [inline] kzalloc_noprof include/linux/slab.h:1094 [inline] nvme_pr_read_keys+0x8f/0x4c0 drivers/nvme/host/pr.c:245 blkdev_pr_read_keys block/ioctl.c:456 [inline] blkdev_common_ioctl+0x1b71/0x29b0 block/ioctl.c:730 blkdev_ioctl+0x299/0x700 block/ioctl.c:786 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl 
fs/ioctl.c:597 [inline] __se_sys_ioctl fs/ioctl.c:583 [inline] __x64_sys_ioctl+0x1bf/0x220 fs/ioctl.c:583 x64_sys_call+0x1280/0x21b0 mnt/fuzznvme_1/fuzznvme/linux-build/v6.19/./arch/x86/include/generated/asm/syscalls_64.h:17 do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline] do_syscall_64+0x71/0x330 arch/x86/entry/syscall_64.c:94 entry_SYSCALL_64_after_hwframe+0x76/0x7e RIP: 0033:0x7fb893d3108d Code: 28 c3 e8 46 1e 00 00 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffff61f2f38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 00007ffff61f3138 RCX: 00007fb893d3108d RDX: 0000000020000040 RSI: 00000000c01070ce RDI: 0000000000000003 RBP: 0000000000000001 R08: 0000000000000000 R09: 00007ffff61f3138 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 R13: 00007ffff61f3128 R14: 00007fb893dae530 R15: 0000000000000001 Fixes: 5fd96a4e15de (nvme: Add pr_ops read_keys support) Acked-by: Chao Shi Acked-by: Weidong Zhu Acked-by: Dave Tian Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Signed-off-by: Sungwoo Kim Signed-off-by: Keith Busch --- drivers/nvme/host/pr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index ad2ecc2f49a9..fe7dbe264815 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -242,7 +242,7 @@ static int nvme_pr_read_keys(struct block_device *bdev, if (rse_len > U32_MAX) return -EINVAL; - rse = kzalloc(rse_len, GFP_KERNEL); + rse = kvzalloc(rse_len, GFP_KERNEL); if (!rse) return -ENOMEM; @@ -267,7 +267,7 @@ static int nvme_pr_read_keys(struct block_device *bdev, } free_rse: - kfree(rse); + kvfree(rse); return ret; } From ce8ee8583ed83122405eabaa8fb351be4d9dc65c Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 5 Mar 2026 11:15:50 +0800 Subject: [PATCH 19/19] block: use trylock to 
avoid lockdep circular dependency in sysfs Use trylock instead of blocking lock acquisition for update_nr_hwq_lock in queue_requests_store() and elv_iosched_store() to avoid circular lock dependency with kernfs active reference during concurrent disk deletion: update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del) kn->active -> update_nr_hwq_lock (via sysfs write path) Return -EBUSY when the lock is not immediately available. Reported-and-tested-by: Yi Zhang Closes: https://lore.kernel.org/linux-block/CAHj4cs-em-4acsHabMdT=jJhXkCzjnprD-aQH1OgrZo4nTnmMw@mail.gmail.com/ Fixes: 626ff4f8ebcb ("blk-mq: convert to serialize updating nr_requests with update_nr_hwq_lock") Signed-off-by: Ming Lei Tested-by: Yi Zhang Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 8 +++++++- block/elevator.c | 12 +++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index f3b1968c80ce..55a1bbfef7d4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -78,8 +78,14 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count) /* * Serialize updating nr_requests with concurrent queue_requests_store() * and switching elevator. 
+ * + * Use trylock to avoid circular lock dependency with kernfs active + * reference during concurrent disk deletion: + * update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del) + * kn->active -> update_nr_hwq_lock (via this sysfs write path) */ - down_write(&set->update_nr_hwq_lock); + if (!down_write_trylock(&set->update_nr_hwq_lock)) + return -EBUSY; if (nr == q->nr_requests) goto unlock; diff --git a/block/elevator.c b/block/elevator.c index ebe2a1fcf011..3bcd37c2aa34 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -807,7 +807,16 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, elv_iosched_load_module(ctx.name); ctx.type = elevator_find_get(ctx.name); - down_read(&set->update_nr_hwq_lock); + /* + * Use trylock to avoid circular lock dependency with kernfs active + * reference during concurrent disk deletion: + * update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del) + * kn->active -> update_nr_hwq_lock (via this sysfs write path) + */ + if (!down_read_trylock(&set->update_nr_hwq_lock)) { + ret = -EBUSY; + goto out; + } if (!blk_queue_no_elv_switch(q)) { ret = elevator_change(q, &ctx); if (!ret) @@ -817,6 +826,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf, } up_read(&set->update_nr_hwq_lock); +out: if (ctx.type) elevator_put(ctx.type); return ret;