cxl fixes for v7.0-rc2

- Fix incorrect usages of decoder flags.
 cxl/region: Test CXL_DECODER_F_NORMALIZED_ADDRESSING as a bitmask
 cxl: Test CXL_DECODER_F_LOCK as a bitmask
 
 cxl/mbox: validate payload size before accessing contents in cxl_payload_from_user_allowed()
 
 - Moving devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko is required for the race fix.
 cxl: Fix race of nvdimm_bus object when creating nvdimm objects
 cxl: Move devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko
 
 - The port_to_host() helper is used to fix the race window that exists when the host lock is not held.
 cxl/port: Hold port host lock during dport adding.
 cxl/port: Introduce port_to_host() helper
 
 cxl/memdev: fix deadlock in cxl_memdev_autoremove() on attach failure
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE5DAy15EJMCV1R6v9YGjFFmlTOEoFAmmhzZgACgkQYGjFFmlT
 OEqKrhAA2DoFTLePsbDXOAqs/9A90amNkWjE7JRuKELqVs0o00vutYiWiThP7lXP
 v0wsCq5V2+DT35Oo0SpDntNQkYn54vjRF4Q4VYdHvBRuU4mbWFEqctJBvYRHjXSE
 I4POupbqO8pAIz5R7jaRdq/PgnW1S+4ZARzkmJGXfpzgUC2JdHN+hl2aRkBWr09o
 2RCmVu0H5yxLibw7DmZex/s4ksXzuegy0BDo94Xmo+GXabYHCPCiQOZobZzUMhlL
 5r+YTGMY+Q3Z+nHm3fuC4lQCMClgTVf6xdHtqqZ/GQ6b69KjhO0V7/RmZENvmlLS
 LtW4ZwEmgQRX6rnoT3p2UaB1cu4lBscU9kYRZdmP7FnYbu8cJXe6Dfk+56lDnqy8
 8bwYAPPmV/IBv4dRecoliYhGMJ4TMmgCoxwxuh2KROUpKI1GkIsTzMeoypviS6Nm
 5EAlMvoMZkgygkpJ+z8u009IgIQyxQRWqIxw0aLcutAlBhtZxb8/iq8dOCrgCb64
 HFpeglrmechDbXgf9hHWWh+RX+A6MrhLTcjyYIdVJzb7jrfWkwUDZaKg7cRni51W
 IPZtjaawvEMbYRs+dVP0frZvLDqPYE0U4Y3GWa7d9XRSHx0QwTcLwi8ShXEx3dqS
 wcL/THCEH0Zhtl3LU6CDfeChPKyyUnt2ChbucccVLjN2thaRH44=
 =4//d
 -----END PGP SIGNATURE-----

Merge tag 'cxl-fixes-7.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl fixes from Dave Jiang:

 - Fix incorrect usages of decoder flags

 - Validate payload size before accessing contents

 - Fix race condition when creating nvdimm objects

 - Fix deadlock on attach failure

* tag 'cxl-fixes-7.0-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
  cxl/region: Test CXL_DECODER_F_NORMALIZED_ADDRESSING as a bitmask
  cxl: Test CXL_DECODER_F_LOCK as a bitmask
  cxl/mbox: validate payload size before accessing contents in cxl_payload_from_user_allowed()
  cxl: Fix race of nvdimm_bus object when creating nvdimm objects
  cxl: Move devm_cxl_add_nvdimm_bridge() to cxl_pmem.ko
  cxl/port: Hold port host lock during dport adding.
  cxl/port: Introduce port_to_host() helper
  cxl/memdev: fix deadlock in cxl_memdev_autoremove() on attach failure
This commit is contained in:
Linus Torvalds 2026-02-27 10:52:57 -08:00
commit aed968f8a6
9 changed files with 117 additions and 54 deletions

View file

@ -152,6 +152,24 @@ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
/*
 * port_to_host() - resolve the device that hosts @port
 *
 * The CXL root port and the first level of ports are hosted by the
 * platform firmware device; every other port is hosted by its parent
 * port.
 */
static inline struct device *port_to_host(struct cxl_port *port)
{
	struct cxl_port *parent;

	if (is_cxl_root(port))
		return port->uport_dev;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return parent->uport_dev;

	return &parent->dev;
}
static inline struct device *dport_to_host(struct cxl_dport *dport)
{
struct cxl_port *port = dport->port;

View file

@ -904,7 +904,7 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return;
if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
if (cxld->flags & CXL_DECODER_F_LOCK)
return;
if (port->commit_end == id)

View file

@ -311,6 +311,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
* cxl_payload_from_user_allowed() - Check contents of in_payload.
* @opcode: The mailbox command opcode.
* @payload_in: Pointer to the input payload passed in from user space.
* @in_size: Size of @payload_in in bytes.
*
* Return:
* * true - payload_in passes check for @opcode.
@ -325,12 +326,15 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
*
* The specific checks are determined by the opcode.
*/
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in,
size_t in_size)
{
switch (opcode) {
case CXL_MBOX_OP_SET_PARTITION_INFO: {
struct cxl_mbox_set_partition_info *pi = payload_in;
if (in_size < sizeof(*pi))
return false;
if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
return false;
break;
@ -338,6 +342,8 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
case CXL_MBOX_OP_CLEAR_LOG: {
const uuid_t *uuid = (uuid_t *)payload_in;
if (in_size < sizeof(uuid_t))
return false;
/*
* Restrict the Clear log action to only apply to
* Vendor debug logs.
@ -365,7 +371,8 @@ static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
if (IS_ERR(mbox_cmd->payload_in))
return PTR_ERR(mbox_cmd->payload_in);
if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in,
in_size)) {
dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
cxl_mem_opcode_to_name(opcode));
kvfree(mbox_cmd->payload_in);

View file

@ -1089,10 +1089,8 @@ static int cxlmd_add(struct cxl_memdev *cxlmd, struct cxl_dev_state *cxlds)
DEFINE_FREE(put_cxlmd, struct cxl_memdev *,
if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))
static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
static bool cxl_memdev_attach_failed(struct cxl_memdev *cxlmd)
{
int rc;
/*
* If @attach is provided fail if the driver is not attached upon
* return. Note that failure here could be the result of a race to
@ -1100,7 +1098,14 @@ static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
* succeeded and then cxl_mem unbound before the lock is acquired.
*/
guard(device)(&cxlmd->dev);
if (cxlmd->attach && !cxlmd->dev.driver) {
return (cxlmd->attach && !cxlmd->dev.driver);
}
static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
{
int rc;
if (cxl_memdev_attach_failed(cxlmd)) {
cxl_memdev_unregister(cxlmd);
return ERR_PTR(-ENXIO);
}

View file

@ -115,15 +115,17 @@ static void unregister_nvb(void *_cxl_nvb)
device_unregister(&cxl_nvb->dev);
}
/**
* devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
* @host: platform firmware root device
* @port: CXL port at the root of a CXL topology
*
* Return: bridge device that can host cxl_nvdimm objects
*/
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port)
/*
 * Report, under the bridge device lock, whether the bridge failed to
 * bind a driver (no driver bound means the attach did not succeed).
 */
static bool cxl_nvdimm_bridge_failed_attach(struct cxl_nvdimm_bridge *cxl_nvb)
{
	struct device *bridge_dev = &cxl_nvb->dev;

	guard(device)(bridge_dev);
	return !bridge_dev->driver;
}
struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port)
{
struct cxl_nvdimm_bridge *cxl_nvb;
struct device *dev;
@ -145,6 +147,11 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
if (rc)
goto err;
if (cxl_nvdimm_bridge_failed_attach(cxl_nvb)) {
unregister_nvb(cxl_nvb);
return ERR_PTR(-ENODEV);
}
rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
if (rc)
return ERR_PTR(rc);
@ -155,7 +162,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_nvdimm_bridge, "cxl_pmem");
static void cxl_nvdimm_release(struct device *dev)
{
@ -255,6 +262,21 @@ int devm_cxl_add_nvdimm(struct device *host, struct cxl_port *port,
if (!cxl_nvb)
return -ENODEV;
/*
* Take the uport_dev lock to guard against race of nvdimm_bus object.
* cxl_acpi_probe() registers the nvdimm_bus and is done under the
* root port uport_dev lock.
*
* Take the cxl_nvb device lock to ensure that cxl_nvb driver is in a
* consistent state. And the driver registers nvdimm_bus.
*/
guard(device)(cxl_nvb->port->uport_dev);
guard(device)(&cxl_nvb->dev);
if (!cxl_nvb->nvdimm_bus) {
rc = -ENODEV;
goto err_alloc;
}
cxl_nvd = cxl_nvdimm_alloc(cxl_nvb, cxlmd);
if (IS_ERR(cxl_nvd)) {
rc = PTR_ERR(cxl_nvd);

View file

@ -615,22 +615,8 @@ struct cxl_port *parent_port_of(struct cxl_port *port)
static void unregister_port(void *_port)
{
struct cxl_port *port = _port;
struct cxl_port *parent = parent_port_of(port);
struct device *lock_dev;
/*
* CXL root port's and the first level of ports are unregistered
* under the platform firmware device lock, all other ports are
* unregistered while holding their parent port lock.
*/
if (!parent)
lock_dev = port->uport_dev;
else if (is_cxl_root(parent))
lock_dev = parent->uport_dev;
else
lock_dev = &parent->dev;
device_lock_assert(lock_dev);
device_lock_assert(port_to_host(port));
port->dead = true;
device_unregister(&port->dev);
}
@ -1427,20 +1413,11 @@ static struct device *grandparent(struct device *dev)
return NULL;
}
static struct device *endpoint_host(struct cxl_port *endpoint)
{
struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
if (is_cxl_root(port))
return port->uport_dev;
return &port->dev;
}
static void delete_endpoint(void *data)
{
struct cxl_memdev *cxlmd = data;
struct cxl_port *endpoint = cxlmd->endpoint;
struct device *host = endpoint_host(endpoint);
struct device *host = port_to_host(endpoint);
scoped_guard(device, host) {
if (host->driver && !endpoint->dead) {
@ -1456,7 +1433,7 @@ static void delete_endpoint(void *data)
int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
struct device *host = endpoint_host(endpoint);
struct device *host = port_to_host(endpoint);
struct device *dev = &cxlmd->dev;
get_device(host);
@ -1790,7 +1767,16 @@ static struct cxl_dport *find_or_add_dport(struct cxl_port *port,
{
struct cxl_dport *dport;
device_lock_assert(&port->dev);
/*
* The port is already visible in CXL hierarchy, but it may still
* be in the process of binding to the CXL port driver at this point.
*
* port creation and driver binding are protected by the port's host
* lock, so acquire the host lock here to ensure the port has completed
* driver binding before proceeding with dport addition.
*/
guard(device)(port_to_host(port));
guard(device)(&port->dev);
dport = cxl_find_dport_by_dev(port, dport_dev);
if (!dport) {
dport = probe_dport(port, dport_dev);
@ -1857,13 +1843,11 @@ retry:
* RP port enumerated by cxl_acpi without dport will
* have the dport added here.
*/
scoped_guard(device, &port->dev) {
dport = find_or_add_dport(port, dport_dev);
if (IS_ERR(dport)) {
if (PTR_ERR(dport) == -EAGAIN)
goto retry;
return PTR_ERR(dport);
}
dport = find_or_add_dport(port, dport_dev);
if (IS_ERR(dport)) {
if (PTR_ERR(dport) == -EAGAIN)
goto retry;
return PTR_ERR(dport);
}
rc = cxl_add_ep(dport, &cxlmd->dev);

View file

@ -1100,12 +1100,12 @@ static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
static void cxl_region_setup_flags(struct cxl_region *cxlr,
struct cxl_decoder *cxld)
{
if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags)) {
if (cxld->flags & CXL_DECODER_F_LOCK) {
set_bit(CXL_REGION_F_LOCK, &cxlr->flags);
clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
}
if (test_bit(CXL_DECODER_F_NORMALIZED_ADDRESSING, &cxld->flags))
if (cxld->flags & CXL_DECODER_F_NORMALIZED_ADDRESSING)
set_bit(CXL_REGION_F_NORMALIZED_ADDRESSING, &cxlr->flags);
}

View file

@ -574,11 +574,16 @@ struct cxl_nvdimm_bridge {
#define CXL_DEV_ID_LEN 19
enum {
CXL_NVD_F_INVALIDATED = 0,
};
struct cxl_nvdimm {
struct device dev;
struct cxl_memdev *cxlmd;
u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
u64 dirty_shutdowns;
unsigned long flags;
};
struct cxl_pmem_region_mapping {
@ -920,6 +925,8 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv);
struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm_bridge *__devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_port *port,

View file

@ -13,6 +13,20 @@
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
/**
 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
 * @host: platform firmware root device
 * @port: CXL port at the root of a CXL topology
 *
 * Thin exported wrapper that forwards to the cxl_pmem-local
 * __devm_cxl_add_nvdimm_bridge() implementation.
 *
 * Return: bridge device that can host cxl_nvdimm objects
 */
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *bridge;

	bridge = __devm_cxl_add_nvdimm_bridge(host, port);
	return bridge;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, "CXL");
static void clear_exclusive(void *mds)
{
clear_exclusive_cxl_commands(mds, exclusive_cmds);
@ -129,6 +143,9 @@ static int cxl_nvdimm_probe(struct device *dev)
struct nvdimm *nvdimm;
int rc;
if (test_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags))
return -EBUSY;
set_exclusive_cxl_commands(mds, exclusive_cmds);
rc = devm_add_action_or_reset(dev, clear_exclusive, mds);
if (rc)
@ -309,8 +326,10 @@ static int detach_nvdimm(struct device *dev, void *data)
scoped_guard(device, dev) {
if (dev->driver) {
cxl_nvd = to_cxl_nvdimm(dev);
if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data)
if (cxl_nvd->cxlmd && cxl_nvd->cxlmd->cxl_nvb == data) {
release = true;
set_bit(CXL_NVD_F_INVALIDATED, &cxl_nvd->flags);
}
}
}
if (release)
@ -353,6 +372,7 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.probe = cxl_nvdimm_bridge_probe,
.id = CXL_DEVICE_NVDIMM_BRIDGE,
.drv = {
.probe_type = PROBE_FORCE_SYNCHRONOUS,
.suppress_bind_attrs = true,
},
};