cxl/mem: Introduce cxl_memdev_attach for CXL-dependent operation

Unlike the cxl_pci class driver that opportunistically enables memory
expansion with no other dependent functionality, CXL accelerator drivers
have distinct PCIe-only and CXL-enhanced operation states. If CXL is
available some additional coherent memory/cache operations can be enabled,
otherwise traditional DMA+MMIO over PCIe/CXL.io is a fallback.

This constitutes a new mode of operation where the caller of
devm_cxl_add_memdev() wants to make a "go/no-go" decision about running
in CXL accelerated mode or falling back to PCIe-only operation. Part of
that decision making process likely also includes additional
CXL-acceleration-specific resource setup. Encapsulate both of those
requirements into 'struct cxl_memdev_attach' that provides a ->probe()
callback. The probe callback runs in cxl_mem_probe() context, after the
port topology is successfully attached for the given memdev. It supports
a contract where, upon successful return from devm_cxl_add_memdev(),
everything needed for CXL accelerated operation has been enabled.

Additionally the presence of @cxlmd->attach indicates that the accelerator
driver should be detached when CXL operation ends. This conceptually makes a CXL
link loss event mirror a PCIe link loss event which results in triggering
the ->remove() callback of affected devices+drivers. A driver can re-attach
to recover back to PCIe-only operation. Live recovery, i.e. without a
->remove()/->probe() cycle, is left as a future consideration.

[ dj: Replace with updated commit log from Dan ]

Cc: Smita Koralahalli <Smita.KoralahalliChannabasappa@amd.com>
Reviewed-by: Ben Cheatham <benjamin.cheatham@amd.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Alejandro Lucero <alucerop@amd.com>
Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Link: https://patch.msgid.link/20251216005616.3090129-7-dan.j.williams@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
This commit is contained in:
Dan Williams 2025-12-15 16:56:16 -08:00 committed by Dave Jiang
parent f2546eba53
commit 29317f8dc6
5 changed files with 57 additions and 12 deletions

View file

@ -641,14 +641,24 @@ static void detach_memdev(struct work_struct *work)
struct cxl_memdev *cxlmd;
cxlmd = container_of(work, typeof(*cxlmd), detach_work);
device_release_driver(&cxlmd->dev);
/*
* When the creator of @cxlmd sets ->attach it indicates CXL operation
* is required. In that case, @cxlmd detach escalates to parent device
* detach.
*/
if (cxlmd->attach)
device_release_driver(cxlmd->dev.parent);
else
device_release_driver(&cxlmd->dev);
put_device(&cxlmd->dev);
}
static struct lock_class_key cxl_memdev_key;
static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
const struct file_operations *fops)
const struct file_operations *fops,
const struct cxl_memdev_attach *attach)
{
struct cxl_memdev *cxlmd;
struct device *dev;
@ -664,6 +674,8 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
goto err;
cxlmd->id = rc;
cxlmd->depth = -1;
cxlmd->attach = attach;
cxlmd->endpoint = ERR_PTR(-ENXIO);
dev = &cxlmd->dev;
device_initialize(dev);
@ -1081,6 +1093,18 @@ static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
{
int rc;
/*
* If @attach is provided fail if the driver is not attached upon
* return. Note that failure here could be the result of a race to
* teardown the CXL port topology. I.e. cxl_mem_probe() could have
* succeeded and then cxl_mem unbound before the lock is acquired.
*/
guard(device)(&cxlmd->dev);
if (cxlmd->attach && !cxlmd->dev.driver) {
cxl_memdev_unregister(cxlmd);
return ERR_PTR(-ENXIO);
}
rc = devm_add_action_or_reset(cxlmd->cxlds->dev, cxl_memdev_unregister,
cxlmd);
if (rc)
@ -1093,13 +1117,14 @@ static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
* Core helper for devm_cxl_add_memdev() that wants to both create a device and
* assert to the caller that upon return cxl_mem::probe() has been invoked.
*/
struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
const struct cxl_memdev_attach *attach)
{
struct device *dev;
int rc;
struct cxl_memdev *cxlmd __free(put_cxlmd) =
cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
cxl_memdev_alloc(cxlds, &cxl_memdev_fops, attach);
if (IS_ERR(cxlmd))
return cxlmd;

View file

@ -34,6 +34,10 @@
(FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \
CXLMDEV_RESET_NEEDED_NOT)
/**
 * struct cxl_memdev_attach - hooks for callers that depend on CXL operation
 * @probe: invoked in cxl_mem_probe() context after the memdev has attached
 *         to the CXL port topology; a non-zero return fails the attach
 *         (per the commit description — confirm against cxl_mem_probe())
 */
struct cxl_memdev_attach {
	int (*probe)(struct cxl_memdev *cxlmd);
};
/**
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
* @dev: driver core device object
@ -43,6 +47,7 @@
* @cxl_nvb: coordinate removal of @cxl_nvd if present
* @cxl_nvd: optional bridge to an nvdimm if the device supports pmem
* @endpoint: connection to the CXL port topology for this memory device
* @attach: creator of this memdev depends on CXL link attach to operate
* @id: id number of this memdev instance.
* @depth: endpoint port depth
* @scrub_cycle: current scrub cycle set for this device
@ -59,6 +64,7 @@ struct cxl_memdev {
struct cxl_nvdimm_bridge *cxl_nvb;
struct cxl_nvdimm *cxl_nvd;
struct cxl_port *endpoint;
const struct cxl_memdev_attach *attach;
int id;
int depth;
u8 scrub_cycle;
@ -95,8 +101,10 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
return is_cxl_memdev(port->uport_dev);
}
struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
const struct cxl_memdev_attach *attach);
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
const struct cxl_memdev_attach *attach);
int devm_cxl_sanitize_setup_notifier(struct device *host,
struct cxl_memdev *cxlmd);
struct cxl_memdev_state;

View file

@ -142,6 +142,12 @@ static int cxl_mem_probe(struct device *dev)
return rc;
}
if (cxlmd->attach) {
rc = cxlmd->attach->probe(cxlmd);
if (rc)
return rc;
}
rc = devm_cxl_memdev_edac_register(cxlmd);
if (rc)
dev_dbg(dev, "CXL memdev EDAC registration failed rc=%d\n", rc);
@ -166,17 +172,23 @@ static int cxl_mem_probe(struct device *dev)
/**
* devm_cxl_add_memdev - Add a CXL memory device
* @cxlds: CXL device state to associate with the memdev
* @attach: Caller depends on CXL topology attachment
*
* Upon return the device will have had a chance to attach to the
* cxl_mem driver, but may fail if the CXL topology is not ready
* (hardware CXL link down, or software platform CXL root not attached)
* cxl_mem driver, but may fail to attach if the CXL topology is not ready
* (hardware CXL link down, or software platform CXL root not attached).
*
* When @attach is NULL it indicates the caller wants the memdev to remain
* registered even if it does not immediately attach to the CXL hierarchy. When
* @attach is provided a cxl_mem_probe() failure leads to failure of this routine.
*
* The parent of the resulting device and the devm context for allocations is
* @cxlds->dev.
*/
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
const struct cxl_memdev_attach *attach)
{
return __devm_cxl_add_memdev(cxlds);
return __devm_cxl_add_memdev(cxlds, attach);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, "CXL");

View file

@ -1006,7 +1006,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
dev_dbg(&pdev->dev, "No CXL Features discovered\n");
cxlmd = devm_cxl_add_memdev(cxlds);
cxlmd = devm_cxl_add_memdev(cxlds, NULL);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);

View file

@ -1767,7 +1767,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
cxl_mock_add_event_logs(&mdata->mes);
cxlmd = devm_cxl_add_memdev(cxlds);
cxlmd = devm_cxl_add_memdev(cxlds, NULL);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);