Another early drm-misc-fixes PR to revert the previous uapi fix sent in
drm-misc-fixes-2026-03-05, together with a UAF fix in TTM, an argument
order fix for panthor, a fix for the firmware getting stuck on
resource allocation error handling for amdxdna, and a few fixes for
ethosu (size calculation and reference underflows, and a validation
fix).
 -----BEGIN PGP SIGNATURE-----
 
 iJUEABMJAB0WIQTkHFbLp4ejekA/qfgnX84Zoj2+dgUCaap21wAKCRAnX84Zoj2+
 do4kAX0d4bmQIc9S315QMdXeTXU42vYEgpDVS6thHlU+Ga0YDP9lHkotHYtfge53
 6haCmNMBfA4cLVpW415JpW1ivfOdKo6Spo2jcdB1qt91K7p26HHDcOVnxjRKGhV+
 ssaHx41bEw==
 =H31C
 -----END PGP SIGNATURE-----

Merge tag 'drm-misc-fixes-2026-03-06' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Another early drm-misc-fixes PR to revert the previous uapi fix sent in
drm-misc-fixes-2026-03-05, together with a UAF fix in TTM, an argument
order fix for panthor, a fix for the firmware getting stuck on
resource allocation error handling for amdxdna, and a few fixes for
ethosu (size calculation and reference underflows, and a validation
fix).

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patch.msgid.link/20260306-grumpy-pegasus-of-witchcraft-6bd2db@houat
This commit is contained in:
Dave Airlie 2026-03-06 19:40:23 +10:00
commit 431989960f
9 changed files with 156 additions and 118 deletions

View file

@ -293,13 +293,20 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
}
intr_reg = i2x.mb_head_ptr_reg + 4;
hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
intr_reg, ret);
hwctx->priv->mbox_chann = xdna_mailbox_alloc_channel(ndev->mbox);
if (!hwctx->priv->mbox_chann) {
XDNA_ERR(xdna, "Not able to create channel");
ret = -EINVAL;
goto del_ctx_req;
}
ret = xdna_mailbox_start_channel(hwctx->priv->mbox_chann, &x2i, &i2x,
intr_reg, ret);
if (ret) {
XDNA_ERR(xdna, "Not able to create channel");
ret = -EINVAL;
goto free_channel;
}
ndev->hwctx_num++;
XDNA_DBG(xdna, "Mailbox channel irq: %d, msix_id: %d", ret, resp.msix_id);
@ -307,6 +314,8 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
return 0;
free_channel:
xdna_mailbox_free_channel(hwctx->priv->mbox_chann);
del_ctx_req:
aie2_destroy_context_req(ndev, hwctx->fw_ctx_id);
return ret;
@ -322,7 +331,7 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
ret = aie2_destroy_context_req(ndev, hwctx->fw_ctx_id);
xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
xdna_mailbox_free_channel(hwctx->priv->mbox_chann);
XDNA_DBG(xdna, "Destroyed fw ctx %d", hwctx->fw_ctx_id);
hwctx->priv->mbox_chann = NULL;
hwctx->fw_ctx_id = -1;
@ -921,7 +930,7 @@ void aie2_destroy_mgmt_chann(struct amdxdna_dev_hdl *ndev)
return;
xdna_mailbox_stop_channel(ndev->mgmt_chann);
xdna_mailbox_destroy_channel(ndev->mgmt_chann);
xdna_mailbox_free_channel(ndev->mgmt_chann);
ndev->mgmt_chann = NULL;
}

View file

@ -361,10 +361,29 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
}
pci_set_master(pdev);
mbox_res.ringbuf_base = ndev->sram_base;
mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
mbox_res.mbox_base = ndev->mbox_base;
mbox_res.mbox_size = MBOX_SIZE(ndev);
mbox_res.name = "xdna_mailbox";
ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
if (!ndev->mbox) {
XDNA_ERR(xdna, "failed to create mailbox device");
ret = -ENODEV;
goto disable_dev;
}
ndev->mgmt_chann = xdna_mailbox_alloc_channel(ndev->mbox);
if (!ndev->mgmt_chann) {
XDNA_ERR(xdna, "failed to alloc channel");
ret = -ENODEV;
goto disable_dev;
}
ret = aie2_smu_init(ndev);
if (ret) {
XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
goto disable_dev;
goto free_channel;
}
ret = aie2_psp_start(ndev->psp_hdl);
@ -379,18 +398,6 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
goto stop_psp;
}
mbox_res.ringbuf_base = ndev->sram_base;
mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
mbox_res.mbox_base = ndev->mbox_base;
mbox_res.mbox_size = MBOX_SIZE(ndev);
mbox_res.name = "xdna_mailbox";
ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
if (!ndev->mbox) {
XDNA_ERR(xdna, "failed to create mailbox device");
ret = -ENODEV;
goto stop_psp;
}
mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
if (mgmt_mb_irq < 0) {
ret = mgmt_mb_irq;
@ -399,13 +406,13 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
}
xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
ndev->mgmt_chann = xdna_mailbox_create_channel(ndev->mbox,
&ndev->mgmt_x2i,
&ndev->mgmt_i2x,
xdna_mailbox_intr_reg,
mgmt_mb_irq);
if (!ndev->mgmt_chann) {
XDNA_ERR(xdna, "failed to create management mailbox channel");
ret = xdna_mailbox_start_channel(ndev->mgmt_chann,
&ndev->mgmt_x2i,
&ndev->mgmt_i2x,
xdna_mailbox_intr_reg,
mgmt_mb_irq);
if (ret) {
XDNA_ERR(xdna, "failed to start management mailbox channel");
ret = -EINVAL;
goto stop_psp;
}
@ -413,37 +420,41 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
ret = aie2_mgmt_fw_init(ndev);
if (ret) {
XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
goto destroy_mgmt_chann;
goto stop_fw;
}
ret = aie2_pm_init(ndev);
if (ret) {
XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
goto destroy_mgmt_chann;
goto stop_fw;
}
ret = aie2_mgmt_fw_query(ndev);
if (ret) {
XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
goto destroy_mgmt_chann;
goto stop_fw;
}
ret = aie2_error_async_events_alloc(ndev);
if (ret) {
XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
goto destroy_mgmt_chann;
goto stop_fw;
}
ndev->dev_status = AIE2_DEV_START;
return 0;
destroy_mgmt_chann:
aie2_destroy_mgmt_chann(ndev);
stop_fw:
aie2_suspend_fw(ndev);
xdna_mailbox_stop_channel(ndev->mgmt_chann);
stop_psp:
aie2_psp_stop(ndev->psp_hdl);
fini_smu:
aie2_smu_fini(ndev);
free_channel:
xdna_mailbox_free_channel(ndev->mgmt_chann);
ndev->mgmt_chann = NULL;
disable_dev:
pci_disable_device(pdev);

View file

@ -460,26 +460,49 @@ msg_id_failed:
return ret;
}
struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
const struct xdna_mailbox_chann_res *x2i,
const struct xdna_mailbox_chann_res *i2x,
u32 iohub_int_addr,
int mb_irq)
struct mailbox_channel *xdna_mailbox_alloc_channel(struct mailbox *mb)
{
struct mailbox_channel *mb_chann;
int ret;
if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
pr_err("Ring buf size must be power of 2");
return NULL;
}
mb_chann = kzalloc_obj(*mb_chann);
if (!mb_chann)
return NULL;
INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
if (!mb_chann->work_q) {
MB_ERR(mb_chann, "Create workqueue failed");
goto free_chann;
}
mb_chann->mb = mb;
return mb_chann;
free_chann:
kfree(mb_chann);
return NULL;
}
void xdna_mailbox_free_channel(struct mailbox_channel *mb_chann)
{
destroy_workqueue(mb_chann->work_q);
kfree(mb_chann);
}
int
xdna_mailbox_start_channel(struct mailbox_channel *mb_chann,
const struct xdna_mailbox_chann_res *x2i,
const struct xdna_mailbox_chann_res *i2x,
u32 iohub_int_addr,
int mb_irq)
{
int ret;
if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
pr_err("Ring buf size must be power of 2");
return -EINVAL;
}
mb_chann->msix_irq = mb_irq;
mb_chann->iohub_int_addr = iohub_int_addr;
memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
@ -489,61 +512,37 @@ xdna_mailbox_create_channel(struct mailbox *mb,
mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
if (!mb_chann->work_q) {
MB_ERR(mb_chann, "Create workqueue failed");
goto free_and_out;
}
/* Everything look good. Time to enable irq handler */
ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
if (ret) {
MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
goto destroy_wq;
return ret;
}
mb_chann->bad_state = false;
mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
return mb_chann;
destroy_wq:
destroy_workqueue(mb_chann->work_q);
free_and_out:
kfree(mb_chann);
return NULL;
}
int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
struct mailbox_msg *mb_msg;
unsigned long msg_id;
MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
free_irq(mb_chann->msix_irq, mb_chann);
destroy_workqueue(mb_chann->work_q);
/* We can clean up and release resources */
xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
mailbox_release_msg(mb_chann, mb_msg);
xa_destroy(&mb_chann->chan_xa);
MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
kfree(mb_chann);
MB_DBG(mb_chann, "Mailbox channel started (irq: %d)", mb_chann->msix_irq);
return 0;
}
void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
struct mailbox_msg *mb_msg;
unsigned long msg_id;
/* Disable an irq and wait. This might sleep. */
disable_irq(mb_chann->msix_irq);
free_irq(mb_chann->msix_irq, mb_chann);
/* Cancel RX work and wait for it to finish */
cancel_work_sync(&mb_chann->rx_work);
MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
drain_workqueue(mb_chann->work_q);
/* We can clean up and release resources */
xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
mailbox_release_msg(mb_chann, mb_msg);
xa_destroy(&mb_chann->chan_xa);
MB_DBG(mb_chann, "Mailbox channel stopped, irq: %d", mb_chann->msix_irq);
}
struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,

View file

@ -74,9 +74,16 @@ struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
const struct xdna_mailbox_res *res);
/*
* xdna_mailbox_create_channel() -- Create a mailbox channel instance
* xdna_mailbox_alloc_channel() -- alloc a mailbox channel
*
* @mailbox: the handle return from xdna_mailbox_create()
* @mb: mailbox handle
*/
struct mailbox_channel *xdna_mailbox_alloc_channel(struct mailbox *mb);
/*
* xdna_mailbox_start_channel() -- start a mailbox channel instance
*
* @mb_chann: the handle return from xdna_mailbox_alloc_channel()
* @x2i: host to firmware mailbox resources
* @i2x: firmware to host mailbox resources
* @xdna_mailbox_intr_reg: register addr of MSI-X interrupt
@ -84,28 +91,24 @@ struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
*
* Return: If success, return a handle of mailbox channel. Otherwise, return NULL.
*/
struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mailbox,
const struct xdna_mailbox_chann_res *x2i,
const struct xdna_mailbox_chann_res *i2x,
u32 xdna_mailbox_intr_reg,
int mb_irq);
int
xdna_mailbox_start_channel(struct mailbox_channel *mb_chann,
const struct xdna_mailbox_chann_res *x2i,
const struct xdna_mailbox_chann_res *i2x,
u32 xdna_mailbox_intr_reg,
int mb_irq);
/*
* xdna_mailbox_destroy_channel() -- destroy mailbox channel
* xdna_mailbox_free_channel() -- free mailbox channel
*
* @mailbox_chann: the handle return from xdna_mailbox_create_channel()
*
* Return: if success, return 0. otherwise return error code
*/
int xdna_mailbox_destroy_channel(struct mailbox_channel *mailbox_chann);
void xdna_mailbox_free_channel(struct mailbox_channel *mailbox_chann);
/*
* xdna_mailbox_stop_channel() -- stop mailbox channel
*
* @mailbox_chann: the handle return from xdna_mailbox_create_channel()
*
* Return: if success, return 0. otherwise return error code
*/
void xdna_mailbox_stop_channel(struct mailbox_channel *mailbox_chann);

View file

@ -245,11 +245,14 @@ static int calc_sizes(struct drm_device *ddev,
((st->ifm.stride_kernel >> 1) & 0x1) + 1;
u32 stride_x = ((st->ifm.stride_kernel >> 5) & 0x2) +
(st->ifm.stride_kernel & 0x1) + 1;
u32 ifm_height = st->ofm.height[2] * stride_y +
s32 ifm_height = st->ofm.height[2] * stride_y +
st->ifm.height[2] - (st->ifm.pad_top + st->ifm.pad_bottom);
u32 ifm_width = st->ofm.width * stride_x +
s32 ifm_width = st->ofm.width * stride_x +
st->ifm.width - (st->ifm.pad_left + st->ifm.pad_right);
if (ifm_height < 0 || ifm_width < 0)
return -EINVAL;
len = feat_matrix_length(info, &st->ifm, ifm_width,
ifm_height, st->ifm.depth);
dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
@ -417,7 +420,10 @@ static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev,
return ret;
break;
case NPU_OP_ELEMENTWISE:
use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) ||
use_scale = ethosu_is_u65(edev) ?
(st.ifm2.broadcast & 0x80) :
(st.ifm2.broadcast == 8);
use_ifm2 = !(use_scale || (param == 5) ||
(param == 6) || (param == 7) || (param == 0x24));
use_ifm = st.ifm.broadcast != 8;
ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2);

View file

@ -143,17 +143,10 @@ out:
return ret;
}
static void ethosu_job_cleanup(struct kref *ref)
static void ethosu_job_err_cleanup(struct ethosu_job *job)
{
struct ethosu_job *job = container_of(ref, struct ethosu_job,
refcount);
unsigned int i;
pm_runtime_put_autosuspend(job->dev->base.dev);
dma_fence_put(job->done_fence);
dma_fence_put(job->inference_done_fence);
for (i = 0; i < job->region_cnt; i++)
drm_gem_object_put(job->region_bo[i]);
@ -162,6 +155,19 @@ static void ethosu_job_cleanup(struct kref *ref)
kfree(job);
}
static void ethosu_job_cleanup(struct kref *ref)
{
struct ethosu_job *job = container_of(ref, struct ethosu_job,
refcount);
pm_runtime_put_autosuspend(job->dev->base.dev);
dma_fence_put(job->done_fence);
dma_fence_put(job->inference_done_fence);
ethosu_job_err_cleanup(job);
}
static void ethosu_job_put(struct ethosu_job *job)
{
kref_put(&job->refcount, ethosu_job_cleanup);
@ -454,12 +460,16 @@ static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file
}
}
ret = ethosu_job_push(ejob);
if (!ret) {
ethosu_job_put(ejob);
return 0;
}
out_cleanup_job:
if (ret)
drm_sched_job_cleanup(&ejob->base);
out_put_job:
ethosu_job_put(ejob);
ethosu_job_err_cleanup(ejob);
return ret;
}

View file

@ -875,7 +875,7 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
return drm_syncobj_export_sync_file(file_private, args->handle,
point, &args->fd);
if (point)
if (args->point)
return -EINVAL;
return drm_syncobj_handle_to_fd(file_private, args->handle,
@ -909,7 +909,7 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
args->handle,
point);
if (point)
if (args->point)
return -EINVAL;
return drm_syncobj_fd_to_handle(file_private, args->fd,

View file

@ -893,14 +893,15 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue
out_sync:
/* Make sure the CPU caches are invalidated before the seqno is read.
* drm_gem_shmem_sync() is a NOP if map_wc=true, so no need to check
* panthor_gem_sync() is a NOP if map_wc=true, so no need to check
* it here.
*/
panthor_gem_sync(&bo->base.base, queue->syncwait.offset,
panthor_gem_sync(&bo->base.base,
DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE,
queue->syncwait.offset,
queue->syncwait.sync64 ?
sizeof(struct panthor_syncobj_64b) :
sizeof(struct panthor_syncobj_32b),
DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE);
sizeof(struct panthor_syncobj_32b));
return queue->syncwait.kmap + queue->syncwait.offset;

View file

@ -1107,8 +1107,7 @@ struct ttm_bo_swapout_walk {
static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
struct ttm_resource *res = bo->resource;
struct ttm_place place = { .mem_type = res->mem_type };
struct ttm_place place = { .mem_type = bo->resource->mem_type };
struct ttm_bo_swapout_walk *swapout_walk =
container_of(walk, typeof(*swapout_walk), walk);
struct ttm_operation_ctx *ctx = walk->arg.ctx;
@ -1148,7 +1147,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
/*
* Move to system cached
*/
if (res->mem_type != TTM_PL_SYSTEM) {
if (bo->resource->mem_type != TTM_PL_SYSTEM) {
struct ttm_resource *evict_mem;
struct ttm_place hop;
@ -1180,15 +1179,15 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
if (ttm_tt_is_populated(tt)) {
spin_lock(&bdev->lru_lock);
ttm_resource_del_bulk_move(res, bo);
ttm_resource_del_bulk_move(bo->resource, bo);
spin_unlock(&bdev->lru_lock);
ret = ttm_tt_swapout(bdev, tt, swapout_walk->gfp_flags);
spin_lock(&bdev->lru_lock);
if (ret)
ttm_resource_add_bulk_move(res, bo);
ttm_resource_move_to_lru_tail(res);
ttm_resource_add_bulk_move(bo->resource, bo);
ttm_resource_move_to_lru_tail(bo->resource);
spin_unlock(&bdev->lru_lock);
}