linux/drivers/accel/amdxdna/aie2_pm.c
Lizhi Hou 1aa82181a3 accel/amdxdna: Fix deadlock for suspend and resume
When an application issues a query IOCTL while auto suspend is running,
a deadlock can occur. The query path holds dev_lock and then calls
pm_runtime_resume_and_get(), which waits for the ongoing suspend to
complete. Meanwhile, the suspend callback attempts to acquire dev_lock
and blocks, resulting in a deadlock.

Fix this by releasing dev_lock before calling pm_runtime_resume_and_get()
and reacquiring it after the call completes. Also acquire dev_lock in the
resume callback to keep the locking consistent.

Fixes: 063db45183 ("accel/amdxdna: Enhance runtime power management")
Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
Link: https://patch.msgid.link/20260211204644.722758-1-lizhi.hou@amd.com
2026-02-23 09:24:17 -08:00

126 lines
2.5 KiB
C

// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024, Advanced Micro Devices, Inc.
*/
#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include "aie2_pci.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_pm.h"
#define AIE2_CLK_GATING_ENABLE 1
#define AIE2_CLK_GATING_DISABLE 0
/*
 * Program the firmware clock-gating runtime config and, on success,
 * record the new setting in the device handle.
 *
 * Returns 0 on success or the negative error code from aie2_runtime_cfg().
 */
static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 val)
{
	int err;

	err = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &val);
	if (!err)
		ndev->clk_gating = val;

	return err;
}
/*
 * Set the device's dynamic power management (DPM) level.
 *
 * Takes a runtime-PM reference around the hardware call so the device is
 * awake while the level is programmed; the cached dpm_level is only
 * updated when the hardware op succeeds.
 *
 * Returns 0 on success or a negative error code.
 */
int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = ndev->xdna;
	int err;

	err = amdxdna_pm_resume_get_locked(xdna);
	if (err)
		return err;

	err = ndev->priv->hw_ops.set_dpm(ndev, dpm_level);
	if (!err)
		ndev->dpm_level = dpm_level;

	amdxdna_pm_suspend_put(xdna);
	return err;
}
int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
{
int ret;
if (ndev->dev_status != AIE2_DEV_UNINIT) {
/* Resume device */
ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
if (ret)
return ret;
ret = aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
if (ret)
return ret;
return 0;
}
while (ndev->priv->dpm_clk_tbl[ndev->max_dpm_level].hclk)
ndev->max_dpm_level++;
ndev->max_dpm_level--;
ret = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
if (ret)
return ret;
ndev->dpm_level = ndev->max_dpm_level;
ret = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
if (ret)
return ret;
ndev->pw_mode = POWER_MODE_DEFAULT;
ndev->dft_dpm_level = ndev->max_dpm_level;
return 0;
}
/*
 * Switch the device to the requested power mode.
 *
 * Caller must hold xdna->dev_lock (asserted below). Turbo mode is refused
 * while any hardware context is active. Each mode maps to a (clock gating,
 * DPM level) pair which is programmed via aie2_pm_set_dpm() and
 * aie2_pm_set_clk_gating(); pw_mode is only updated once both succeed.
 *
 * Returns 0 on success, -EINVAL if turbo is requested with active contexts,
 * -EOPNOTSUPP for unknown modes, or an error from the hardware calls.
 */
int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
{
	struct amdxdna_dev *xdna = ndev->xdna;
	u32 gating, level;
	int err;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	if (ndev->pw_mode == target)
		return 0;

	switch (target) {
	case POWER_MODE_TURBO:
		if (ndev->hwctx_num) {
			XDNA_ERR(xdna, "Can not set turbo when there is active hwctx");
			return -EINVAL;
		}
		gating = AIE2_CLK_GATING_DISABLE;
		level = ndev->max_dpm_level;
		break;
	case POWER_MODE_HIGH:
		gating = AIE2_CLK_GATING_ENABLE;
		level = ndev->max_dpm_level;
		break;
	case POWER_MODE_DEFAULT:
		gating = AIE2_CLK_GATING_ENABLE;
		level = ndev->dft_dpm_level;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = aie2_pm_set_dpm(ndev, level);
	if (err)
		return err;

	err = aie2_pm_set_clk_gating(ndev, gating);
	if (err)
		return err;

	ndev->pw_mode = target;
	return 0;
}