mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:04:51 +01:00
amd-drm-next-6.19-2025-10-29:
amdgpu: - VPE idle handler fix - Re-enable DM idle optimizations - DCN3.0 fix - SMU fix - Powerplay fixes for fiji/iceland - License copy-pasta fixes - HDP eDP panel fix - Vblank fix - RAS fixes - SR-IOV updates - SMU 13 VCN reset fix - DMUB fixes - DC frame limit fix - Additional DC underflow logging - DCN 3.1.5 fixes - DC Analog encoders support - Enable DC on bonaire by default - UserQ fixes - Remove redundant pm_runtime_mark_last_busy() calls amdkfd: - Process cleanup fix - Misc fixes radeon: - devm migration fixes - Remove redundant pm_runtime_mark_last_busy() calls UAPI - Add ABM KMS property Proposed kwin changes: https://invent.kde.org/plasma/kwin/-/merge_requests/6028 -----BEGIN PGP SIGNATURE----- iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCaQJ+0gAKCRC93/aFa7yZ 2Nk+AQCHTFT+1Tnte1dV3BhmHKtChER0EmSmcct/fPYthfUmwwD/SjWpFlyaUCeq OQzJtocc6Af4DAXS+xDPpkiFO6tjRwU= =8VBA -----END PGP SIGNATURE----- Merge tag 'amd-drm-next-6.19-2025-10-29' of https://gitlab.freedesktop.org/agd5f/linux into drm-next amd-drm-next-6.19-2025-10-29: amdgpu: - VPE idle handler fix - Re-enable DM idle optimizations - DCN3.0 fix - SMU fix - Powerplay fixes for fiji/iceland - License copy-pasta fixes - HDP eDP panel fix - Vblank fix - RAS fixes - SR-IOV updates - SMU 13 VCN reset fix - DMUB fixes - DC frame limit fix - Additional DC underflow logging - DCN 3.1.5 fixes - DC Analog encoders support - Enable DC on bonaire by default - UserQ fixes - Remove redundant pm_runtime_mark_last_busy() calls amdkfd: - Process cleanup fix - Misc fixes radeon: - devm migration fixes - Remove redundant pm_runtime_mark_last_busy() calls UAPI - Add ABM KMS property Proposed kwin changes: https://invent.kde.org/plasma/kwin/-/merge_requests/6028 Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch> From: Alex Deucher <alexander.deucher@amd.com> Link: https://patch.msgid.link/20251029205713.9480-1-alexander.deucher@amd.com
This commit is contained in:
commit
f67d54e96b
256 changed files with 3546 additions and 889 deletions
|
|
@ -1176,6 +1176,12 @@ struct amdgpu_device {
|
|||
* queue fence.
|
||||
*/
|
||||
struct xarray userq_xa;
|
||||
/**
|
||||
* @userq_doorbell_xa: Global user queue map (doorbell index → queue)
|
||||
* Key: doorbell_index (unique global identifier for the queue)
|
||||
* Value: struct amdgpu_usermode_queue
|
||||
*/
|
||||
struct xarray userq_doorbell_xa;
|
||||
|
||||
/* df */
|
||||
struct amdgpu_df df;
|
||||
|
|
@ -1309,8 +1315,6 @@ struct amdgpu_device {
|
|||
*/
|
||||
bool apu_prefer_gtt;
|
||||
|
||||
struct list_head userq_mgr_list;
|
||||
struct mutex userq_mutex;
|
||||
bool userq_halt_for_enforce_isolation;
|
||||
struct amdgpu_uid *uid_info;
|
||||
|
||||
|
|
|
|||
|
|
@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
|
|||
pm_runtime_get_sync(adev_to_drm(adev)->dev);
|
||||
/* Just fire off a uevent and let userspace tell us what to do */
|
||||
drm_helper_hpd_irq_event(adev_to_drm(adev));
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -103,7 +103,7 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
|
|||
{
|
||||
uint8_t __iomem *bios = NULL;
|
||||
resource_size_t vram_base;
|
||||
resource_size_t size = 256 * 1024; /* ??? */
|
||||
u32 size = 256U * 1024U; /* ??? */
|
||||
|
||||
if (!(adev->flags & AMD_IS_APU))
|
||||
if (amdgpu_device_need_post(adev))
|
||||
|
|
@ -126,7 +126,7 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
|
|||
*/
|
||||
if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
|
||||
if (amdgpu_virt_get_dynamic_data_info(adev,
|
||||
AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, (uint64_t *)&size)) {
|
||||
AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) {
|
||||
amdgpu_bios_release(adev);
|
||||
return false;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -734,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
|
|||
|
||||
amdgpu_connector_update_scratch_regs(connector, ret);
|
||||
|
||||
if (!drm_kms_helper_is_poll_worker()) {
|
||||
pm_runtime_mark_last_busy(connector->dev->dev);
|
||||
if (!drm_kms_helper_is_poll_worker())
|
||||
pm_runtime_put_autosuspend(connector->dev->dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -919,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
|
|||
amdgpu_connector_update_scratch_regs(connector, ret);
|
||||
|
||||
out:
|
||||
if (!drm_kms_helper_is_poll_worker()) {
|
||||
pm_runtime_mark_last_busy(connector->dev->dev);
|
||||
if (!drm_kms_helper_is_poll_worker())
|
||||
pm_runtime_put_autosuspend(connector->dev->dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -1146,10 +1142,8 @@ out:
|
|||
amdgpu_connector_update_scratch_regs(connector, ret);
|
||||
|
||||
exit:
|
||||
if (!drm_kms_helper_is_poll_worker()) {
|
||||
pm_runtime_mark_last_busy(connector->dev->dev);
|
||||
if (!drm_kms_helper_is_poll_worker())
|
||||
pm_runtime_put_autosuspend(connector->dev->dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -1486,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
|
|||
|
||||
amdgpu_connector_update_scratch_regs(connector, ret);
|
||||
out:
|
||||
if (!drm_kms_helper_is_poll_worker()) {
|
||||
pm_runtime_mark_last_busy(connector->dev->dev);
|
||||
if (!drm_kms_helper_is_poll_worker())
|
||||
pm_runtime_put_autosuspend(connector->dev->dev);
|
||||
}
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
|
||||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright 2025 Advanced Micro Devices, Inc.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
/*
|
||||
* Copyright 2025 Advanced Micro Devices, Inc.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
|
|||
if (use_bank) {
|
||||
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
|
||||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return -EINVAL;
|
||||
|
|
@ -179,7 +178,6 @@ end:
|
|||
if (pm_pg_lock)
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
|
|
@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
|
|||
if (rd->id.use_grbm) {
|
||||
if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
|
||||
(rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
mutex_unlock(&rd->lock);
|
||||
|
|
@ -310,7 +307,6 @@ end:
|
|||
|
||||
mutex_unlock(&rd->lock);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
|
|
@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
|
|||
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
if (!x) {
|
||||
|
|
@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return r;
|
||||
|
|
@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return r;
|
||||
|
|
@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return r;
|
||||
|
|
@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return r;
|
||||
|
|
@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return r;
|
||||
|
|
@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
amdgpu_virt_disable_access_debugfs(adev);
|
||||
return r;
|
||||
|
|
@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
|
|||
|
||||
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
if (r) {
|
||||
|
|
@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
|
|||
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
if (!x) {
|
||||
|
|
@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
|
|||
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
|
||||
mutex_unlock(&adev->grbm_idx_mutex);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
while (size) {
|
||||
|
|
@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu
|
|||
|
||||
r = result;
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
|
|||
|
||||
up_write(&adev->reset_domain->sem);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return 0;
|
||||
|
|
@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
|
|||
|
||||
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return 0;
|
||||
|
|
@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
|
|||
|
||||
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return 0;
|
||||
|
|
@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
|
|||
|
||||
r = amdgpu_benchmark(adev, val);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
|
|||
ret = -EINVAL;
|
||||
|
||||
out:
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return ret;
|
||||
|
|
|
|||
|
|
@ -4215,7 +4215,6 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
|
|||
#else
|
||||
return false;
|
||||
#endif
|
||||
case CHIP_BONAIRE:
|
||||
case CHIP_KAVERI:
|
||||
case CHIP_KABINI:
|
||||
case CHIP_MULLINS:
|
||||
|
|
@ -4558,7 +4557,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
|||
mutex_init(&adev->gfx.userq_sch_mutex);
|
||||
mutex_init(&adev->gfx.workload_profile_mutex);
|
||||
mutex_init(&adev->vcn.workload_profile_mutex);
|
||||
mutex_init(&adev->userq_mutex);
|
||||
|
||||
amdgpu_device_init_apu_flags(adev);
|
||||
|
||||
|
|
@ -4586,7 +4584,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
|||
|
||||
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
|
||||
|
||||
INIT_LIST_HEAD(&adev->userq_mgr_list);
|
||||
xa_init(&adev->userq_doorbell_xa);
|
||||
|
||||
INIT_DELAYED_WORK(&adev->delayed_init_work,
|
||||
amdgpu_device_delayed_init_work_handler);
|
||||
|
|
|
|||
|
|
@ -311,7 +311,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
|
|||
*/
|
||||
if (amdgpu_virt_get_dynamic_data_info(adev,
|
||||
AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
|
||||
(uint64_t *)&adev->discovery.size)) {
|
||||
&adev->discovery.size)) {
|
||||
dev_err(adev->dev,
|
||||
"failed to read discovery info from dynamic critical region.");
|
||||
ret = -EINVAL;
|
||||
|
|
|
|||
|
|
@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
|
|||
if (crtc->enabled)
|
||||
active = true;
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
|
||||
adev = drm_to_adev(dev);
|
||||
/* if we have active crtcs and we don't have a power ref,
|
||||
* take the current one
|
||||
|
|
@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
|
|||
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
|
||||
};
|
||||
|
||||
/**
|
||||
* DOC: property for adaptive backlight modulation
|
||||
*
|
||||
* The 'adaptive backlight modulation' property is used for the compositor to
|
||||
* directly control the adaptive backlight modulation power savings feature
|
||||
* that is part of DCN hardware.
|
||||
*
|
||||
* The property will be attached specifically to eDP panels that support it.
|
||||
*
|
||||
* The property is by default set to 'sysfs' to allow the sysfs file 'panel_power_savings'
|
||||
* to be able to control it.
|
||||
* If set to 'off' the compositor will ensure it stays off.
|
||||
* The other values 'min', 'bias min', 'bias max', and 'max' will control the
|
||||
* intensity of the power savings.
|
||||
*
|
||||
* Modifying this value can have implications on color accuracy, so tread
|
||||
* carefully.
|
||||
*/
|
||||
static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
|
||||
{
|
||||
const struct drm_prop_enum_list props[] = {
|
||||
{ ABM_SYSFS_CONTROL, "sysfs" },
|
||||
{ ABM_LEVEL_OFF, "off" },
|
||||
{ ABM_LEVEL_MIN, "min" },
|
||||
{ ABM_LEVEL_BIAS_MIN, "bias min" },
|
||||
{ ABM_LEVEL_BIAS_MAX, "bias max" },
|
||||
{ ABM_LEVEL_MAX, "max" },
|
||||
};
|
||||
struct drm_property *prop;
|
||||
int i;
|
||||
|
||||
if (!adev->dc_enabled)
|
||||
return 0;
|
||||
|
||||
prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
|
||||
"adaptive backlight modulation",
|
||||
6);
|
||||
if (!prop)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(props); i++) {
|
||||
int ret;
|
||||
|
||||
ret = drm_property_add_enum(prop, props[i].type,
|
||||
props[i].name);
|
||||
|
||||
if (ret) {
|
||||
drm_property_destroy(adev_to_drm(adev), prop);
|
||||
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
adev->mode_info.abm_level_property = prop;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
|
||||
{
|
||||
int sz;
|
||||
|
|
@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
|
|||
"dither",
|
||||
amdgpu_dither_enum_list, sz);
|
||||
|
||||
return 0;
|
||||
return amdgpu_display_setup_abm_prop(adev);
|
||||
}
|
||||
|
||||
void amdgpu_display_update_priority(struct amdgpu_device *adev)
|
||||
|
|
|
|||
|
|
@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev);
|
|||
int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
|
||||
struct drm_scanout_buffer *sb);
|
||||
|
||||
#define ABM_SYSFS_CONTROL -1
|
||||
#define ABM_LEVEL_OFF 0
|
||||
#define ABM_LEVEL_MIN 1
|
||||
#define ABM_LEVEL_BIAS_MIN 2
|
||||
#define ABM_LEVEL_BIAS_MAX 3
|
||||
#define ABM_LEVEL_MAX 4
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -2228,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
|
|||
adev->pdev->bus->number, i);
|
||||
if (p) {
|
||||
pm_runtime_get_sync(&p->dev);
|
||||
pm_runtime_mark_last_busy(&p->dev);
|
||||
pm_runtime_put_autosuspend(&p->dev);
|
||||
pci_dev_put(p);
|
||||
}
|
||||
|
|
@ -2474,7 +2473,6 @@ retry_init:
|
|||
|
||||
pm_runtime_allow(ddev->dev);
|
||||
|
||||
pm_runtime_mark_last_busy(ddev->dev);
|
||||
pm_runtime_put_autosuspend(ddev->dev);
|
||||
|
||||
pci_wake_from_d3(pdev, TRUE);
|
||||
|
|
@ -2772,22 +2770,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev)
|
|||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
struct amdgpu_device *adev = drm_to_adev(drm_dev);
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
int queue_id;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&adev->userq_mutex);
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
|
||||
ret = -EBUSY;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
done:
|
||||
mutex_unlock(&adev->userq_mutex);
|
||||
|
||||
return ret;
|
||||
return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY;
|
||||
}
|
||||
|
||||
static int amdgpu_pmops_runtime_suspend(struct device *dev)
|
||||
|
|
@ -2934,7 +2918,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
|
|||
|
||||
ret = amdgpu_runtime_idle_check_userq(dev);
|
||||
done:
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_autosuspend(dev);
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -2970,7 +2953,6 @@ long amdgpu_drm_ioctl(struct file *filp,
|
|||
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
out:
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
return ret;
|
||||
|
|
|
|||
|
|
@ -250,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
|
|||
drv->signalled_wptr = am_fence->wptr;
|
||||
dma_fence_signal(fence);
|
||||
dma_fence_put(fence);
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
} while (last_seq != seq);
|
||||
|
||||
|
|
@ -928,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val)
|
|||
|
||||
*val = atomic_read(&adev->reset_domain->reset_res);
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -1670,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
|
|||
|
||||
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
|
||||
|
||||
pm_runtime_mark_last_busy(ddev->dev);
|
||||
pm_runtime_put_autosuspend(ddev->dev);
|
||||
|
||||
if (ret)
|
||||
|
|
|
|||
|
|
@ -221,6 +221,7 @@ retry:
|
|||
|
||||
out_free_pfns:
|
||||
kvfree(pfns);
|
||||
hmm_range->hmm_pfns = NULL;
|
||||
out_free_range:
|
||||
if (r == -EBUSY)
|
||||
r = -EAGAIN;
|
||||
|
|
@ -286,7 +287,9 @@ void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
|
|||
if (!range)
|
||||
return;
|
||||
|
||||
kvfree(range->hmm_range.hmm_pfns);
|
||||
if (range->hmm_range.hmm_pfns)
|
||||
kvfree(range->hmm_range.hmm_pfns);
|
||||
|
||||
amdgpu_bo_unref(&range->bo);
|
||||
kfree(range);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1471,7 +1471,6 @@ error_pasid:
|
|||
kfree(fpriv);
|
||||
|
||||
out_suspend:
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_put:
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
|
|
@ -1539,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
|
|||
kfree(fpriv);
|
||||
file_priv->driver_priv = NULL;
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -326,6 +326,8 @@ struct amdgpu_mode_info {
|
|||
struct drm_property *audio_property;
|
||||
/* FMT dithering */
|
||||
struct drm_property *dither_property;
|
||||
/* Adaptive Backlight Modulation (power feature) */
|
||||
struct drm_property *abm_level_property;
|
||||
/* hardcoded DFP edid from BIOS */
|
||||
const struct drm_edid *bios_hardcoded_edid;
|
||||
|
||||
|
|
|
|||
|
|
@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
|
|||
}
|
||||
|
||||
amdgpu_gfx_off_ctrl(adev, true);
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return size;
|
||||
|
|
|
|||
|
|
@ -612,6 +612,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
|
|||
return size;
|
||||
}
|
||||
|
||||
static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
|
||||
|
||||
/**
|
||||
* DOC: AMDGPU RAS debugfs EEPROM table reset interface
|
||||
*
|
||||
|
|
@ -636,6 +638,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
|
|||
(struct amdgpu_device *)file_inode(f)->i_private;
|
||||
int ret;
|
||||
|
||||
if (amdgpu_uniras_enabled(adev)) {
|
||||
ret = amdgpu_uniras_clear_badpages_info(adev);
|
||||
return ret ? ret : size;
|
||||
}
|
||||
|
||||
ret = amdgpu_ras_eeprom_reset_table(
|
||||
&(amdgpu_ras_get_context(adev)->eeprom_control));
|
||||
|
||||
|
|
@ -1543,6 +1550,21 @@ out_fini_err_data:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
|
||||
{
|
||||
struct ras_cmd_dev_handle req = {0};
|
||||
int ret;
|
||||
|
||||
ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
|
||||
&req, sizeof(req), NULL, 0);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
|
||||
struct ras_query_if *info)
|
||||
{
|
||||
|
|
@ -1928,12 +1950,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
|
|||
return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
|
||||
}
|
||||
|
||||
static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
|
||||
u32 *minor, u32 *rev)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
|
||||
return false;
|
||||
|
||||
for (i = 0; i < adev->num_ip_blocks; i++) {
|
||||
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
|
||||
*major = adev->ip_blocks[i].version->major;
|
||||
*minor = adev->ip_blocks[i].version->minor;
|
||||
*rev = adev->ip_blocks[i].version->rev;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct amdgpu_ras *con =
|
||||
container_of(attr, struct amdgpu_ras, version_attr);
|
||||
return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
|
||||
u32 major, minor, rev;
|
||||
ssize_t size = 0;
|
||||
|
||||
size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
|
||||
con->eeprom_control.tbl_hdr.version);
|
||||
|
||||
if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
|
||||
size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
|
||||
major, minor, rev);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
|
||||
|
|
@ -4099,7 +4151,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
|
|||
atomic_set(&con->ras_ue_count, ue_count);
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
Out:
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
|
|||
dev_err(adev->dev, "Invalid input: %s\n", str);
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(dev->dev);
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
||||
return size;
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@
|
|||
#include "amdgpu_vm.h"
|
||||
#include "amdgpu_userq.h"
|
||||
#include "amdgpu_hmm.h"
|
||||
#include "amdgpu_reset.h"
|
||||
#include "amdgpu_userq_fence.h"
|
||||
|
||||
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
|
||||
|
|
@ -159,9 +160,9 @@ static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
|
|||
r = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
|
||||
dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
|
||||
queue, va_cursor->gpu_addr);
|
||||
amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
|
||||
}
|
||||
err:
|
||||
amdgpu_bo_unreserve(queue->vm->root.bo);
|
||||
|
|
@ -278,19 +279,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
|
|||
struct amdgpu_device *adev = uq_mgr->adev;
|
||||
const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
|
||||
|
||||
/* Wait for mode-1 reset to complete */
|
||||
down_read(&adev->reset_domain->sem);
|
||||
|
||||
/* Drop the userq reference. */
|
||||
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
|
||||
uq_funcs->mqd_destroy(uq_mgr, queue);
|
||||
amdgpu_userq_fence_driver_free(queue);
|
||||
idr_remove(&uq_mgr->userq_idr, queue_id);
|
||||
/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
|
||||
xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
|
||||
xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
|
||||
queue->userq_mgr = NULL;
|
||||
list_del(&queue->userq_va_list);
|
||||
kfree(queue);
|
||||
|
||||
up_read(&adev->reset_domain->sem);
|
||||
}
|
||||
|
||||
static struct amdgpu_usermode_queue *
|
||||
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
|
||||
{
|
||||
return idr_find(&uq_mgr->userq_idr, qid);
|
||||
return xa_load(&uq_mgr->userq_mgr_xa, qid);
|
||||
}
|
||||
|
||||
void
|
||||
|
|
@ -479,7 +488,6 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
|
|||
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
|
||||
mutex_unlock(&uq_mgr->userq_mutex);
|
||||
|
||||
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
|
|
@ -551,8 +559,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
|||
struct amdgpu_db_info db_info;
|
||||
char *queue_name;
|
||||
bool skip_map_queue;
|
||||
u32 qid;
|
||||
uint64_t index;
|
||||
int qid, r = 0;
|
||||
int r = 0;
|
||||
int priority =
|
||||
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
|
||||
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
|
||||
|
|
@ -575,7 +584,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
|||
*
|
||||
* This will also make sure we have a valid eviction fence ready to be used.
|
||||
*/
|
||||
mutex_lock(&adev->userq_mutex);
|
||||
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
|
||||
|
||||
uq_funcs = adev->userq_funcs[args->in.ip_type];
|
||||
|
|
@ -638,15 +646,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
|
||||
if (qid < 0) {
|
||||
/* Wait for mode-1 reset to complete */
|
||||
down_read(&adev->reset_domain->sem);
|
||||
r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
|
||||
if (r) {
|
||||
kfree(queue);
|
||||
up_read(&adev->reset_domain->sem);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
|
||||
if (r) {
|
||||
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
|
||||
amdgpu_userq_fence_driver_free(queue);
|
||||
uq_funcs->mqd_destroy(uq_mgr, queue);
|
||||
kfree(queue);
|
||||
r = -ENOMEM;
|
||||
up_read(&adev->reset_domain->sem);
|
||||
goto unlock;
|
||||
}
|
||||
up_read(&adev->reset_domain->sem);
|
||||
queue->userq_mgr = uq_mgr;
|
||||
|
||||
/* don't map the queue if scheduling is halted */
|
||||
if (adev->userq_halt_for_enforce_isolation &&
|
||||
|
|
@ -659,7 +679,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
|||
r = amdgpu_userq_map_helper(uq_mgr, queue);
|
||||
if (r) {
|
||||
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
|
||||
idr_remove(&uq_mgr->userq_idr, qid);
|
||||
xa_erase(&uq_mgr->userq_mgr_xa, qid);
|
||||
amdgpu_userq_fence_driver_free(queue);
|
||||
uq_funcs->mqd_destroy(uq_mgr, queue);
|
||||
kfree(queue);
|
||||
|
|
@ -684,7 +704,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
|||
|
||||
unlock:
|
||||
mutex_unlock(&uq_mgr->userq_mutex);
|
||||
mutex_unlock(&adev->userq_mutex);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
|
@ -782,11 +801,11 @@ static int
|
|||
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
|
||||
{
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
int queue_id;
|
||||
unsigned long queue_id;
|
||||
int ret = 0, r;
|
||||
|
||||
/* Resume all the queues for this process */
|
||||
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
|
||||
xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
|
||||
|
||||
if (!amdgpu_userq_buffer_vas_mapped(queue)) {
|
||||
drm_file_err(uq_mgr->file,
|
||||
|
|
@ -1023,11 +1042,11 @@ static int
|
|||
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
|
||||
{
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
int queue_id;
|
||||
unsigned long queue_id;
|
||||
int ret = 0, r;
|
||||
|
||||
/* Try to unmap all the queues in this process ctx */
|
||||
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
|
||||
xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
|
||||
r = amdgpu_userq_preempt_helper(uq_mgr, queue);
|
||||
if (r)
|
||||
ret = r;
|
||||
|
|
@ -1042,9 +1061,10 @@ static int
|
|||
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
|
||||
{
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
int queue_id, ret;
|
||||
unsigned long queue_id;
|
||||
int ret;
|
||||
|
||||
idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
|
||||
xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
|
||||
struct dma_fence *f = queue->last_fence;
|
||||
|
||||
if (!f || dma_fence_is_signaled(f))
|
||||
|
|
@ -1097,44 +1117,30 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f
|
|||
struct amdgpu_device *adev)
|
||||
{
|
||||
mutex_init(&userq_mgr->userq_mutex);
|
||||
idr_init_base(&userq_mgr->userq_idr, 1);
|
||||
xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
|
||||
userq_mgr->adev = adev;
|
||||
userq_mgr->file = file_priv;
|
||||
|
||||
mutex_lock(&adev->userq_mutex);
|
||||
list_add(&userq_mgr->list, &adev->userq_mgr_list);
|
||||
mutex_unlock(&adev->userq_mutex);
|
||||
|
||||
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
|
||||
{
|
||||
struct amdgpu_device *adev = userq_mgr->adev;
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
uint32_t queue_id;
|
||||
unsigned long queue_id;
|
||||
|
||||
cancel_delayed_work_sync(&userq_mgr->resume_work);
|
||||
|
||||
mutex_lock(&adev->userq_mutex);
|
||||
mutex_lock(&userq_mgr->userq_mutex);
|
||||
idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
|
||||
xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
|
||||
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
|
||||
amdgpu_userq_unmap_helper(userq_mgr, queue);
|
||||
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
if (uqm == userq_mgr) {
|
||||
list_del(&uqm->list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
idr_destroy(&userq_mgr->userq_idr);
|
||||
xa_destroy(&userq_mgr->userq_mgr_xa);
|
||||
mutex_unlock(&userq_mgr->userq_mutex);
|
||||
mutex_unlock(&adev->userq_mutex);
|
||||
mutex_destroy(&userq_mgr->userq_mutex);
|
||||
}
|
||||
|
||||
|
|
@ -1142,25 +1148,23 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
|
|||
{
|
||||
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
int queue_id;
|
||||
struct amdgpu_userq_mgr *uqm;
|
||||
unsigned long queue_id;
|
||||
int r;
|
||||
|
||||
if (!ip_mask)
|
||||
return 0;
|
||||
|
||||
guard(mutex)(&adev->userq_mutex);
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
|
||||
uqm = queue->userq_mgr;
|
||||
cancel_delayed_work_sync(&uqm->resume_work);
|
||||
guard(mutex)(&uqm->userq_mutex);
|
||||
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
|
||||
if (adev->in_s0ix)
|
||||
r = amdgpu_userq_preempt_helper(uqm, queue);
|
||||
else
|
||||
r = amdgpu_userq_unmap_helper(uqm, queue);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
if (adev->in_s0ix)
|
||||
r = amdgpu_userq_preempt_helper(uqm, queue);
|
||||
else
|
||||
r = amdgpu_userq_unmap_helper(uqm, queue);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1169,24 +1173,22 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
|
|||
{
|
||||
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
int queue_id;
|
||||
struct amdgpu_userq_mgr *uqm;
|
||||
unsigned long queue_id;
|
||||
int r;
|
||||
|
||||
if (!ip_mask)
|
||||
return 0;
|
||||
|
||||
guard(mutex)(&adev->userq_mutex);
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
|
||||
uqm = queue->userq_mgr;
|
||||
guard(mutex)(&uqm->userq_mutex);
|
||||
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
|
||||
if (adev->in_s0ix)
|
||||
r = amdgpu_userq_restore_helper(uqm, queue);
|
||||
else
|
||||
r = amdgpu_userq_map_helper(uqm, queue);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
if (adev->in_s0ix)
|
||||
r = amdgpu_userq_restore_helper(uqm, queue);
|
||||
else
|
||||
r = amdgpu_userq_map_helper(uqm, queue);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
@ -1197,33 +1199,31 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
int queue_id;
|
||||
struct amdgpu_userq_mgr *uqm;
|
||||
unsigned long queue_id;
|
||||
int ret = 0, r;
|
||||
|
||||
/* only need to stop gfx/compute */
|
||||
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&adev->userq_mutex);
|
||||
if (adev->userq_halt_for_enforce_isolation)
|
||||
dev_warn(adev->dev, "userq scheduling already stopped!\n");
|
||||
adev->userq_halt_for_enforce_isolation = true;
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
|
||||
uqm = queue->userq_mgr;
|
||||
cancel_delayed_work_sync(&uqm->resume_work);
|
||||
mutex_lock(&uqm->userq_mutex);
|
||||
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
|
||||
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
|
||||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
|
||||
(queue->xcp_id == idx)) {
|
||||
r = amdgpu_userq_preempt_helper(uqm, queue);
|
||||
if (r)
|
||||
ret = r;
|
||||
}
|
||||
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
|
||||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
|
||||
(queue->xcp_id == idx)) {
|
||||
r = amdgpu_userq_preempt_helper(uqm, queue);
|
||||
if (r)
|
||||
ret = r;
|
||||
}
|
||||
mutex_unlock(&uqm->userq_mutex);
|
||||
}
|
||||
mutex_unlock(&adev->userq_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -1232,21 +1232,20 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
|
|||
{
|
||||
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
int queue_id;
|
||||
struct amdgpu_userq_mgr *uqm;
|
||||
unsigned long queue_id;
|
||||
int ret = 0, r;
|
||||
|
||||
/* only need to stop gfx/compute */
|
||||
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&adev->userq_mutex);
|
||||
if (!adev->userq_halt_for_enforce_isolation)
|
||||
dev_warn(adev->dev, "userq scheduling already started!\n");
|
||||
adev->userq_halt_for_enforce_isolation = false;
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
|
||||
uqm = queue->userq_mgr;
|
||||
mutex_lock(&uqm->userq_mutex);
|
||||
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
|
||||
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
|
||||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
|
||||
(queue->xcp_id == idx)) {
|
||||
|
|
@ -1254,10 +1253,9 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
|
|||
if (r)
|
||||
ret = r;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&uqm->userq_mutex);
|
||||
}
|
||||
mutex_unlock(&adev->userq_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -96,11 +96,15 @@ struct amdgpu_userq_funcs {
|
|||
|
||||
/* Usermode queues for gfx */
|
||||
struct amdgpu_userq_mgr {
|
||||
struct idr userq_idr;
|
||||
/**
|
||||
* @userq_mgr_xa: Per-process user queue map (queue ID → queue)
|
||||
* Key: queue_id (unique ID within the process's userq manager)
|
||||
* Value: struct amdgpu_usermode_queue
|
||||
*/
|
||||
struct xarray userq_mgr_xa;
|
||||
struct mutex userq_mutex;
|
||||
struct amdgpu_device *adev;
|
||||
struct delayed_work resume_work;
|
||||
struct list_head list;
|
||||
struct drm_file *file;
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -537,7 +537,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
/* Retrieve the user queue */
|
||||
queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
|
||||
queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
|
||||
if (!queue) {
|
||||
r = -ENOENT;
|
||||
goto put_gobj_write;
|
||||
|
|
@ -899,7 +899,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
|
|||
*/
|
||||
num_fences = dma_fence_dedup_array(fences, num_fences);
|
||||
|
||||
waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
|
||||
waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
|
||||
if (!waitq) {
|
||||
r = -EINVAL;
|
||||
goto free_fences;
|
||||
|
|
|
|||
|
|
@ -937,9 +937,10 @@ static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t
|
|||
int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
|
||||
uint32_t init_hdr_offset = adev->virt.init_data_header.offset;
|
||||
uint32_t init_hdr_size = adev->virt.init_data_header.size_kb << 10;
|
||||
uint64_t vram_size;
|
||||
u64 init_hdr_offset = adev->virt.init_data_header.offset;
|
||||
u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */
|
||||
u64 vram_size;
|
||||
u64 end;
|
||||
int r = 0;
|
||||
uint8_t checksum = 0;
|
||||
|
||||
|
|
@ -957,7 +958,7 @@ int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
|
|||
return -EINVAL;
|
||||
vram_size <<= 20;
|
||||
|
||||
if ((init_hdr_offset + init_hdr_size) > vram_size) {
|
||||
if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
|
||||
dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
@ -1101,7 +1102,7 @@ out:
|
|||
}
|
||||
|
||||
int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
|
||||
int data_id, uint8_t *binary, uint64_t *size)
|
||||
int data_id, uint8_t *binary, u32 *size)
|
||||
{
|
||||
uint32_t data_offset = 0;
|
||||
uint32_t data_size = 0;
|
||||
|
|
|
|||
|
|
@ -443,7 +443,7 @@ void amdgpu_virt_init(struct amdgpu_device *adev);
|
|||
|
||||
int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
|
||||
int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
|
||||
int data_id, uint8_t *binary, uint64_t *size);
|
||||
int data_id, uint8_t *binary, u32 *size);
|
||||
|
||||
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
|
||||
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
|
||||
|
|
|
|||
|
|
@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
|
||||
{
|
||||
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
|
||||
case IP_VERSION(6, 1, 1):
|
||||
return adev->pm.fw_version < 0x0a640500;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static int vpe_get_dpm_level(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_vpe *vpe = &adev->vpe;
|
||||
|
||||
if (!adev->pm.dpm_enabled)
|
||||
return 0;
|
||||
|
||||
return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
|
||||
}
|
||||
|
||||
static void vpe_idle_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_device *adev =
|
||||
|
|
@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
|
|||
unsigned int fences = 0;
|
||||
|
||||
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
|
||||
if (fences)
|
||||
goto reschedule;
|
||||
|
||||
if (fences == 0)
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
|
||||
else
|
||||
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
|
||||
if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
|
||||
goto reschedule;
|
||||
|
||||
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
|
||||
return;
|
||||
|
||||
reschedule:
|
||||
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
|
||||
}
|
||||
|
||||
static int vpe_common_init(struct amdgpu_vpe *vpe)
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// SPDX-License-Identifier: MIT
|
||||
/*
|
||||
* Copyright 2018 Advanced Micro Devices, Inc.
|
||||
*
|
||||
|
|
|
|||
|
|
@ -1843,6 +1843,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
|
|||
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
|
||||
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
|
||||
|
||||
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
|
||||
adev->rev_id == 0x3)
|
||||
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
|
||||
|
||||
if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
|
||||
vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
|
||||
adev->gmc.vram_vendor = vram_info & 0xF;
|
||||
|
|
|
|||
|
|
@ -205,10 +205,10 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
|
|||
int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
|
||||
struct mes_detect_and_reset_queue_input input;
|
||||
struct amdgpu_usermode_queue *queue;
|
||||
struct amdgpu_userq_mgr *uqm, *tmp;
|
||||
unsigned int hung_db_num = 0;
|
||||
int queue_id, r, i;
|
||||
unsigned long queue_id;
|
||||
u32 db_array[8];
|
||||
int r, i;
|
||||
|
||||
if (db_array_size > 8) {
|
||||
dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
|
||||
|
|
@ -227,16 +227,14 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
|
|||
if (r) {
|
||||
dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
|
||||
} else if (hung_db_num) {
|
||||
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
|
||||
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
|
||||
if (queue->queue_type == queue_type) {
|
||||
for (i = 0; i < hung_db_num; i++) {
|
||||
if (queue->doorbell_index == db_array[i]) {
|
||||
queue->state = AMDGPU_USERQ_STATE_HUNG;
|
||||
atomic_inc(&adev->gpu_reset_counter);
|
||||
amdgpu_userq_fence_driver_force_completion(queue);
|
||||
drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
|
||||
}
|
||||
xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
|
||||
if (queue->queue_type == queue_type) {
|
||||
for (i = 0; i < hung_db_num; i++) {
|
||||
if (queue->doorbell_index == db_array[i]) {
|
||||
queue->state = AMDGPU_USERQ_STATE_HUNG;
|
||||
atomic_inc(&adev->gpu_reset_counter);
|
||||
amdgpu_userq_fence_driver_force_completion(queue);
|
||||
drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -254,7 +252,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
|
||||
struct drm_amdgpu_userq_in *mqd_user = args_in;
|
||||
struct amdgpu_mqd_prop *userq_props;
|
||||
struct amdgpu_gfx_shadow_info shadow_info;
|
||||
int r;
|
||||
|
||||
/* Structure to initialize MQD for userqueue using generic MQD init function */
|
||||
|
|
@ -280,8 +277,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
userq_props->doorbell_index = queue->doorbell_index;
|
||||
userq_props->fence_address = queue->fence_drv->gpu_addr;
|
||||
|
||||
if (adev->gfx.funcs->get_gfx_shadow_info)
|
||||
adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
|
||||
if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
|
||||
struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
|
||||
|
||||
|
|
@ -299,7 +294,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
}
|
||||
|
||||
r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
|
||||
max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE));
|
||||
2048);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
|
||||
|
|
@ -312,6 +307,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
kfree(compute_mqd);
|
||||
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
|
||||
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
|
||||
struct amdgpu_gfx_shadow_info shadow_info;
|
||||
|
||||
if (adev->gfx.funcs->get_gfx_shadow_info) {
|
||||
adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
|
||||
} else {
|
||||
r = -EINVAL;
|
||||
goto free_mqd;
|
||||
}
|
||||
|
||||
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
|
||||
DRM_ERROR("Invalid GFX MQD\n");
|
||||
|
|
@ -335,6 +338,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
shadow_info.shadow_size);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
|
||||
shadow_info.csa_size);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
|
||||
kfree(mqd_gfx_v11);
|
||||
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
|
||||
|
|
@ -353,7 +360,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
|
|||
goto free_mqd;
|
||||
}
|
||||
r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
|
||||
shadow_info.csa_size);
|
||||
32);
|
||||
if (r)
|
||||
goto free_mqd;
|
||||
|
||||
|
|
|
|||
|
|
@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
|
|||
|
||||
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 tmp;
|
||||
u32 rev_id;
|
||||
|
||||
tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
|
||||
/* If it is VF or subrevision holds a non-zero value, that should be used */
|
||||
if (tmp || amdgpu_sriov_vf(adev))
|
||||
return tmp;
|
||||
/*
|
||||
* fetch the sub-revision field from the IP-discovery table
|
||||
* (returns zero if the table entry is not populated).
|
||||
*/
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
|
||||
} else {
|
||||
rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
|
||||
rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
|
||||
STRAP_ATI_REV_ID_DEV0_F0);
|
||||
}
|
||||
|
||||
/* If discovery subrev is not updated, use register version */
|
||||
tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
|
||||
tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
|
||||
STRAP_ATI_REV_ID_DEV0_F0);
|
||||
|
||||
return tmp;
|
||||
return rev_id;
|
||||
}
|
||||
|
||||
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
|
||||
|
|
|
|||
|
|
@ -1897,6 +1897,8 @@ fail_packet_manager_init:
|
|||
|
||||
static int stop_cpsch(struct device_queue_manager *dqm)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
dqm_lock(dqm);
|
||||
if (!dqm->sched_running) {
|
||||
dqm_unlock(dqm);
|
||||
|
|
@ -1904,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
|
|||
}
|
||||
|
||||
if (!dqm->dev->kfd->shared_resources.enable_mes)
|
||||
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
|
||||
ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
|
||||
0, USE_DEFAULT_GRACE_PERIOD, false);
|
||||
else
|
||||
remove_all_kfd_queues_mes(dqm);
|
||||
ret = remove_all_kfd_queues_mes(dqm);
|
||||
|
||||
dqm->sched_running = false;
|
||||
|
||||
|
|
@ -1920,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
|
|||
dqm->detect_hang_info = NULL;
|
||||
dqm_unlock(dqm);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
|
||||
|
|
|
|||
|
|
@ -1083,7 +1083,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
|
|||
* for auto suspend
|
||||
*/
|
||||
if (pdd->runtime_inuse) {
|
||||
pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
|
||||
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
|
||||
pdd->runtime_inuse = false;
|
||||
}
|
||||
|
|
@ -1162,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work)
|
|||
release_work);
|
||||
struct dma_fence *ef;
|
||||
|
||||
kfd_process_dequeue_from_all_devices(p);
|
||||
pqm_uninit(&p->pqm);
|
||||
|
||||
/*
|
||||
* If GPU in reset, user queues may still running, wait for reset complete.
|
||||
*/
|
||||
|
|
@ -1226,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
|
|||
cancel_delayed_work_sync(&p->eviction_work);
|
||||
cancel_delayed_work_sync(&p->restore_work);
|
||||
|
||||
/*
|
||||
* Dequeue and destroy user queues, it is not safe for GPU to access
|
||||
* system memory after mmu release notifier callback returns because
|
||||
* exit_mmap free process memory afterwards.
|
||||
*/
|
||||
kfd_process_dequeue_from_all_devices(p);
|
||||
pqm_uninit(&p->pqm);
|
||||
|
||||
for (i = 0; i < p->n_pdds; i++) {
|
||||
struct kfd_process_device *pdd = p->pdds[i];
|
||||
|
||||
|
|
|
|||
|
|
@ -1738,12 +1738,15 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
|
|||
|
||||
WRITE_ONCE(p->svms.faulting_task, current);
|
||||
range = amdgpu_hmm_range_alloc(NULL);
|
||||
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
|
||||
readonly, owner,
|
||||
range);
|
||||
if (likely(range))
|
||||
r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
|
||||
readonly, owner, range);
|
||||
else
|
||||
r = -ENOMEM;
|
||||
WRITE_ONCE(p->svms.faulting_task, NULL);
|
||||
if (r) {
|
||||
amdgpu_hmm_range_free(range);
|
||||
range = NULL;
|
||||
pr_debug("failed %d to get svm range pages\n", r);
|
||||
}
|
||||
} else {
|
||||
|
|
@ -1761,7 +1764,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
|
|||
svm_range_lock(prange);
|
||||
|
||||
/* Free backing memory of hmm_range if it was initialized
|
||||
* Overrride return value to TRY AGAIN only if prior returns
|
||||
* Override return value to TRY AGAIN only if prior returns
|
||||
* were successful
|
||||
*/
|
||||
if (range && !amdgpu_hmm_range_valid(range) && !r) {
|
||||
|
|
@ -1769,7 +1772,8 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
|
|||
r = -EAGAIN;
|
||||
}
|
||||
/* Free the hmm range */
|
||||
amdgpu_hmm_range_free(range);
|
||||
if (range)
|
||||
amdgpu_hmm_range_free(range);
|
||||
|
||||
|
||||
if (!r && !list_empty(&prange->child_list)) {
|
||||
|
|
|
|||
|
|
@ -3853,7 +3853,9 @@ void amdgpu_dm_update_connector_after_detect(
|
|||
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
|
||||
aconnector->connector_id, aconnector->dc_sink, sink);
|
||||
|
||||
guard(mutex)(&dev->mode_config.mutex);
|
||||
/* When polling, DRM has already locked the mutex for us. */
|
||||
if (!drm_kms_helper_is_poll_worker())
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
|
||||
/*
|
||||
* 1. Update status of the drm connector
|
||||
|
|
@ -3916,6 +3918,10 @@ void amdgpu_dm_update_connector_after_detect(
|
|||
}
|
||||
|
||||
update_subconnector_property(aconnector);
|
||||
|
||||
/* When polling, the mutex will be unlocked for us by DRM. */
|
||||
if (!drm_kms_helper_is_poll_worker())
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
|
||||
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
|
||||
|
|
@ -5200,6 +5206,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
|
|||
static void setup_backlight_device(struct amdgpu_display_manager *dm,
|
||||
struct amdgpu_dm_connector *aconnector)
|
||||
{
|
||||
struct amdgpu_dm_backlight_caps *caps;
|
||||
struct dc_link *link = aconnector->dc_link;
|
||||
int bl_idx = dm->num_of_edps;
|
||||
|
||||
|
|
@ -5219,6 +5226,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
|
|||
dm->num_of_edps++;
|
||||
|
||||
update_connector_ext_caps(aconnector);
|
||||
caps = &dm->backlight_caps[aconnector->bl_idx];
|
||||
|
||||
/* Only offer ABM property when non-OLED and user didn't turn off by module parameter */
|
||||
if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
|
||||
drm_object_attach_property(&aconnector->base.base,
|
||||
dm->adev->mode_info.abm_level_property,
|
||||
ABM_SYSFS_CONTROL);
|
||||
}
|
||||
|
||||
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
|
||||
|
|
@ -7218,29 +7232,101 @@ finish:
|
|||
return stream;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dm_connector_poll() - Poll a connector to see if it's connected to a display
|
||||
*
|
||||
* Used for connectors that don't support HPD (hotplug detection)
|
||||
* to periodically checked whether the connector is connected to a display.
|
||||
*/
|
||||
static enum drm_connector_status
|
||||
amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
|
||||
{
|
||||
struct drm_connector *connector = &aconnector->base;
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct amdgpu_device *adev = drm_to_adev(dev);
|
||||
struct dc_link *link = aconnector->dc_link;
|
||||
enum dc_connection_type conn_type = dc_connection_none;
|
||||
enum drm_connector_status status = connector_status_disconnected;
|
||||
|
||||
/* When we determined the connection using DAC load detection,
|
||||
* do NOT poll the connector do detect disconnect because
|
||||
* that would run DAC load detection again which can cause
|
||||
* visible visual glitches.
|
||||
*
|
||||
* Only allow to poll such a connector again when forcing.
|
||||
*/
|
||||
if (!force && link->local_sink && link->type == dc_connection_dac_load)
|
||||
return connector->status;
|
||||
|
||||
mutex_lock(&aconnector->hpd_lock);
|
||||
|
||||
if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
|
||||
conn_type != dc_connection_none) {
|
||||
mutex_lock(&adev->dm.dc_lock);
|
||||
|
||||
/* Only call full link detection when a sink isn't created yet,
|
||||
* ie. just when the display is plugged in, otherwise we risk flickering.
|
||||
*/
|
||||
if (link->local_sink ||
|
||||
dc_link_detect(link, DETECT_REASON_HPD))
|
||||
status = connector_status_connected;
|
||||
|
||||
mutex_unlock(&adev->dm.dc_lock);
|
||||
}
|
||||
|
||||
if (connector->status != status) {
|
||||
if (status == connector_status_disconnected) {
|
||||
if (link->local_sink)
|
||||
dc_sink_release(link->local_sink);
|
||||
|
||||
link->local_sink = NULL;
|
||||
link->dpcd_sink_count = 0;
|
||||
link->type = dc_connection_none;
|
||||
}
|
||||
|
||||
amdgpu_dm_update_connector_after_detect(aconnector);
|
||||
}
|
||||
|
||||
mutex_unlock(&aconnector->hpd_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
|
||||
*
|
||||
* A connector is considered connected when it has a sink that is not NULL.
|
||||
* For connectors that support HPD (hotplug detection), the connection is
|
||||
* handled in the HPD interrupt.
|
||||
* For connectors that may not support HPD, such as analog connectors,
|
||||
* DRM will call this function repeatedly to poll them.
|
||||
*
|
||||
* Notes:
|
||||
* 1. This interface is NOT called in context of HPD irq.
|
||||
* 2. This interface *is called* in context of user-mode ioctl. Which
|
||||
* makes it a bad place for *any* MST-related activity.
|
||||
*/
|
||||
static enum drm_connector_status
|
||||
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
|
||||
{
|
||||
bool connected;
|
||||
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
|
||||
|
||||
/*
|
||||
* Notes:
|
||||
* 1. This interface is NOT called in context of HPD irq.
|
||||
* 2. This interface *is called* in context of user-mode ioctl. Which
|
||||
* makes it a bad place for *any* MST-related activity.
|
||||
*/
|
||||
|
||||
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
|
||||
!aconnector->fake_enable)
|
||||
connected = (aconnector->dc_sink != NULL);
|
||||
else
|
||||
connected = (aconnector->base.force == DRM_FORCE_ON ||
|
||||
aconnector->base.force == DRM_FORCE_ON_DIGITAL);
|
||||
|
||||
update_subconnector_property(aconnector);
|
||||
|
||||
return (connected ? connector_status_connected :
|
||||
if (aconnector->base.force == DRM_FORCE_ON ||
|
||||
aconnector->base.force == DRM_FORCE_ON_DIGITAL)
|
||||
return connector_status_connected;
|
||||
else if (aconnector->base.force == DRM_FORCE_OFF)
|
||||
return connector_status_disconnected;
|
||||
|
||||
/* Poll analog connectors and only when either
|
||||
* disconnected or connected to an analog display.
|
||||
*/
|
||||
if (drm_kms_helper_is_poll_worker() &&
|
||||
dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
|
||||
(!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
|
||||
return amdgpu_dm_connector_poll(aconnector, force);
|
||||
|
||||
return (aconnector->dc_sink ? connector_status_connected :
|
||||
connector_status_disconnected);
|
||||
}
|
||||
|
||||
|
|
@ -7291,6 +7377,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
|
|||
} else if (property == adev->mode_info.underscan_property) {
|
||||
dm_new_state->underscan_enable = val;
|
||||
ret = 0;
|
||||
} else if (property == adev->mode_info.abm_level_property) {
|
||||
switch (val) {
|
||||
case ABM_SYSFS_CONTROL:
|
||||
dm_new_state->abm_sysfs_forbidden = false;
|
||||
break;
|
||||
case ABM_LEVEL_OFF:
|
||||
dm_new_state->abm_sysfs_forbidden = true;
|
||||
dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
|
||||
break;
|
||||
default:
|
||||
dm_new_state->abm_sysfs_forbidden = true;
|
||||
dm_new_state->abm_level = val;
|
||||
};
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
@ -7333,6 +7433,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
|
|||
} else if (property == adev->mode_info.underscan_property) {
|
||||
*val = dm_state->underscan_enable;
|
||||
ret = 0;
|
||||
} else if (property == adev->mode_info.abm_level_property) {
|
||||
if (!dm_state->abm_sysfs_forbidden)
|
||||
*val = ABM_SYSFS_CONTROL;
|
||||
else
|
||||
*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
|
||||
dm_state->abm_level : 0;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
@ -7385,10 +7492,16 @@ static ssize_t panel_power_savings_store(struct device *device,
|
|||
return -EINVAL;
|
||||
|
||||
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
|
||||
to_dm_connector_state(connector->state)->abm_level = val ?:
|
||||
ABM_LEVEL_IMMEDIATE_DISABLE;
|
||||
if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
|
||||
ret = -EBUSY;
|
||||
else
|
||||
to_dm_connector_state(connector->state)->abm_level = val ?:
|
||||
ABM_LEVEL_IMMEDIATE_DISABLE;
|
||||
drm_modeset_unlock(&dev->mode_config.connection_mutex);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm_kms_helper_hotplug_event(dev);
|
||||
|
||||
return count;
|
||||
|
|
@ -8228,7 +8341,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int to_drm_connector_type(enum signal_type st)
|
||||
static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
|
||||
{
|
||||
switch (st) {
|
||||
case SIGNAL_TYPE_HDMI_TYPE_A:
|
||||
|
|
@ -8244,6 +8357,10 @@ static int to_drm_connector_type(enum signal_type st)
|
|||
return DRM_MODE_CONNECTOR_DisplayPort;
|
||||
case SIGNAL_TYPE_DVI_DUAL_LINK:
|
||||
case SIGNAL_TYPE_DVI_SINGLE_LINK:
|
||||
if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
|
||||
connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
|
||||
return DRM_MODE_CONNECTOR_DVII;
|
||||
|
||||
return DRM_MODE_CONNECTOR_DVID;
|
||||
case SIGNAL_TYPE_VIRTUAL:
|
||||
return DRM_MODE_CONNECTOR_VIRTUAL;
|
||||
|
|
@ -8295,7 +8412,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
|
|||
|
||||
static struct drm_display_mode *
|
||||
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
|
||||
char *name,
|
||||
const char *name,
|
||||
int hdisplay, int vdisplay)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
|
|
@ -8317,6 +8434,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
|
|||
|
||||
}
|
||||
|
||||
static const struct amdgpu_dm_mode_size {
|
||||
char name[DRM_DISPLAY_MODE_LEN];
|
||||
int w;
|
||||
int h;
|
||||
} common_modes[] = {
|
||||
{ "640x480", 640, 480},
|
||||
{ "800x600", 800, 600},
|
||||
{ "1024x768", 1024, 768},
|
||||
{ "1280x720", 1280, 720},
|
||||
{ "1280x800", 1280, 800},
|
||||
{"1280x1024", 1280, 1024},
|
||||
{ "1440x900", 1440, 900},
|
||||
{"1680x1050", 1680, 1050},
|
||||
{"1600x1200", 1600, 1200},
|
||||
{"1920x1080", 1920, 1080},
|
||||
{"1920x1200", 1920, 1200}
|
||||
};
|
||||
|
||||
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
|
|
@ -8327,23 +8462,6 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
|
|||
to_amdgpu_dm_connector(connector);
|
||||
int i;
|
||||
int n;
|
||||
struct mode_size {
|
||||
char name[DRM_DISPLAY_MODE_LEN];
|
||||
int w;
|
||||
int h;
|
||||
} common_modes[] = {
|
||||
{ "640x480", 640, 480},
|
||||
{ "800x600", 800, 600},
|
||||
{ "1024x768", 1024, 768},
|
||||
{ "1280x720", 1280, 720},
|
||||
{ "1280x800", 1280, 800},
|
||||
{"1280x1024", 1280, 1024},
|
||||
{ "1440x900", 1440, 900},
|
||||
{"1680x1050", 1680, 1050},
|
||||
{"1600x1200", 1600, 1200},
|
||||
{"1920x1080", 1920, 1080},
|
||||
{"1920x1200", 1920, 1200}
|
||||
};
|
||||
|
||||
if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
|
||||
(connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
|
||||
|
|
@ -8544,6 +8662,10 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
|
|||
if (!(amdgpu_freesync_vid_mode && drm_edid))
|
||||
return;
|
||||
|
||||
if (!amdgpu_dm_connector->dc_sink || amdgpu_dm_connector->dc_sink->edid_caps.analog ||
|
||||
!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
|
||||
return;
|
||||
|
||||
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
|
||||
amdgpu_dm_connector->num_modes +=
|
||||
add_fs_modes(amdgpu_dm_connector);
|
||||
|
|
@ -8567,6 +8689,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
|
|||
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
|
||||
amdgpu_dm_connector->num_modes +=
|
||||
drm_add_modes_noedid(connector, 1920, 1080);
|
||||
|
||||
if (amdgpu_dm_connector->dc_sink->edid_caps.analog) {
|
||||
/* Analog monitor connected by DAC load detection.
|
||||
* Add common modes. It will be up to the user to select one that works.
|
||||
*/
|
||||
for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
|
||||
amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
|
||||
connector, common_modes[i].w, common_modes[i].h);
|
||||
}
|
||||
} else {
|
||||
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
|
||||
if (encoder)
|
||||
|
|
@ -8635,6 +8766,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
|
|||
case DRM_MODE_CONNECTOR_DVID:
|
||||
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
|
||||
break;
|
||||
case DRM_MODE_CONNECTOR_DVII:
|
||||
case DRM_MODE_CONNECTOR_VGA:
|
||||
aconnector->base.polled =
|
||||
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
@ -8836,7 +8972,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
|
|||
goto out_free;
|
||||
}
|
||||
|
||||
connector_type = to_drm_connector_type(link->connector_signal);
|
||||
connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
|
||||
|
||||
res = drm_connector_init_with_ddc(
|
||||
dm->ddev,
|
||||
|
|
|
|||
|
|
@ -993,6 +993,7 @@ struct dm_connector_state {
|
|||
bool underscan_enable;
|
||||
bool freesync_capable;
|
||||
bool update_hdcp;
|
||||
bool abm_sysfs_forbidden;
|
||||
uint8_t abm_level;
|
||||
int vcpi_slots;
|
||||
uint64_t pbn;
|
||||
|
|
|
|||
|
|
@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
|
|||
struct vblank_control_work *vblank_work =
|
||||
container_of(work, struct vblank_control_work, work);
|
||||
struct amdgpu_display_manager *dm = vblank_work->dm;
|
||||
struct amdgpu_device *adev = drm_to_adev(dm->ddev);
|
||||
int r;
|
||||
|
||||
mutex_lock(&dm->dc_lock);
|
||||
|
||||
|
|
@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
|
|||
|
||||
if (dm->active_vblank_irq_count == 0) {
|
||||
dc_post_update_surfaces_to_stream(dm->dc);
|
||||
|
||||
r = amdgpu_dpm_pause_power_profile(adev, true);
|
||||
if (r)
|
||||
dev_warn(adev->dev, "failed to set default power profile mode\n");
|
||||
|
||||
dc_allow_idle_optimizations(dm->dc, true);
|
||||
|
||||
r = amdgpu_dpm_pause_power_profile(adev, false);
|
||||
if (r)
|
||||
dev_warn(adev->dev, "failed to restore the power profile mode\n");
|
||||
}
|
||||
|
||||
mutex_unlock(&dm->dc_lock);
|
||||
|
|
@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
|
|||
int irq_type;
|
||||
int rc = 0;
|
||||
|
||||
if (acrtc->otg_inst == -1)
|
||||
goto skip;
|
||||
if (enable && !acrtc->base.enabled) {
|
||||
drm_dbg_vbl(crtc->dev,
|
||||
"Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
|
||||
acrtc->crtc_id, acrtc->base.enabled);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
|
||||
|
||||
|
|
@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
|
|||
return rc;
|
||||
}
|
||||
#endif
|
||||
skip:
|
||||
|
||||
if (amdgpu_in_reset(adev))
|
||||
return 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -759,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
|
|||
int max_param_num = 11;
|
||||
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
|
||||
bool disable_hpd = false;
|
||||
bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
|
||||
bool valid_test_pattern = false;
|
||||
uint8_t param_nums = 0;
|
||||
/* init with default 80bit custom pattern */
|
||||
|
|
@ -850,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
|
|||
* because it might have been disabled after a test pattern was set.
|
||||
* AUX depends on HPD * sequence dependent, do not move!
|
||||
*/
|
||||
if (!disable_hpd)
|
||||
if (supports_hpd && !disable_hpd)
|
||||
dc_link_enable_hpd(link);
|
||||
|
||||
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
|
||||
|
|
@ -888,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
|
|||
* Need disable interrupt to avoid SW driver disable DP output. This is
|
||||
* done after the test pattern is set.
|
||||
*/
|
||||
if (valid_test_pattern && disable_hpd)
|
||||
if (valid_test_pattern && supports_hpd && disable_hpd)
|
||||
dc_link_disable_hpd(link);
|
||||
|
||||
kfree(wr_buf);
|
||||
|
|
|
|||
|
|
@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
|
|||
edid_caps->panel_patch.remove_sink_ext_caps = true;
|
||||
break;
|
||||
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
|
||||
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
|
||||
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
|
||||
edid_caps->panel_patch.disable_colorimetry = true;
|
||||
break;
|
||||
|
|
@ -130,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
|
|||
edid_caps->serial_number = edid_buf->serial;
|
||||
edid_caps->manufacture_week = edid_buf->mfg_week;
|
||||
edid_caps->manufacture_year = edid_buf->mfg_year;
|
||||
edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
|
||||
|
||||
drm_edid_get_monitor_name(edid_buf,
|
||||
edid_caps->display_name,
|
||||
|
|
|
|||
|
|
@ -476,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
|
|||
|
||||
void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
|
||||
{
|
||||
struct drm_device *dev = adev_to_drm(adev);
|
||||
int src;
|
||||
struct list_head *hnd_list_h;
|
||||
struct list_head *hnd_list_l;
|
||||
|
|
@ -512,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
|
||||
if (dev->mode_config.poll_enabled)
|
||||
drm_kms_helper_poll_disable(dev);
|
||||
}
|
||||
|
||||
void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
|
||||
|
|
@ -537,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
|
|||
|
||||
void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
|
||||
{
|
||||
struct drm_device *dev = adev_to_drm(adev);
|
||||
int src;
|
||||
struct list_head *hnd_list_h, *hnd_list_l;
|
||||
unsigned long irq_table_flags;
|
||||
|
|
@ -557,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
|
|||
}
|
||||
|
||||
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
|
||||
|
||||
if (dev->mode_config.poll_enabled)
|
||||
drm_kms_helper_poll_enable(dev);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -893,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
|
|||
struct drm_connector_list_iter iter;
|
||||
int irq_type;
|
||||
int i;
|
||||
bool use_polling = false;
|
||||
|
||||
/* First, clear all hpd and hpdrx interrupts */
|
||||
for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
|
||||
|
|
@ -906,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
|
|||
struct amdgpu_dm_connector *amdgpu_dm_connector;
|
||||
const struct dc_link *dc_link;
|
||||
|
||||
use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
|
||||
continue;
|
||||
|
||||
|
|
@ -947,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
|
|||
}
|
||||
}
|
||||
drm_connector_list_iter_end(&iter);
|
||||
|
||||
if (use_polling)
|
||||
drm_kms_helper_poll_init(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -997,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
|
|||
}
|
||||
}
|
||||
drm_connector_list_iter_end(&iter);
|
||||
|
||||
if (dev->mode_config.poll_enabled)
|
||||
drm_kms_helper_poll_fini(dev);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ DC_LIBS += dcn30
|
|||
DC_LIBS += dcn301
|
||||
DC_LIBS += dcn31
|
||||
DC_LIBS += dml
|
||||
DC_LIBS += dml2
|
||||
DC_LIBS += dml2_0
|
||||
DC_LIBS += soc_and_ip_translator
|
||||
endif
|
||||
|
||||
|
|
|
|||
|
|
@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
|
|||
ATOM_OBJECT *object);
|
||||
static struct device_id device_type_from_device_id(uint16_t device_id);
|
||||
static uint32_t signal_to_ss_id(enum as_signal_type signal);
|
||||
static uint32_t get_support_mask_for_device_id(struct device_id device_id);
|
||||
static uint32_t get_support_mask_for_device_id(
|
||||
enum dal_device_type device_type,
|
||||
uint32_t enum_id);
|
||||
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
|
||||
struct bios_parser *bp,
|
||||
ATOM_OBJECT *object);
|
||||
|
|
@ -441,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4(
|
|||
le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
|
||||
info->pll_info.max_output_pxl_clk_pll_frequency =
|
||||
le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
|
||||
info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10;
|
||||
|
||||
if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
|
||||
/* Since there is no information on the SS, report conservative
|
||||
|
|
@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1(
|
|||
info->external_clock_source_frequency_for_dp =
|
||||
le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
|
||||
info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
|
||||
info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10;
|
||||
|
||||
/* There should be only one entry in the SS info table for Memory Clock
|
||||
*/
|
||||
|
|
@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control(
|
|||
return bp->cmd_tbl.transmitter_control(bp, cntl);
|
||||
}
|
||||
|
||||
static enum bp_result bios_parser_select_crtc_source(
|
||||
struct dc_bios *dcb,
|
||||
struct bp_crtc_source_select *bp_params)
|
||||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
|
||||
if (!bp->cmd_tbl.select_crtc_source)
|
||||
return BP_RESULT_FAILURE;
|
||||
|
||||
return bp->cmd_tbl.select_crtc_source(bp, bp_params);
|
||||
}
|
||||
|
||||
static enum bp_result bios_parser_encoder_control(
|
||||
struct dc_bios *dcb,
|
||||
struct bp_encoder_control *cntl)
|
||||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
|
||||
if (cntl->engine_id == ENGINE_ID_DACA) {
|
||||
if (!bp->cmd_tbl.dac1_encoder_control)
|
||||
return BP_RESULT_FAILURE;
|
||||
|
||||
return bp->cmd_tbl.dac1_encoder_control(
|
||||
bp, cntl->action == ENCODER_CONTROL_ENABLE,
|
||||
cntl->pixel_clock, ATOM_DAC1_PS2);
|
||||
} else if (cntl->engine_id == ENGINE_ID_DACB) {
|
||||
if (!bp->cmd_tbl.dac2_encoder_control)
|
||||
return BP_RESULT_FAILURE;
|
||||
|
||||
return bp->cmd_tbl.dac2_encoder_control(
|
||||
bp, cntl->action == ENCODER_CONTROL_ENABLE,
|
||||
cntl->pixel_clock, ATOM_DAC1_PS2);
|
||||
}
|
||||
|
||||
if (!bp->cmd_tbl.dig_encoder_control)
|
||||
return BP_RESULT_FAILURE;
|
||||
|
||||
return bp->cmd_tbl.dig_encoder_control(bp, cntl);
|
||||
}
|
||||
|
||||
static enum bp_result bios_parser_dac_load_detection(
|
||||
struct dc_bios *dcb,
|
||||
enum engine_id engine_id,
|
||||
enum dal_device_type device_type,
|
||||
uint32_t enum_id)
|
||||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
struct dc_context *ctx = dcb->ctx;
|
||||
struct bp_load_detection_parameters bp_params = {0};
|
||||
enum bp_result bp_result;
|
||||
uint32_t bios_0_scratch;
|
||||
uint32_t device_id_mask = 0;
|
||||
|
||||
bp_params.engine_id = engine_id;
|
||||
bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id);
|
||||
|
||||
if (engine_id != ENGINE_ID_DACA &&
|
||||
engine_id != ENGINE_ID_DACB)
|
||||
return BP_RESULT_UNSUPPORTED;
|
||||
|
||||
if (!bp->cmd_tbl.dac_load_detection)
|
||||
return BP_RESULT_UNSUPPORTED;
|
||||
|
||||
if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
|
||||
device_id_mask = ATOM_S0_CRT1_MASK;
|
||||
else if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
|
||||
device_id_mask = ATOM_S0_CRT2_MASK;
|
||||
else
|
||||
return BP_RESULT_UNSUPPORTED;
|
||||
|
||||
/* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */
|
||||
bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
|
||||
bios_0_scratch &= ~device_id_mask;
|
||||
dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch);
|
||||
|
||||
bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
|
||||
|
||||
if (bp_result != BP_RESULT_OK)
|
||||
return bp_result;
|
||||
|
||||
bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
|
||||
|
||||
if (bios_0_scratch & device_id_mask)
|
||||
return BP_RESULT_OK;
|
||||
|
||||
return BP_RESULT_FAILURE;
|
||||
}
|
||||
|
||||
static enum bp_result bios_parser_adjust_pixel_clock(
|
||||
struct dc_bios *dcb,
|
||||
struct bp_adjust_pixel_clock_parameters *bp_params)
|
||||
|
|
@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported(
|
|||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
|
||||
uint32_t mask = get_support_mask_for_device_id(id);
|
||||
uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id);
|
||||
|
||||
return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
|
||||
}
|
||||
|
|
@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal)
|
|||
return clk_id_ss;
|
||||
}
|
||||
|
||||
static uint32_t get_support_mask_for_device_id(struct device_id device_id)
|
||||
static uint32_t get_support_mask_for_device_id(
|
||||
enum dal_device_type device_type,
|
||||
uint32_t enum_id)
|
||||
{
|
||||
enum dal_device_type device_type = device_id.device_type;
|
||||
uint32_t enum_id = device_id.enum_id;
|
||||
|
||||
switch (device_type) {
|
||||
case DEVICE_TYPE_LCD:
|
||||
switch (enum_id) {
|
||||
|
|
@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
|
|||
.is_device_id_supported = bios_parser_is_device_id_supported,
|
||||
|
||||
/* COMMANDS */
|
||||
.select_crtc_source = bios_parser_select_crtc_source,
|
||||
|
||||
.encoder_control = bios_parser_encoder_control,
|
||||
|
||||
.dac_load_detection = bios_parser_dac_load_detection,
|
||||
|
||||
.transmitter_control = bios_parser_transmitter_control,
|
||||
|
||||
.enable_crtc = bios_parser_enable_crtc,
|
||||
|
|
|
|||
|
|
@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp);
|
|||
static void init_set_pixel_clock(struct bios_parser *bp);
|
||||
static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
|
||||
static void init_adjust_display_pll(struct bios_parser *bp);
|
||||
static void init_select_crtc_source(struct bios_parser *bp);
|
||||
static void init_dac_encoder_control(struct bios_parser *bp);
|
||||
static void init_dac_load_detection(struct bios_parser *bp);
|
||||
static void init_dac_output_control(struct bios_parser *bp);
|
||||
static void init_set_crtc_timing(struct bios_parser *bp);
|
||||
static void init_enable_crtc(struct bios_parser *bp);
|
||||
|
|
@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
|
|||
init_set_pixel_clock(bp);
|
||||
init_enable_spread_spectrum_on_ppll(bp);
|
||||
init_adjust_display_pll(bp);
|
||||
init_select_crtc_source(bp);
|
||||
init_dac_encoder_control(bp);
|
||||
init_dac_load_detection(bp);
|
||||
init_dac_output_control(bp);
|
||||
init_set_crtc_timing(bp);
|
||||
init_enable_crtc(bp);
|
||||
|
|
@ -1609,6 +1613,198 @@ static enum bp_result adjust_display_pll_v3(
|
|||
return result;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
********************************************************************************
|
||||
**
|
||||
** SELECT CRTC SOURCE
|
||||
**
|
||||
********************************************************************************
|
||||
*******************************************************************************/
|
||||
|
||||
static enum bp_result select_crtc_source_v1(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params);
|
||||
static enum bp_result select_crtc_source_v2(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params);
|
||||
static enum bp_result select_crtc_source_v3(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params);
|
||||
|
||||
static void init_select_crtc_source(struct bios_parser *bp)
|
||||
{
|
||||
switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
|
||||
case 1:
|
||||
bp->cmd_tbl.select_crtc_source = select_crtc_source_v1;
|
||||
break;
|
||||
case 2:
|
||||
bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
|
||||
break;
|
||||
case 3:
|
||||
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
|
||||
break;
|
||||
default:
|
||||
bp->cmd_tbl.select_crtc_source = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static enum bp_result select_crtc_source_v1(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params)
|
||||
{
|
||||
enum bp_result result = BP_RESULT_FAILURE;
|
||||
SELECT_CRTC_SOURCE_PS_ALLOCATION params;
|
||||
|
||||
if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC))
|
||||
return BP_RESULT_BADINPUT;
|
||||
|
||||
switch (bp_params->engine_id) {
|
||||
case ENGINE_ID_DACA:
|
||||
params.ucDevice = ATOM_DEVICE_CRT1_INDEX;
|
||||
break;
|
||||
case ENGINE_ID_DACB:
|
||||
params.ucDevice = ATOM_DEVICE_CRT2_INDEX;
|
||||
break;
|
||||
default:
|
||||
return BP_RESULT_BADINPUT;
|
||||
}
|
||||
|
||||
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
|
||||
result = BP_RESULT_OK;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
 * Map a DC engine id to the ATOM internal encoder id used by
 * SelectCRTC_Source v2/v3.  Writes *out_encoder_id and returns true on
 * success; returns false (output untouched) for unknown engines.
 */
static bool select_crtc_source_v2_encoder_id(
	enum engine_id engine_id, uint8_t *out_encoder_id)
{
	switch (engine_id) {
	case ENGINE_ID_DIGA:
		*out_encoder_id = ASIC_INT_DIG1_ENCODER_ID;
		return true;
	case ENGINE_ID_DIGB:
		*out_encoder_id = ASIC_INT_DIG2_ENCODER_ID;
		return true;
	case ENGINE_ID_DIGC:
		*out_encoder_id = ASIC_INT_DIG3_ENCODER_ID;
		return true;
	case ENGINE_ID_DIGD:
		*out_encoder_id = ASIC_INT_DIG4_ENCODER_ID;
		return true;
	case ENGINE_ID_DIGE:
		*out_encoder_id = ASIC_INT_DIG5_ENCODER_ID;
		return true;
	case ENGINE_ID_DIGF:
		*out_encoder_id = ASIC_INT_DIG6_ENCODER_ID;
		return true;
	case ENGINE_ID_DIGG:
		*out_encoder_id = ASIC_INT_DIG7_ENCODER_ID;
		return true;
	case ENGINE_ID_DACA:
		*out_encoder_id = ASIC_INT_DAC1_ENCODER_ID;
		return true;
	case ENGINE_ID_DACB:
		*out_encoder_id = ASIC_INT_DAC2_ENCODER_ID;
		return true;
	default:
		return false;
	}
}
|
||||
|
||||
/*
 * Map a DC signal type to the ATOM encoder mode used by
 * SelectCRTC_Source v2/v3.  Writes *out_encoder_mode and returns true
 * on success; returns false (output untouched) for unknown signals.
 */
static bool select_crtc_source_v2_encoder_mode(
	enum signal_type signal_type, uint8_t *out_encoder_mode)
{
	switch (signal_type) {
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
	case SIGNAL_TYPE_DVI_DUAL_LINK:
		*out_encoder_mode = ATOM_ENCODER_MODE_DVI;
		return true;
	case SIGNAL_TYPE_HDMI_TYPE_A:
		*out_encoder_mode = ATOM_ENCODER_MODE_HDMI;
		return true;
	case SIGNAL_TYPE_LVDS:
		*out_encoder_mode = ATOM_ENCODER_MODE_LVDS;
		return true;
	case SIGNAL_TYPE_RGB:
		*out_encoder_mode = ATOM_ENCODER_MODE_CRT;
		return true;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_EDP:
		/* eDP is driven with the plain DP encoder mode. */
		*out_encoder_mode = ATOM_ENCODER_MODE_DP;
		return true;
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		*out_encoder_mode = ATOM_ENCODER_MODE_DP_MST;
		return true;
	default:
		return false;
	}
}
|
||||
|
||||
static enum bp_result select_crtc_source_v2(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params)
|
||||
{
|
||||
enum bp_result result = BP_RESULT_FAILURE;
|
||||
SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
|
||||
|
||||
if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC))
|
||||
return BP_RESULT_BADINPUT;
|
||||
|
||||
if (!select_crtc_source_v2_encoder_id(
|
||||
bp_params->engine_id,
|
||||
¶ms.ucEncoderID))
|
||||
return BP_RESULT_BADINPUT;
|
||||
if (!select_crtc_source_v2_encoder_mode(
|
||||
bp_params->sink_signal,
|
||||
¶ms.ucEncodeMode))
|
||||
return BP_RESULT_BADINPUT;
|
||||
|
||||
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
|
||||
result = BP_RESULT_OK;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static enum bp_result select_crtc_source_v3(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params)
|
||||
{
|
||||
enum bp_result result = BP_RESULT_FAILURE;
|
||||
SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
|
||||
|
||||
if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC))
|
||||
return BP_RESULT_BADINPUT;
|
||||
|
||||
if (!select_crtc_source_v2_encoder_id(
|
||||
bp_params->engine_id,
|
||||
¶ms.ucEncoderID))
|
||||
return BP_RESULT_BADINPUT;
|
||||
if (!select_crtc_source_v2_encoder_mode(
|
||||
bp_params->sink_signal,
|
||||
¶ms.ucEncodeMode))
|
||||
return BP_RESULT_BADINPUT;
|
||||
|
||||
params.ucDstBpc = bp_params->bit_depth;
|
||||
|
||||
if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
|
||||
result = BP_RESULT_OK;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
********************************************************************************
|
||||
**
|
||||
|
|
@ -1708,6 +1904,96 @@ static enum bp_result dac2_encoder_control_v1(
|
|||
return result;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
********************************************************************************
|
||||
**
|
||||
** DAC LOAD DETECTION
|
||||
**
|
||||
********************************************************************************
|
||||
*******************************************************************************/
|
||||
|
||||
static enum bp_result dac_load_detection_v1(
|
||||
struct bios_parser *bp,
|
||||
struct bp_load_detection_parameters *bp_params);
|
||||
|
||||
static enum bp_result dac_load_detection_v3(
|
||||
struct bios_parser *bp,
|
||||
struct bp_load_detection_parameters *bp_params);
|
||||
|
||||
static void init_dac_load_detection(struct bios_parser *bp)
|
||||
{
|
||||
switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) {
|
||||
case 1:
|
||||
case 2:
|
||||
bp->cmd_tbl.dac_load_detection = dac_load_detection_v1;
|
||||
break;
|
||||
case 3:
|
||||
default:
|
||||
bp->cmd_tbl.dac_load_detection = dac_load_detection_v3;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void dac_load_detect_prepare_params(
|
||||
struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params,
|
||||
enum engine_id engine_id,
|
||||
uint16_t device_id,
|
||||
uint8_t misc)
|
||||
{
|
||||
uint8_t dac_type = ENGINE_ID_DACA;
|
||||
|
||||
if (engine_id == ENGINE_ID_DACB)
|
||||
dac_type = ATOM_DAC_B;
|
||||
|
||||
params->sDacload.usDeviceID = cpu_to_le16(device_id);
|
||||
params->sDacload.ucDacType = dac_type;
|
||||
params->sDacload.ucMisc = misc;
|
||||
}
|
||||
|
||||
static enum bp_result dac_load_detection_v1(
|
||||
struct bios_parser *bp,
|
||||
struct bp_load_detection_parameters *bp_params)
|
||||
{
|
||||
enum bp_result result = BP_RESULT_FAILURE;
|
||||
DAC_LOAD_DETECTION_PS_ALLOCATION params;
|
||||
|
||||
dac_load_detect_prepare_params(
|
||||
¶ms,
|
||||
bp_params->engine_id,
|
||||
bp_params->device_id,
|
||||
0);
|
||||
|
||||
if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
|
||||
result = BP_RESULT_OK;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static enum bp_result dac_load_detection_v3(
|
||||
struct bios_parser *bp,
|
||||
struct bp_load_detection_parameters *bp_params)
|
||||
{
|
||||
enum bp_result result = BP_RESULT_FAILURE;
|
||||
DAC_LOAD_DETECTION_PS_ALLOCATION params;
|
||||
|
||||
uint8_t misc = 0;
|
||||
|
||||
if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT ||
|
||||
bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT)
|
||||
misc = DAC_LOAD_MISC_YPrPb;
|
||||
|
||||
dac_load_detect_prepare_params(
|
||||
¶ms,
|
||||
bp_params->engine_id,
|
||||
bp_params->device_id,
|
||||
misc);
|
||||
|
||||
if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
|
||||
result = BP_RESULT_OK;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*******************************************************************************
|
||||
********************************************************************************
|
||||
**
|
||||
|
|
|
|||
|
|
@ -52,6 +52,9 @@ struct cmd_tbl {
|
|||
enum bp_result (*adjust_display_pll)(
|
||||
struct bios_parser *bp,
|
||||
struct bp_adjust_pixel_clock_parameters *bp_params);
|
||||
enum bp_result (*select_crtc_source)(
|
||||
struct bios_parser *bp,
|
||||
struct bp_crtc_source_select *bp_params);
|
||||
enum bp_result (*dac1_encoder_control)(
|
||||
struct bios_parser *bp,
|
||||
bool enable,
|
||||
|
|
@ -68,6 +71,9 @@ struct cmd_tbl {
|
|||
enum bp_result (*dac2_output_control)(
|
||||
struct bios_parser *bp,
|
||||
bool enable);
|
||||
enum bp_result (*dac_load_detection)(
|
||||
struct bios_parser *bp,
|
||||
struct bp_load_detection_parameters *bp_params);
|
||||
enum bp_result (*set_crtc_timing)(
|
||||
struct bios_parser *bp,
|
||||
struct bp_hw_crtc_timing_parameters *bp_params);
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@
|
|||
#include "dm_helpers.h"
|
||||
|
||||
#include "dc_dmub_srv.h"
|
||||
|
||||
#include "reg_helper.h"
|
||||
#include "logger_types.h"
|
||||
#undef DC_LOGGER
|
||||
#define DC_LOGGER \
|
||||
|
|
@ -48,9 +48,43 @@
|
|||
|
||||
#include "link_service.h"
|
||||
|
||||
#define MAX_INSTANCE 7
|
||||
#define MAX_SEGMENT 8
|
||||
|
||||
struct IP_BASE_INSTANCE {
|
||||
unsigned int segment[MAX_SEGMENT];
|
||||
};
|
||||
|
||||
struct IP_BASE {
|
||||
struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
|
||||
};
|
||||
|
||||
static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
|
||||
{ { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
|
||||
{ { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
|
||||
{ { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
|
||||
{ { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
|
||||
{ { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } };
|
||||
|
||||
#define regCLK1_CLK0_CURRENT_CNT 0x0314
|
||||
#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
|
||||
#define regCLK1_CLK1_CURRENT_CNT 0x0315
|
||||
#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
|
||||
#define regCLK1_CLK2_CURRENT_CNT 0x0316
|
||||
#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
|
||||
#define regCLK1_CLK3_CURRENT_CNT 0x0317
|
||||
#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
|
||||
#define regCLK1_CLK4_CURRENT_CNT 0x0318
|
||||
#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
|
||||
#define regCLK1_CLK5_CURRENT_CNT 0x0319
|
||||
#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
|
||||
|
||||
#define TO_CLK_MGR_DCN315(clk_mgr)\
|
||||
container_of(clk_mgr, struct clk_mgr_dcn315, base)
|
||||
|
||||
#define REG(reg_name) \
|
||||
(CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
|
||||
|
||||
#define UNSUPPORTED_DCFCLK 10000000
|
||||
#define MIN_DPP_DISP_CLK 100000
|
||||
|
||||
|
|
@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
|
|||
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
|
||||
}
|
||||
|
||||
static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
|
||||
{
|
||||
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
|
||||
|
||||
// read dtbclk
|
||||
internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
|
||||
|
||||
// read dcfclk
|
||||
internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
|
||||
|
||||
// read dppclk
|
||||
internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
|
||||
|
||||
// read dprefclk
|
||||
internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
|
||||
|
||||
// read dispclk
|
||||
internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
|
||||
}
|
||||
|
||||
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
|
||||
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
|
||||
{
|
||||
struct dcn35_clk_internal internal = {0};
|
||||
|
||||
dcn315_dump_clk_registers_internal(&internal, clk_mgr_base);
|
||||
|
||||
regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
|
||||
regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
|
||||
regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
|
||||
regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
|
||||
regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = {
|
|||
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
|
||||
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
|
||||
.update_clocks = dcn315_update_clocks,
|
||||
.init_clocks = dcn31_init_clocks,
|
||||
.init_clocks = dcn315_init_clocks,
|
||||
.enable_pme_wa = dcn315_enable_pme_wa,
|
||||
.are_clock_states_equal = dcn31_are_clock_states_equal,
|
||||
.notify_wm_ranges = dcn315_notify_wm_ranges
|
||||
};
|
||||
extern struct clk_mgr_funcs dcn3_fpga_funcs;
|
||||
|
||||
void dcn315_init_clocks(struct clk_mgr *clk_mgr)
|
||||
{
|
||||
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
|
||||
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
|
||||
struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int);
|
||||
struct clk_log_info log_info = {0};
|
||||
|
||||
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
|
||||
// Assumption is that boot state always supports pstate
|
||||
clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
|
||||
clk_mgr->clks.p_state_change_support = true;
|
||||
clk_mgr->clks.prev_p_state_change_support = true;
|
||||
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
|
||||
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
|
||||
|
||||
dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info);
|
||||
clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
|
||||
}
|
||||
|
||||
void dcn315_clk_mgr_construct(
|
||||
struct dc_context *ctx,
|
||||
struct clk_mgr_dcn315 *clk_mgr,
|
||||
|
|
@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct(
|
|||
/* Saved clocks configured at boot for debug purposes */
|
||||
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
|
||||
&clk_mgr->base.base, &log_info);
|
||||
clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000;
|
||||
|
||||
clk_mgr->base.base.dprefclk_khz = 600000;
|
||||
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx,
|
|||
struct pp_smu_funcs *pp_smu,
|
||||
struct dccg *dccg);
|
||||
|
||||
void dcn315_init_clocks(struct clk_mgr *clk_mgr);
|
||||
void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
|
||||
|
||||
#endif //__DCN315_CLK_MGR_H__
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@
|
|||
#include "hw_sequencer_private.h"
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_FP)
|
||||
#include "dml2/dml2_internal_types.h"
|
||||
#include "dml2_0/dml2_internal_types.h"
|
||||
#include "soc_and_ip_translator.h"
|
||||
#endif
|
||||
|
||||
|
|
@ -148,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build";
|
|||
|
||||
/* Private functions */
|
||||
|
||||
static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
|
||||
static inline void elevate_update_type(
|
||||
struct surface_update_descriptor *descriptor,
|
||||
enum surface_update_type new_type,
|
||||
enum dc_lock_descriptor new_locks
|
||||
)
|
||||
{
|
||||
if (new > *original)
|
||||
*original = new;
|
||||
if (new_type > descriptor->update_type)
|
||||
descriptor->update_type = new_type;
|
||||
|
||||
descriptor->lock_descriptor |= new_locks;
|
||||
}
|
||||
|
||||
static void destroy_links(struct dc *dc)
|
||||
|
|
@ -493,13 +499,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
|
|||
1,
|
||||
*adjust);
|
||||
stream->adjust.timing_adjust_pending = false;
|
||||
|
||||
if (dc->hwss.notify_cursor_offload_drr_update)
|
||||
dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if (dc->hwss.notify_cursor_offload_drr_update)
|
||||
dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -1147,8 +1154,8 @@ static bool dc_construct(struct dc *dc,
|
|||
/* set i2c speed if not done by the respective dcnxxx__resource.c */
|
||||
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
|
||||
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
|
||||
if (dc->caps.max_optimizable_video_width == 0)
|
||||
dc->caps.max_optimizable_video_width = 5120;
|
||||
if (dc->check_config.max_optimizable_video_width == 0)
|
||||
dc->check_config.max_optimizable_video_width = 5120;
|
||||
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
|
||||
if (!dc->clk_mgr)
|
||||
goto fail;
|
||||
|
|
@ -2655,47 +2662,49 @@ static bool is_surface_in_context(
|
|||
return false;
|
||||
}
|
||||
|
||||
static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
|
||||
static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
|
||||
{
|
||||
union surface_update_flags *update_flags = &u->surface->update_flags;
|
||||
enum surface_update_type update_type = UPDATE_TYPE_FAST;
|
||||
struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
|
||||
|
||||
if (!u->plane_info)
|
||||
return UPDATE_TYPE_FAST;
|
||||
return update_type;
|
||||
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE);
|
||||
|
||||
if (u->plane_info->color_space != u->surface->color_space) {
|
||||
update_flags->bits.color_space_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
|
||||
update_flags->bits.horizontal_mirror_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->rotation != u->surface->rotation) {
|
||||
update_flags->bits.rotation_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->format != u->surface->format) {
|
||||
update_flags->bits.pixel_format_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->stereo_format != u->surface->stereo_format) {
|
||||
update_flags->bits.stereo_format_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
|
||||
update_flags->bits.per_pixel_alpha_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
|
||||
update_flags->bits.global_alpha_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->dcc.enable != u->surface->dcc.enable
|
||||
|
|
@ -2707,7 +2716,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
|
|||
* recalculate stutter period.
|
||||
*/
|
||||
update_flags->bits.dcc_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
|
||||
|
|
@ -2716,33 +2725,33 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
|
|||
* and DML calculation
|
||||
*/
|
||||
update_flags->bits.bpp_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|
||||
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
|
||||
update_flags->bits.plane_size_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;
|
||||
|
||||
if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
|
||||
update_flags->bits.swizzle_change = 1;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
|
||||
switch (tiling->gfxversion) {
|
||||
case DcGfxVersion9:
|
||||
case DcGfxVersion10:
|
||||
case DcGfxVersion11:
|
||||
if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
update_flags->bits.bandwidth_change = 1;
|
||||
}
|
||||
break;
|
||||
case DcGfxAddr3:
|
||||
if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
update_flags->bits.bandwidth_change = 1;
|
||||
}
|
||||
break;
|
||||
|
|
@ -2758,14 +2767,17 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
|
|||
return update_type;
|
||||
}
|
||||
|
||||
static enum surface_update_type get_scaling_info_update_type(
|
||||
const struct dc *dc,
|
||||
static struct surface_update_descriptor get_scaling_info_update_type(
|
||||
const struct dc_check_config *check_config,
|
||||
const struct dc_surface_update *u)
|
||||
{
|
||||
union surface_update_flags *update_flags = &u->surface->update_flags;
|
||||
struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
|
||||
|
||||
if (!u->scaling_info)
|
||||
return UPDATE_TYPE_FAST;
|
||||
return update_type;
|
||||
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE);
|
||||
|
||||
if (u->scaling_info->src_rect.width != u->surface->src_rect.width
|
||||
|| u->scaling_info->src_rect.height != u->surface->src_rect.height
|
||||
|
|
@ -2789,7 +2801,7 @@ static enum surface_update_type get_scaling_info_update_type(
|
|||
/* Making dst rect smaller requires a bandwidth change */
|
||||
update_flags->bits.bandwidth_change = 1;
|
||||
|
||||
if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
|
||||
if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
|
||||
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
|
||||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
|
||||
/* Changing clip size of a large surface may result in MPC slice count change */
|
||||
|
|
@ -2808,39 +2820,41 @@ static enum surface_update_type get_scaling_info_update_type(
|
|||
if (update_flags->bits.clock_change
|
||||
|| update_flags->bits.bandwidth_change
|
||||
|| update_flags->bits.scaling_change)
|
||||
return UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
|
||||
if (update_flags->bits.position_change)
|
||||
return UPDATE_TYPE_MED;
|
||||
elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
|
||||
return UPDATE_TYPE_FAST;
|
||||
return update_type;
|
||||
}
|
||||
|
||||
static enum surface_update_type det_surface_update(const struct dc *dc,
|
||||
const struct dc_surface_update *u)
|
||||
static struct surface_update_descriptor det_surface_update(
|
||||
const struct dc_check_config *check_config,
|
||||
struct dc_surface_update *u)
|
||||
{
|
||||
enum surface_update_type type;
|
||||
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
|
||||
struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
|
||||
union surface_update_flags *update_flags = &u->surface->update_flags;
|
||||
|
||||
if (u->surface->force_full_update) {
|
||||
update_flags->raw = 0xFFFFFFFF;
|
||||
return UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
return overall_type;
|
||||
}
|
||||
|
||||
update_flags->raw = 0; // Reset all flags
|
||||
|
||||
type = get_plane_info_update_type(dc, u);
|
||||
elevate_update_type(&overall_type, type);
|
||||
struct surface_update_descriptor inner_type = get_plane_info_update_type(u);
|
||||
|
||||
type = get_scaling_info_update_type(dc, u);
|
||||
elevate_update_type(&overall_type, type);
|
||||
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
|
||||
|
||||
inner_type = get_scaling_info_update_type(check_config, u);
|
||||
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
|
||||
|
||||
if (u->flip_addr) {
|
||||
update_flags->bits.addr_update = 1;
|
||||
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
|
||||
update_flags->bits.tmz_changed = 1;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
}
|
||||
if (u->in_transfer_func)
|
||||
|
|
@ -2876,13 +2890,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
|
|||
if (u->hdr_mult.value)
|
||||
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
|
||||
update_flags->bits.hdr_mult = 1;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_MED);
|
||||
// TODO: Should be fast?
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->sdr_white_level_nits)
|
||||
if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
|
||||
update_flags->bits.sdr_white_level_nits = 1;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
|
||||
// TODO: Should be fast?
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (u->cm2_params) {
|
||||
|
|
@ -2896,27 +2912,24 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
|
|||
update_flags->bits.mcm_transfer_function_enable_change = 1;
|
||||
}
|
||||
if (update_flags->bits.in_transfer_func_change) {
|
||||
type = UPDATE_TYPE_MED;
|
||||
elevate_update_type(&overall_type, type);
|
||||
// TODO: Fast?
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (update_flags->bits.lut_3d &&
|
||||
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
|
||||
type = UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, type);
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
if (update_flags->bits.mcm_transfer_function_enable_change) {
|
||||
type = UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, type);
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (dc->debug.enable_legacy_fast_update &&
|
||||
if (check_config->enable_legacy_fast_update &&
|
||||
(update_flags->bits.gamma_change ||
|
||||
update_flags->bits.gamut_remap_change ||
|
||||
update_flags->bits.input_csc_change ||
|
||||
update_flags->bits.coeff_reduction_change)) {
|
||||
type = UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, type);
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
return overall_type;
|
||||
}
|
||||
|
|
@ -2944,34 +2957,34 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda
|
|||
}
|
||||
}
|
||||
|
||||
static enum surface_update_type check_update_surfaces_for_stream(
|
||||
struct dc *dc,
|
||||
static struct surface_update_descriptor check_update_surfaces_for_stream(
|
||||
const struct dc_check_config *check_config,
|
||||
struct dc_surface_update *updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_status *stream_status)
|
||||
struct dc_stream_update *stream_update)
|
||||
{
|
||||
int i;
|
||||
enum surface_update_type overall_type = UPDATE_TYPE_FAST;
|
||||
struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
|
||||
|
||||
if (stream_update && stream_update->pending_test_pattern) {
|
||||
overall_type = UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
if (stream_update && stream_update->hw_cursor_req) {
|
||||
overall_type = UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
}
|
||||
|
||||
/* some stream updates require passive update */
|
||||
if (stream_update) {
|
||||
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
|
||||
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
|
||||
|
||||
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
|
||||
(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
|
||||
stream_update->integer_scaling_update)
|
||||
su_flags->bits.scaling = 1;
|
||||
|
||||
if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
|
||||
if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
|
||||
su_flags->bits.out_tf = 1;
|
||||
|
||||
if (stream_update->abm_level)
|
||||
|
|
@ -3007,7 +3020,7 @@ static enum surface_update_type check_update_surfaces_for_stream(
|
|||
su_flags->bits.out_csc = 1;
|
||||
|
||||
if (su_flags->raw != 0)
|
||||
overall_type = UPDATE_TYPE_FULL;
|
||||
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE);
|
||||
|
||||
if (stream_update->output_csc_transform)
|
||||
su_flags->bits.out_csc = 1;
|
||||
|
|
@ -3015,15 +3028,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
|
|||
/* Output transfer function changes do not require bandwidth recalculation,
|
||||
* so don't trigger a full update
|
||||
*/
|
||||
if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
|
||||
if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
|
||||
su_flags->bits.out_tf = 1;
|
||||
}
|
||||
|
||||
for (i = 0 ; i < surface_count; i++) {
|
||||
enum surface_update_type type =
|
||||
det_surface_update(dc, &updates[i]);
|
||||
for (int i = 0 ; i < surface_count; i++) {
|
||||
struct surface_update_descriptor inner_type =
|
||||
det_surface_update(check_config, &updates[i]);
|
||||
|
||||
elevate_update_type(&overall_type, type);
|
||||
elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
|
||||
}
|
||||
|
||||
return overall_type;
|
||||
|
|
@ -3034,23 +3047,18 @@ static enum surface_update_type check_update_surfaces_for_stream(
|
|||
*
|
||||
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
|
||||
*/
|
||||
enum surface_update_type dc_check_update_surfaces_for_stream(
|
||||
struct dc *dc,
|
||||
struct surface_update_descriptor dc_check_update_surfaces_for_stream(
|
||||
const struct dc_check_config *check_config,
|
||||
struct dc_surface_update *updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_status *stream_status)
|
||||
struct dc_stream_update *stream_update)
|
||||
{
|
||||
int i;
|
||||
enum surface_update_type type;
|
||||
|
||||
if (stream_update)
|
||||
stream_update->stream->update_flags.raw = 0;
|
||||
for (i = 0; i < surface_count; i++)
|
||||
for (size_t i = 0; i < surface_count; i++)
|
||||
updates[i].surface->update_flags.raw = 0;
|
||||
|
||||
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
|
||||
return type;
|
||||
return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
|
||||
}
|
||||
|
||||
static struct dc_stream_status *stream_get_status(
|
||||
|
|
@ -3419,11 +3427,12 @@ static void update_seamless_boot_flags(struct dc *dc,
|
|||
}
|
||||
}
|
||||
|
||||
static bool full_update_required_weak(struct dc *dc,
|
||||
struct dc_surface_update *srf_updates,
|
||||
static bool full_update_required_weak(
|
||||
const struct dc *dc,
|
||||
const struct dc_surface_update *srf_updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
struct dc_stream_state *stream);
|
||||
const struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_state *stream);
|
||||
|
||||
/**
|
||||
* update_planes_and_stream_state() - The function takes planes and stream
|
||||
|
|
@ -3471,8 +3480,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
|
|||
|
||||
context = dc->current_state;
|
||||
update_type = dc_check_update_surfaces_for_stream(
|
||||
dc, srf_updates, surface_count, stream_update, stream_status);
|
||||
|
||||
&dc->check_config, srf_updates, surface_count, stream_update).update_type;
|
||||
if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
|
||||
update_type = UPDATE_TYPE_FULL;
|
||||
|
||||
|
|
@ -5008,7 +5016,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
|
|||
}
|
||||
}
|
||||
|
||||
static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
|
||||
static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
|
@ -5049,11 +5057,12 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool full_update_required_weak(struct dc *dc,
|
||||
struct dc_surface_update *srf_updates,
|
||||
static bool full_update_required_weak(
|
||||
const struct dc *dc,
|
||||
const struct dc_surface_update *srf_updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
struct dc_stream_state *stream)
|
||||
const struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_state *stream)
|
||||
{
|
||||
const struct dc_state *context = dc->current_state;
|
||||
if (srf_updates)
|
||||
|
|
@ -5062,7 +5071,7 @@ static bool full_update_required_weak(struct dc *dc,
|
|||
return true;
|
||||
|
||||
if (stream) {
|
||||
const struct dc_stream_status *stream_status = dc_stream_get_status(stream);
|
||||
const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
|
||||
if (stream_status == NULL || stream_status->plane_count != surface_count)
|
||||
return true;
|
||||
}
|
||||
|
|
@ -5075,11 +5084,12 @@ static bool full_update_required_weak(struct dc *dc,
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool full_update_required(struct dc *dc,
|
||||
struct dc_surface_update *srf_updates,
|
||||
static bool full_update_required(
|
||||
const struct dc *dc,
|
||||
const struct dc_surface_update *srf_updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
struct dc_stream_state *stream)
|
||||
const struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_state *stream)
|
||||
{
|
||||
if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
|
||||
return true;
|
||||
|
|
@ -5139,12 +5149,13 @@ static bool full_update_required(struct dc *dc,
|
|||
return false;
|
||||
}
|
||||
|
||||
static bool fast_update_only(struct dc *dc,
|
||||
struct dc_fast_update *fast_update,
|
||||
struct dc_surface_update *srf_updates,
|
||||
static bool fast_update_only(
|
||||
const struct dc *dc,
|
||||
const struct dc_fast_update *fast_update,
|
||||
const struct dc_surface_update *srf_updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
struct dc_stream_state *stream)
|
||||
const struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_state *stream)
|
||||
{
|
||||
return fast_updates_exist(fast_update, surface_count)
|
||||
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
|
||||
|
|
@ -5207,7 +5218,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
|
|||
commit_minimal_transition_state_in_dc_update(dc, context, stream,
|
||||
srf_updates, surface_count);
|
||||
|
||||
if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
|
||||
if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
|
||||
commit_planes_for_stream_fast(dc,
|
||||
srf_updates,
|
||||
surface_count,
|
||||
|
|
@ -5250,7 +5261,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
|
|||
stream_update);
|
||||
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
|
||||
stream_update, stream) &&
|
||||
!dc->debug.enable_legacy_fast_update)
|
||||
!dc->check_config.enable_legacy_fast_update)
|
||||
commit_planes_for_stream_fast(dc,
|
||||
srf_updates,
|
||||
surface_count,
|
||||
|
|
@ -6376,7 +6387,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc)
|
|||
return false;
|
||||
}
|
||||
|
||||
bool dc_can_clear_cursor_limit(struct dc *dc)
|
||||
bool dc_can_clear_cursor_limit(const struct dc *dc)
|
||||
{
|
||||
uint32_t i;
|
||||
|
||||
|
|
@ -6405,3 +6416,8 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
|
|||
if (dc->hwss.get_underflow_debug_data)
|
||||
dc->hwss.get_underflow_debug_data(dc, tg, out_data);
|
||||
}
|
||||
|
||||
void dc_log_preos_dmcub_info(const struct dc *dc)
|
||||
{
|
||||
dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
|
|||
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
|
||||
{
|
||||
struct link_encoder *link_enc = NULL;
|
||||
enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
|
||||
enum engine_id encs_assigned[MAX_LINK_ENCODERS];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
|
||||
for (i = 0; i < MAX_LINK_ENCODERS; i++)
|
||||
encs_assigned[i] = ENGINE_ID_UNKNOWN;
|
||||
|
||||
/* Add assigned encoders to list. */
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@
|
|||
#define DC_LOGGER \
|
||||
dc->ctx->logger
|
||||
#define DC_LOGGER_INIT(logger)
|
||||
#include "dml2/dml2_wrapper.h"
|
||||
#include "dml2_0/dml2_wrapper.h"
|
||||
|
||||
#define UNABLE_TO_SPLIT -1
|
||||
|
||||
|
|
@ -446,6 +446,14 @@ bool resource_construct(
|
|||
DC_ERR("DC: failed to create stream_encoder!\n");
|
||||
pool->stream_enc_count++;
|
||||
}
|
||||
|
||||
for (i = 0; i < caps->num_analog_stream_encoder; i++) {
|
||||
pool->stream_enc[caps->num_stream_encoder + i] =
|
||||
create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx);
|
||||
if (pool->stream_enc[caps->num_stream_encoder + i] == NULL)
|
||||
DC_ERR("DC: failed to create analog stream_encoder %d!\n", i);
|
||||
pool->stream_enc_count++;
|
||||
}
|
||||
}
|
||||
|
||||
pool->hpo_dp_stream_enc_count = 0;
|
||||
|
|
@ -2690,17 +2698,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link)
|
|||
}
|
||||
|
||||
static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
|
||||
const struct dc_link *link, const struct resource_pool *pool)
|
||||
const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream)
|
||||
{
|
||||
int i;
|
||||
int i, j = -1;
|
||||
int stream_enc_inst = -1;
|
||||
int enc_count = pool->dig_link_enc_count;
|
||||
|
||||
/* for dpia, check preferred encoder first and then the next one */
|
||||
for (i = 0; i < enc_count; i++)
|
||||
if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
|
||||
break;
|
||||
/* Find stream encoder instance for the stream */
|
||||
if (stream) {
|
||||
for (i = 0; i < pool->pipe_count; i++) {
|
||||
if ((res_ctx->pipe_ctx[i].stream == stream) &&
|
||||
(res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) {
|
||||
stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
|
||||
/* Assign dpia preferred > stream enc instance > available */
|
||||
for (i = 0; i < enc_count; i++) {
|
||||
if (res_ctx->dio_link_enc_ref_cnts[i] == 0) {
|
||||
if (j == -1)
|
||||
j = i;
|
||||
|
||||
if (link->dpia_preferred_eng_id == i) {
|
||||
j = i;
|
||||
break;
|
||||
}
|
||||
|
||||
if (stream_enc_inst == i) {
|
||||
j = stream_enc_inst;
|
||||
}
|
||||
}
|
||||
}
|
||||
return j;
|
||||
}
|
||||
|
||||
static inline void acquire_dio_link_enc(
|
||||
|
|
@ -2781,7 +2812,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
|
|||
retain_dio_link_enc(res_ctx, enc_index);
|
||||
} else {
|
||||
if (stream->link->is_dig_mapping_flexible)
|
||||
enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
|
||||
enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream);
|
||||
else {
|
||||
int link_index = 0;
|
||||
|
||||
|
|
@ -2791,7 +2822,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
|
|||
* one into the acquiring link.
|
||||
*/
|
||||
if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
|
||||
int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
|
||||
int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream);
|
||||
|
||||
if (new_enc_index >= 0)
|
||||
swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
|
||||
|
|
@ -5201,7 +5232,7 @@ struct link_encoder *get_temp_dio_link_enc(
|
|||
enc_index = link->eng_id;
|
||||
|
||||
if (enc_index < 0)
|
||||
enc_index = find_free_dio_link_enc(res_ctx, link, pool);
|
||||
enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL);
|
||||
|
||||
if (enc_index >= 0)
|
||||
link_enc = pool->link_encoders[enc_index];
|
||||
|
|
|
|||
|
|
@ -35,8 +35,8 @@
|
|||
#include "link_enc_cfg.h"
|
||||
|
||||
#if defined(CONFIG_DRM_AMD_DC_FP)
|
||||
#include "dml2/dml2_wrapper.h"
|
||||
#include "dml2/dml2_internal_types.h"
|
||||
#include "dml2_0/dml2_wrapper.h"
|
||||
#include "dml2_0/dml2_internal_types.h"
|
||||
#endif
|
||||
|
||||
#define DC_LOGGER \
|
||||
|
|
|
|||
|
|
@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status(
|
|||
return dc_state_get_stream_status(dc->current_state, stream);
|
||||
}
|
||||
|
||||
const struct dc_stream_status *dc_stream_get_status_const(
|
||||
const struct dc_stream_state *stream)
|
||||
{
|
||||
struct dc *dc = stream->ctx->dc;
|
||||
|
||||
return dc_state_get_stream_status(dc->current_state, stream);
|
||||
}
|
||||
|
||||
void program_cursor_attributes(
|
||||
struct dc *dc,
|
||||
struct dc_stream_state *stream)
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@
|
|||
#include "inc/hw/dmcu.h"
|
||||
#include "dml/display_mode_lib.h"
|
||||
|
||||
#include "dml2/dml2_wrapper.h"
|
||||
#include "dml2_0/dml2_wrapper.h"
|
||||
|
||||
#include "dmub/inc/dmub_cmd.h"
|
||||
|
||||
|
|
@ -54,8 +54,16 @@ struct abm_save_restore;
|
|||
struct aux_payload;
|
||||
struct set_config_cmd_payload;
|
||||
struct dmub_notification;
|
||||
struct dcn_hubbub_reg_state;
|
||||
struct dcn_hubp_reg_state;
|
||||
struct dcn_dpp_reg_state;
|
||||
struct dcn_mpc_reg_state;
|
||||
struct dcn_opp_reg_state;
|
||||
struct dcn_dsc_reg_state;
|
||||
struct dcn_optc_reg_state;
|
||||
struct dcn_dccg_reg_state;
|
||||
|
||||
#define DC_VER "3.2.355"
|
||||
#define DC_VER "3.2.356"
|
||||
|
||||
/**
|
||||
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
|
||||
|
|
@ -278,6 +286,15 @@ struct dc_scl_caps {
|
|||
bool sharpener_support;
|
||||
};
|
||||
|
||||
struct dc_check_config {
|
||||
/**
|
||||
* max video plane width that can be safely assumed to be always
|
||||
* supported by single DPP pipe.
|
||||
*/
|
||||
unsigned int max_optimizable_video_width;
|
||||
bool enable_legacy_fast_update;
|
||||
};
|
||||
|
||||
struct dc_caps {
|
||||
uint32_t max_streams;
|
||||
uint32_t max_links;
|
||||
|
|
@ -293,11 +310,6 @@ struct dc_caps {
|
|||
unsigned int max_cursor_size;
|
||||
unsigned int max_buffered_cursor_size;
|
||||
unsigned int max_video_width;
|
||||
/*
|
||||
* max video plane width that can be safely assumed to be always
|
||||
* supported by single DPP pipe.
|
||||
*/
|
||||
unsigned int max_optimizable_video_width;
|
||||
unsigned int min_horizontal_blanking_period;
|
||||
int linear_pitch_alignment;
|
||||
bool dcc_const_color;
|
||||
|
|
@ -455,6 +467,19 @@ enum surface_update_type {
|
|||
UPDATE_TYPE_FULL, /* may need to shuffle resources */
|
||||
};
|
||||
|
||||
enum dc_lock_descriptor {
|
||||
LOCK_DESCRIPTOR_NONE = 0x0,
|
||||
LOCK_DESCRIPTOR_STATE = 0x1,
|
||||
LOCK_DESCRIPTOR_LINK = 0x2,
|
||||
LOCK_DESCRIPTOR_STREAM = 0x4,
|
||||
LOCK_DESCRIPTOR_PLANE = 0x8,
|
||||
};
|
||||
|
||||
struct surface_update_descriptor {
|
||||
enum surface_update_type update_type;
|
||||
enum dc_lock_descriptor lock_descriptor;
|
||||
};
|
||||
|
||||
/* Forward declaration*/
|
||||
struct dc;
|
||||
struct dc_plane_state;
|
||||
|
|
@ -1120,7 +1145,6 @@ struct dc_debug_options {
|
|||
uint32_t fpo_vactive_min_active_margin_us;
|
||||
uint32_t fpo_vactive_max_blank_us;
|
||||
bool enable_hpo_pg_support;
|
||||
bool enable_legacy_fast_update;
|
||||
bool disable_dc_mode_overwrite;
|
||||
bool replay_skip_crtc_disabled;
|
||||
bool ignore_pg;/*do nothing, let pmfw control it*/
|
||||
|
|
@ -1152,7 +1176,6 @@ struct dc_debug_options {
|
|||
bool enable_ips_visual_confirm;
|
||||
unsigned int sharpen_policy;
|
||||
unsigned int scale_to_sharpness_policy;
|
||||
bool skip_full_updated_if_possible;
|
||||
unsigned int enable_oled_edp_power_up_opt;
|
||||
bool enable_hblank_borrow;
|
||||
bool force_subvp_df_throttle;
|
||||
|
|
@ -1703,6 +1726,7 @@ struct dc {
|
|||
struct dc_debug_options debug;
|
||||
struct dc_versions versions;
|
||||
struct dc_caps caps;
|
||||
struct dc_check_config check_config;
|
||||
struct dc_cap_funcs cap_funcs;
|
||||
struct dc_config config;
|
||||
struct dc_bounding_box_overrides bb_overrides;
|
||||
|
|
@ -1831,20 +1855,14 @@ struct dc_surface_update {
|
|||
};
|
||||
|
||||
struct dc_underflow_debug_data {
|
||||
uint32_t otg_inst;
|
||||
uint32_t otg_underflow;
|
||||
uint32_t h_position;
|
||||
uint32_t v_position;
|
||||
uint32_t otg_frame_count;
|
||||
struct dc_underflow_per_hubp_debug_data {
|
||||
uint32_t hubp_underflow;
|
||||
uint32_t hubp_in_blank;
|
||||
uint32_t hubp_readline;
|
||||
uint32_t det_config_error;
|
||||
} hubps[MAX_PIPES];
|
||||
uint32_t curr_det_sizes[MAX_PIPES];
|
||||
uint32_t target_det_sizes[MAX_PIPES];
|
||||
uint32_t compbuf_config_error;
|
||||
struct dcn_hubbub_reg_state *hubbub_reg_state;
|
||||
struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES];
|
||||
struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES];
|
||||
struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES];
|
||||
struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES];
|
||||
struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES];
|
||||
struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES];
|
||||
struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES];
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
@ -2722,6 +2740,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
|
|||
|
||||
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
|
||||
|
||||
void dc_log_preos_dmcub_info(const struct dc *dc);
|
||||
|
||||
/* DSC Interfaces */
|
||||
#include "dc_dsc.h"
|
||||
|
||||
|
|
@ -2737,7 +2757,7 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
|
|||
struct dc_stream_state *new_stream);
|
||||
|
||||
bool dc_is_cursor_limit_pending(struct dc *dc);
|
||||
bool dc_can_clear_cursor_limit(struct dc *dc);
|
||||
bool dc_can_clear_cursor_limit(const struct dc *dc);
|
||||
|
||||
/**
|
||||
* dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
|
||||
|
|
|
|||
|
|
@ -91,9 +91,17 @@ struct dc_vbios_funcs {
|
|||
struct device_id id);
|
||||
/* COMMANDS */
|
||||
|
||||
enum bp_result (*select_crtc_source)(
|
||||
struct dc_bios *bios,
|
||||
struct bp_crtc_source_select *bp_params);
|
||||
enum bp_result (*encoder_control)(
|
||||
struct dc_bios *bios,
|
||||
struct bp_encoder_control *cntl);
|
||||
enum bp_result (*dac_load_detection)(
|
||||
struct dc_bios *bios,
|
||||
enum engine_id engine_id,
|
||||
enum dal_device_type device_type,
|
||||
uint32_t enum_id);
|
||||
enum bp_result (*transmitter_control)(
|
||||
struct dc_bios *bios,
|
||||
struct bp_transmitter_control *cntl);
|
||||
|
|
@ -165,6 +173,7 @@ struct dc_vbios_funcs {
|
|||
};
|
||||
|
||||
struct bios_registers {
|
||||
uint32_t BIOS_SCRATCH_0;
|
||||
uint32_t BIOS_SCRATCH_3;
|
||||
uint32_t BIOS_SCRATCH_6;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -2344,3 +2344,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc)
|
|||
|
||||
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
|
||||
}
|
||||
|
||||
void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
|
||||
{
|
||||
struct dmub_srv *dmub;
|
||||
|
||||
if (!dc_dmub_srv || !dc_dmub_srv->dmub)
|
||||
return;
|
||||
|
||||
dmub = dc_dmub_srv->dmub;
|
||||
|
||||
if (dmub_srv_get_preos_info(dmub)) {
|
||||
DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
|
||||
DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version);
|
||||
DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options);
|
||||
DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status);
|
||||
DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
|
||||
DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
|
||||
DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base);
|
||||
DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -367,4 +367,11 @@ bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc);
|
|||
* @dc - pointer to DC object
|
||||
*/
|
||||
void dc_dmub_srv_release_hw(const struct dc *dc);
|
||||
|
||||
/**
|
||||
* dc_dmub_srv_log_preos_dmcub_info() - Logs preos dmcub fw info.
|
||||
*
|
||||
* @dc - pointer to DC object
|
||||
*/
|
||||
void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv);
|
||||
#endif /* _DMUB_DC_SRV_H_ */
|
||||
|
|
|
|||
|
|
@ -473,12 +473,11 @@ void dc_enable_stereo(
|
|||
/* Triggers multi-stream synchronization. */
|
||||
void dc_trigger_sync(struct dc *dc, struct dc_state *context);
|
||||
|
||||
enum surface_update_type dc_check_update_surfaces_for_stream(
|
||||
struct dc *dc,
|
||||
struct surface_update_descriptor dc_check_update_surfaces_for_stream(
|
||||
const struct dc_check_config *check_config,
|
||||
struct dc_surface_update *updates,
|
||||
int surface_count,
|
||||
struct dc_stream_update *stream_update,
|
||||
const struct dc_stream_status *stream_status);
|
||||
struct dc_stream_update *stream_update);
|
||||
|
||||
/**
|
||||
* Create a new default stream for the requested sink
|
||||
|
|
@ -492,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
|
|||
void dc_stream_retain(struct dc_stream_state *dc_stream);
|
||||
void dc_stream_release(struct dc_stream_state *dc_stream);
|
||||
|
||||
struct dc_stream_status *dc_stream_get_status(
|
||||
struct dc_stream_state *dc_stream);
|
||||
struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream);
|
||||
const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream);
|
||||
|
||||
/*******************************************************************************
|
||||
* Cursor interfaces - To manages the cursor within a stream
|
||||
|
|
|
|||
|
|
@ -185,6 +185,10 @@ struct dc_panel_patch {
|
|||
unsigned int wait_after_dpcd_poweroff_ms;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dc_edid_caps - Capabilities read from EDID.
|
||||
* @analog: Whether the monitor is analog. Used by DVI-I handling.
|
||||
*/
|
||||
struct dc_edid_caps {
|
||||
/* sink identification */
|
||||
uint16_t manufacturer_id;
|
||||
|
|
@ -213,6 +217,7 @@ struct dc_edid_caps {
|
|||
bool hdr_supported;
|
||||
bool rr_capable;
|
||||
bool scdc_present;
|
||||
bool analog;
|
||||
|
||||
struct dc_panel_patch panel_patch;
|
||||
};
|
||||
|
|
@ -348,7 +353,8 @@ enum dc_connection_type {
|
|||
dc_connection_none,
|
||||
dc_connection_single,
|
||||
dc_connection_mst_branch,
|
||||
dc_connection_sst_branch
|
||||
dc_connection_sst_branch,
|
||||
dc_connection_dac_load
|
||||
};
|
||||
|
||||
struct dc_csc_adjustments {
|
||||
|
|
|
|||
|
|
@ -425,7 +425,69 @@ struct dccg_mask {
|
|||
uint32_t SYMCLKD_CLOCK_ENABLE; \
|
||||
uint32_t SYMCLKE_CLOCK_ENABLE; \
|
||||
uint32_t DP_DTO_MODULO[MAX_PIPES]; \
|
||||
uint32_t DP_DTO_PHASE[MAX_PIPES]
|
||||
uint32_t DP_DTO_PHASE[MAX_PIPES]; \
|
||||
uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \
|
||||
uint32_t DCCG_AUDIO_DTO0_MODULE; \
|
||||
uint32_t DCCG_AUDIO_DTO0_PHASE; \
|
||||
uint32_t DCCG_AUDIO_DTO1_MODULE; \
|
||||
uint32_t DCCG_AUDIO_DTO1_PHASE; \
|
||||
uint32_t DCCG_CAC_STATUS; \
|
||||
uint32_t DCCG_CAC_STATUS2; \
|
||||
uint32_t DCCG_DISP_CNTL_REG; \
|
||||
uint32_t DCCG_DS_CNTL; \
|
||||
uint32_t DCCG_DS_DTO_INCR; \
|
||||
uint32_t DCCG_DS_DTO_MODULO; \
|
||||
uint32_t DCCG_DS_HW_CAL_INTERVAL; \
|
||||
uint32_t DCCG_GTC_CNTL; \
|
||||
uint32_t DCCG_GTC_CURRENT; \
|
||||
uint32_t DCCG_GTC_DTO_INCR; \
|
||||
uint32_t DCCG_GTC_DTO_MODULO; \
|
||||
uint32_t DCCG_PERFMON_CNTL; \
|
||||
uint32_t DCCG_PERFMON_CNTL2; \
|
||||
uint32_t DCCG_SOFT_RESET; \
|
||||
uint32_t DCCG_TEST_CLK_SEL; \
|
||||
uint32_t DCCG_VSYNC_CNT_CTRL; \
|
||||
uint32_t DCCG_VSYNC_CNT_INT_CTRL; \
|
||||
uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \
|
||||
uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \
|
||||
uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \
|
||||
uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \
|
||||
uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \
|
||||
uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \
|
||||
uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \
|
||||
uint32_t DP_DTO_DBUF_EN; \
|
||||
uint32_t DPIACLK_540M_DTO_MODULO; \
|
||||
uint32_t DPIACLK_540M_DTO_PHASE; \
|
||||
uint32_t DPIACLK_810M_DTO_MODULO; \
|
||||
uint32_t DPIACLK_810M_DTO_PHASE; \
|
||||
uint32_t DPIACLK_DTO_CNTL; \
|
||||
uint32_t DPIASYMCLK_CNTL; \
|
||||
uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \
|
||||
uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \
|
||||
uint32_t DPREFCLK_CNTL; \
|
||||
uint32_t DTBCLK_DTO_DBUF_EN; \
|
||||
uint32_t FORCE_SYMCLK_DISABLE; \
|
||||
uint32_t HDMICHARCLK0_CLOCK_CNTL; \
|
||||
uint32_t MICROSECOND_TIME_BASE_DIV; \
|
||||
uint32_t MILLISECOND_TIME_BASE_DIV; \
|
||||
uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG0_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG1_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG2_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \
|
||||
uint32_t OTG3_PIXEL_RATE_CNTL; \
|
||||
uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \
|
||||
uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \
|
||||
uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \
|
||||
uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \
|
||||
uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \
|
||||
uint32_t REFCLK_CGTT_BLK_CTRL_REG; \
|
||||
uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \
|
||||
uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \
|
||||
uint32_t SYMCLK_PSP_CNTL
|
||||
|
||||
struct dccg_registers {
|
||||
DCCG_REG_VARIABLE_LIST;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg,
|
|||
OTG_DROP_PIXEL[otg_inst], 1);
|
||||
}
|
||||
|
||||
void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state)
|
||||
{
|
||||
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
|
||||
|
||||
dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL);
|
||||
dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO);
|
||||
dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE);
|
||||
dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE);
|
||||
dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE);
|
||||
dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE);
|
||||
dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE);
|
||||
dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE);
|
||||
dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS);
|
||||
dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2);
|
||||
dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG);
|
||||
dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL);
|
||||
dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR);
|
||||
dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO);
|
||||
dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL);
|
||||
dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL);
|
||||
dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2);
|
||||
dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3);
|
||||
dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4);
|
||||
dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5);
|
||||
dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6);
|
||||
dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL);
|
||||
dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL);
|
||||
dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT);
|
||||
dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR);
|
||||
dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO);
|
||||
dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL);
|
||||
dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2);
|
||||
dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET);
|
||||
dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL);
|
||||
dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL);
|
||||
dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL);
|
||||
dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE);
|
||||
dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE);
|
||||
dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE);
|
||||
dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE);
|
||||
dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE);
|
||||
dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE);
|
||||
dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG);
|
||||
dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL);
|
||||
dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN);
|
||||
dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]);
|
||||
dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]);
|
||||
dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]);
|
||||
dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]);
|
||||
dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]);
|
||||
dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]);
|
||||
dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]);
|
||||
dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]);
|
||||
dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO);
|
||||
dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE);
|
||||
dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO);
|
||||
dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE);
|
||||
dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL);
|
||||
dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL);
|
||||
dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG);
|
||||
dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL);
|
||||
dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL);
|
||||
dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]);
|
||||
dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]);
|
||||
dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]);
|
||||
dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]);
|
||||
dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG);
|
||||
dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL);
|
||||
dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL);
|
||||
dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL);
|
||||
dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM);
|
||||
dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM);
|
||||
dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM);
|
||||
dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM);
|
||||
dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN);
|
||||
dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]);
|
||||
dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]);
|
||||
dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]);
|
||||
dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]);
|
||||
dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]);
|
||||
dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]);
|
||||
dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]);
|
||||
dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]);
|
||||
dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL);
|
||||
dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE);
|
||||
dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL);
|
||||
dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL);
|
||||
dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM);
|
||||
dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV);
|
||||
dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV);
|
||||
dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV);
|
||||
dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL);
|
||||
dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL);
|
||||
dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL);
|
||||
dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL);
|
||||
dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL);
|
||||
dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL);
|
||||
dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL);
|
||||
dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL);
|
||||
dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL);
|
||||
dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL);
|
||||
dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL);
|
||||
dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG);
|
||||
dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG);
|
||||
dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG);
|
||||
dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL);
|
||||
dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL);
|
||||
dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL);
|
||||
dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE);
|
||||
dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE);
|
||||
dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE);
|
||||
dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE);
|
||||
dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE);
|
||||
}
|
||||
|
||||
static const struct dccg_funcs dccg31_funcs = {
|
||||
.update_dpp_dto = dccg31_update_dpp_dto,
|
||||
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
|
||||
|
|
@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = {
|
|||
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
|
||||
.disable_dsc = dccg31_disable_dscclk,
|
||||
.enable_dsc = dccg31_enable_dscclk,
|
||||
.dccg_read_reg_state = dccg31_read_reg_state,
|
||||
};
|
||||
|
||||
struct dccg *dccg31_create(
|
||||
|
|
|
|||
|
|
@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst);
|
|||
|
||||
void dccg31_enable_dscclk(struct dccg *dccg, int inst);
|
||||
|
||||
void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
|
||||
|
||||
#endif //__DCN31_DCCG_H__
|
||||
|
|
|
|||
|
|
@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = {
|
|||
.get_pixel_rate_div = dccg314_get_pixel_rate_div,
|
||||
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
|
||||
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
|
||||
.set_dtbclk_p_src = dccg314_set_dtbclk_p_src
|
||||
.set_dtbclk_p_src = dccg314_set_dtbclk_p_src,
|
||||
.dccg_read_reg_state = dccg31_read_reg_state
|
||||
};
|
||||
|
||||
struct dccg *dccg314_create(
|
||||
|
|
|
|||
|
|
@ -74,8 +74,7 @@
|
|||
SR(DCCG_GATE_DISABLE_CNTL3),\
|
||||
SR(HDMISTREAMCLK0_DTO_PARAM),\
|
||||
SR(OTG_PIXEL_RATE_DIV),\
|
||||
SR(DTBCLK_P_CNTL),\
|
||||
SR(DCCG_AUDIO_DTO_SOURCE)
|
||||
SR(DTBCLK_P_CNTL)
|
||||
|
||||
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
|
||||
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
|
||||
|
|
|
|||
|
|
@ -2453,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = {
|
|||
.disable_symclk_se = dccg35_disable_symclk_se,
|
||||
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
|
||||
.dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
|
||||
.dccg_read_reg_state = dccg31_read_reg_state,
|
||||
};
|
||||
|
||||
struct dccg *dccg35_create(
|
||||
|
|
|
|||
|
|
@ -41,8 +41,9 @@
|
|||
SR(SYMCLKA_CLOCK_ENABLE),\
|
||||
SR(SYMCLKB_CLOCK_ENABLE),\
|
||||
SR(SYMCLKC_CLOCK_ENABLE),\
|
||||
SR(SYMCLKD_CLOCK_ENABLE),\
|
||||
SR(SYMCLKE_CLOCK_ENABLE)
|
||||
SR(SYMCLKD_CLOCK_ENABLE), \
|
||||
SR(SYMCLKE_CLOCK_ENABLE),\
|
||||
SR(SYMCLK_PSP_CNTL)
|
||||
|
||||
#define DCCG_MASK_SH_LIST_DCN35(mask_sh) \
|
||||
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
|
||||
|
|
|
|||
|
|
@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = {
|
|||
.enable_symclk_se = dccg401_enable_symclk_se,
|
||||
.disable_symclk_se = dccg401_disable_symclk_se,
|
||||
.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
|
||||
.dccg_read_reg_state = dccg31_read_reg_state
|
||||
};
|
||||
|
||||
struct dccg *dccg401_create(
|
||||
|
|
|
|||
|
|
@ -302,6 +302,10 @@ static void setup_panel_mode(
|
|||
if (ctx->dc->caps.psp_setup_panel_mode)
|
||||
return;
|
||||
|
||||
/* The code below is only applicable to encoders with a digital transmitter. */
|
||||
if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
|
||||
return;
|
||||
|
||||
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
|
||||
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
|
||||
|
||||
|
|
@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output(
|
|||
return true;
|
||||
}
|
||||
|
||||
static bool dce110_link_encoder_validate_rgb_output(
|
||||
const struct dce110_link_encoder *enc110,
|
||||
const struct dc_crtc_timing *crtc_timing)
|
||||
{
|
||||
/* When the VBIOS doesn't specify any limits, use 400 MHz.
|
||||
* The value comes from amdgpu_atombios_get_clock_info.
|
||||
*/
|
||||
uint32_t max_pixel_clock_khz = 400000;
|
||||
|
||||
if (enc110->base.ctx->dc_bios->fw_info_valid &&
|
||||
enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) {
|
||||
max_pixel_clock_khz =
|
||||
enc110->base.ctx->dc_bios->fw_info.max_pixel_clock;
|
||||
}
|
||||
|
||||
if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10)
|
||||
return false;
|
||||
|
||||
if (crtc_timing->display_color_depth != COLOR_DEPTH_888)
|
||||
return false;
|
||||
|
||||
if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void dce110_link_encoder_construct(
|
||||
struct dce110_link_encoder *enc110,
|
||||
const struct encoder_init_data *init_data,
|
||||
|
|
@ -824,6 +855,7 @@ void dce110_link_encoder_construct(
|
|||
enc110->base.connector = init_data->connector;
|
||||
|
||||
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
|
||||
enc110->base.analog_engine = init_data->analog_engine;
|
||||
|
||||
enc110->base.features = *enc_features;
|
||||
|
||||
|
|
@ -847,6 +879,11 @@ void dce110_link_encoder_construct(
|
|||
SIGNAL_TYPE_EDP |
|
||||
SIGNAL_TYPE_HDMI_TYPE_A;
|
||||
|
||||
if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
|
||||
enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
|
||||
enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
|
||||
enc110->base.output_signals |= SIGNAL_TYPE_RGB;
|
||||
|
||||
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
|
||||
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
|
||||
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
|
||||
|
|
@ -885,6 +922,13 @@ void dce110_link_encoder_construct(
|
|||
enc110->base.preferred_engine = ENGINE_ID_DIGG;
|
||||
break;
|
||||
default:
|
||||
if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
|
||||
/* The connector is analog-only, ie. VGA */
|
||||
enc110->base.preferred_engine = init_data->analog_engine;
|
||||
enc110->base.output_signals = SIGNAL_TYPE_RGB;
|
||||
enc110->base.transmitter = TRANSMITTER_UNKNOWN;
|
||||
break;
|
||||
}
|
||||
ASSERT_CRITICAL(false);
|
||||
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
|
||||
}
|
||||
|
|
@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream(
|
|||
is_valid = dce110_link_encoder_validate_dp_output(
|
||||
enc110, &stream->timing);
|
||||
break;
|
||||
case SIGNAL_TYPE_RGB:
|
||||
is_valid = dce110_link_encoder_validate_rgb_output(
|
||||
enc110, &stream->timing);
|
||||
break;
|
||||
case SIGNAL_TYPE_EDP:
|
||||
case SIGNAL_TYPE_LVDS:
|
||||
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
|
||||
|
|
@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init(
|
|||
cntl.coherent = false;
|
||||
cntl.hpd_sel = enc110->base.hpd_source;
|
||||
|
||||
/* The code below is only applicable to encoders with a digital transmitter. */
|
||||
if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
|
||||
return;
|
||||
|
||||
if (enc110->base.connector.id == CONNECTOR_ID_EDP)
|
||||
cntl.signal = SIGNAL_TYPE_EDP;
|
||||
|
||||
|
|
@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup(
|
|||
/* DP MST */
|
||||
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
|
||||
break;
|
||||
case SIGNAL_TYPE_RGB:
|
||||
break;
|
||||
default:
|
||||
ASSERT_CRITICAL(false);
|
||||
/* invalid mode ! */
|
||||
|
|
@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output(
|
|||
struct bp_transmitter_control cntl = { 0 };
|
||||
enum bp_result result;
|
||||
|
||||
switch (enc->analog_engine) {
|
||||
case ENGINE_ID_DACA:
|
||||
REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0);
|
||||
break;
|
||||
case ENGINE_ID_DACB:
|
||||
/* DACB doesn't seem to be present on DCE6+,
|
||||
* although there are references to it in the register file.
|
||||
*/
|
||||
DC_LOG_ERROR("%s DACB is unsupported\n", __func__);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* The code below only applies to connectors that support digital signals. */
|
||||
if (enc->transmitter == TRANSMITTER_UNKNOWN)
|
||||
return;
|
||||
|
||||
if (!dce110_is_dig_enabled(enc)) {
|
||||
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
|
||||
return;
|
||||
|
|
@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct(
|
|||
enc110->base.connector = init_data->connector;
|
||||
|
||||
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
|
||||
enc110->base.analog_engine = init_data->analog_engine;
|
||||
|
||||
enc110->base.features = *enc_features;
|
||||
|
||||
|
|
@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct(
|
|||
SIGNAL_TYPE_EDP |
|
||||
SIGNAL_TYPE_HDMI_TYPE_A;
|
||||
|
||||
if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
|
||||
enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
|
||||
enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
|
||||
enc110->base.output_signals |= SIGNAL_TYPE_RGB;
|
||||
|
||||
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
|
||||
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
|
||||
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
|
||||
|
|
@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct(
|
|||
enc110->base.preferred_engine = ENGINE_ID_DIGG;
|
||||
break;
|
||||
default:
|
||||
if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
|
||||
/* The connector is analog-only, ie. VGA */
|
||||
enc110->base.preferred_engine = init_data->analog_engine;
|
||||
enc110->base.output_signals = SIGNAL_TYPE_RGB;
|
||||
enc110->base.transmitter = TRANSMITTER_UNKNOWN;
|
||||
break;
|
||||
}
|
||||
ASSERT_CRITICAL(false);
|
||||
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -101,18 +101,21 @@
|
|||
SRI(DP_SEC_CNTL, DP, id), \
|
||||
SRI(DP_VID_STREAM_CNTL, DP, id), \
|
||||
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
|
||||
SRI(DP_SEC_CNTL1, DP, id)
|
||||
SRI(DP_SEC_CNTL1, DP, id), \
|
||||
SR(DAC_ENABLE)
|
||||
#endif
|
||||
|
||||
#define LE_DCE80_REG_LIST(id)\
|
||||
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
|
||||
LE_COMMON_REG_LIST_BASE(id)
|
||||
LE_COMMON_REG_LIST_BASE(id), \
|
||||
SR(DAC_ENABLE)
|
||||
|
||||
#define LE_DCE100_REG_LIST(id)\
|
||||
LE_COMMON_REG_LIST_BASE(id), \
|
||||
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
|
||||
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
|
||||
SR(DCI_MEM_PWR_STATUS)
|
||||
SR(DCI_MEM_PWR_STATUS), \
|
||||
SR(DAC_ENABLE)
|
||||
|
||||
#define LE_DCE110_REG_LIST(id)\
|
||||
LE_COMMON_REG_LIST_BASE(id), \
|
||||
|
|
@ -181,6 +184,9 @@ struct dce110_link_enc_registers {
|
|||
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
|
||||
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
|
||||
uint32_t DP_SEC_CNTL1;
|
||||
|
||||
/* DAC registers */
|
||||
uint32_t DAC_ENABLE;
|
||||
};
|
||||
|
||||
struct dce110_link_encoder {
|
||||
|
|
@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output(
|
|||
enum signal_type signal,
|
||||
const struct dc_crtc_timing *crtc_timing);
|
||||
|
||||
bool dce110_link_encoder_validate_rgb_output(
|
||||
const struct dce110_link_encoder *enc110,
|
||||
const struct dc_crtc_timing *crtc_timing);
|
||||
|
||||
bool dce110_link_encoder_validate_dp_output(
|
||||
const struct dce110_link_encoder *enc110,
|
||||
const struct dc_crtc_timing *crtc_timing);
|
||||
|
|
|
|||
|
|
@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct(
|
|||
enc110->se_shift = se_shift;
|
||||
enc110->se_mask = se_mask;
|
||||
}
|
||||
|
||||
static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {0};
|
||||
|
||||
void dce110_analog_stream_encoder_construct(
|
||||
struct dce110_stream_encoder *enc110,
|
||||
struct dc_context *ctx,
|
||||
struct dc_bios *bp,
|
||||
enum engine_id eng_id)
|
||||
{
|
||||
enc110->base.funcs = &dce110_an_str_enc_funcs;
|
||||
enc110->base.ctx = ctx;
|
||||
enc110->base.id = eng_id;
|
||||
enc110->base.bp = bp;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -708,6 +708,11 @@ void dce110_stream_encoder_construct(
|
|||
const struct dce_stream_encoder_shift *se_shift,
|
||||
const struct dce_stream_encoder_mask *se_mask);
|
||||
|
||||
void dce110_analog_stream_encoder_construct(
|
||||
struct dce110_stream_encoder *enc110,
|
||||
struct dc_context *ctx,
|
||||
struct dc_bios *bp,
|
||||
enum engine_id eng_id);
|
||||
|
||||
void dce110_se_audio_mute_control(
|
||||
struct stream_encoder *enc, bool mute);
|
||||
|
|
|
|||
|
|
@ -1,141 +0,0 @@
|
|||
# SPDX-License-Identifier: MIT */
|
||||
#
|
||||
# Copyright 2023 Advanced Micro Devices, Inc.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
#
|
||||
# Authors: AMD
|
||||
#
|
||||
# Makefile for dml2.
|
||||
|
||||
dml2_ccflags := $(CC_FLAGS_FPU)
|
||||
dml2_rcflags := $(CC_FLAGS_NO_FPU)
|
||||
|
||||
ifneq ($(CONFIG_FRAME_WARN),0)
|
||||
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
|
||||
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
|
||||
frame_warn_limit := 4096
|
||||
else
|
||||
frame_warn_limit := 3072
|
||||
endif
|
||||
else
|
||||
frame_warn_limit := 2048
|
||||
endif
|
||||
|
||||
ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
|
||||
frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
|
||||
endif
|
||||
endif
|
||||
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
|
||||
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
|
||||
|
||||
DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
|
||||
dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
|
||||
dml_display_rq_dlg_calc.o
|
||||
|
||||
AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
|
||||
|
||||
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
|
||||
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
|
||||
|
||||
DML21 := src/dml2_top/dml2_top_interfaces.o
|
||||
DML21 += src/dml2_top/dml2_top_soc15.o
|
||||
DML21 += src/dml2_core/dml2_core_dcn4.o
|
||||
DML21 += src/dml2_core/dml2_core_utils.o
|
||||
DML21 += src/dml2_core/dml2_core_factory.o
|
||||
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
|
||||
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
|
||||
DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
|
||||
DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
|
||||
DML21 += src/dml2_mcg/dml2_mcg_factory.o
|
||||
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
|
||||
DML21 += src/dml2_pmo/dml2_pmo_factory.o
|
||||
DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
|
||||
DML21 += src/dml2_standalone_libraries/lib_float_math.o
|
||||
DML21 += dml21_translation_helper.o
|
||||
DML21 += dml21_wrapper.o
|
||||
DML21 += dml21_utils.o
|
||||
|
||||
AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
|
||||
|
||||
AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
|
||||
|
||||
140
drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
Normal file
140
drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
Normal file
|
|
@ -0,0 +1,140 @@
|
|||
# SPDX-License-Identifier: MIT */
|
||||
#
|
||||
# Copyright 2023 Advanced Micro Devices, Inc.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
#
|
||||
# Authors: AMD
|
||||
#
|
||||
# Makefile for dml2.
|
||||
|
||||
dml2_ccflags := $(CC_FLAGS_FPU)
|
||||
dml2_rcflags := $(CC_FLAGS_NO_FPU)
|
||||
|
||||
ifneq ($(CONFIG_FRAME_WARN),0)
|
||||
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
|
||||
ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
|
||||
frame_warn_limit := 4096
|
||||
else
|
||||
frame_warn_limit := 3072
|
||||
endif
|
||||
else
|
||||
frame_warn_limit := 2056
|
||||
endif
|
||||
|
||||
ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
|
||||
frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
|
||||
endif
|
||||
endif
|
||||
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
|
||||
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
|
||||
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
|
||||
|
||||
DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
|
||||
dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
|
||||
dml_display_rq_dlg_calc.o
|
||||
|
||||
AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
|
||||
|
||||
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
|
||||
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
|
||||
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
|
||||
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
|
||||
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
|
||||
|
||||
DML21 := src/dml2_top/dml2_top_interfaces.o
|
||||
DML21 += src/dml2_top/dml2_top_soc15.o
|
||||
DML21 += src/dml2_core/dml2_core_dcn4.o
|
||||
DML21 += src/dml2_core/dml2_core_utils.o
|
||||
DML21 += src/dml2_core/dml2_core_factory.o
|
||||
DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
|
||||
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
|
||||
DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
|
||||
DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
|
||||
DML21 += src/dml2_mcg/dml2_mcg_factory.o
|
||||
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
|
||||
DML21 += src/dml2_pmo/dml2_pmo_factory.o
|
||||
DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
|
||||
DML21 += src/dml2_standalone_libraries/lib_float_math.o
|
||||
DML21 += dml21_translation_helper.o
|
||||
DML21 += dml21_wrapper.o
|
||||
DML21 += dml21_utils.o
|
||||
|
||||
AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
|
||||
|
||||
AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
|
||||
|
|
@ -53,17 +53,17 @@ typedef const void *const_pvoid;
|
|||
typedef const char *const_pchar;
|
||||
|
||||
typedef struct rgba_struct {
|
||||
uint8 a;
|
||||
uint8 r;
|
||||
uint8 g;
|
||||
uint8 b;
|
||||
uint8 a;
|
||||
uint8 r;
|
||||
uint8 g;
|
||||
uint8 b;
|
||||
} rgba_t;
|
||||
|
||||
typedef struct {
|
||||
uint8 blue;
|
||||
uint8 green;
|
||||
uint8 red;
|
||||
uint8 alpha;
|
||||
uint8 blue;
|
||||
uint8 green;
|
||||
uint8 red;
|
||||
uint8 alpha;
|
||||
} gen_color_t;
|
||||
|
||||
typedef union {
|
||||
|
|
@ -87,7 +87,7 @@ typedef union {
|
|||
} uintfloat64;
|
||||
|
||||
#ifndef UNREFERENCED_PARAMETER
|
||||
#define UNREFERENCED_PARAMETER(x) x = x
|
||||
#define UNREFERENCED_PARAMETER(x) (x = x)
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
|
@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin
|
|||
return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe);
|
||||
}
|
||||
|
||||
|
||||
#define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \
|
||||
{ \
|
||||
dml_uint_t plane_idx; \
|
||||
|
|
@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F
|
|||
dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow);
|
||||
dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte);
|
||||
dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte);
|
||||
|
||||
|
|
@ -274,7 +274,6 @@ enum dml_clk_cfg_policy {
|
|||
dml_use_state_freq = 2
|
||||
};
|
||||
|
||||
|
||||
struct soc_state_bounding_box_st {
|
||||
dml_float_t socclk_mhz;
|
||||
dml_float_t dscclk_mhz;
|
||||
|
|
@ -1894,7 +1893,7 @@ struct display_mode_lib_scratch_st {
|
|||
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params;
|
||||
};
|
||||
|
||||
/// @brief Represent the overall soc/ip environment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
|
||||
/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
|
||||
struct display_mode_lib_st {
|
||||
dml_uint_t project;
|
||||
|
||||
|
|
@ -52,7 +52,7 @@
|
|||
#define __DML_VBA_DEBUG__
|
||||
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
|
||||
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported
|
||||
#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
|
||||
#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
|
||||
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
|
||||
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
|
||||
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
|
||||
|
|
@ -30,7 +30,6 @@
|
|||
#include "display_mode_core_structs.h"
|
||||
#include "cmntypes.h"
|
||||
|
||||
|
||||
#include "dml_assert.h"
|
||||
#include "dml_logging.h"
|
||||
|
||||
|
|
@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st
|
|||
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
|
||||
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
|
||||
|
||||
|
||||
#endif
|
||||
|
|
@ -224,9 +224,7 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
|
|||
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
|
||||
|
||||
/* Populate stream, plane mappings and other fields in display config. */
|
||||
DC_FP_START();
|
||||
result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
|
||||
DC_FP_END();
|
||||
if (!result)
|
||||
return false;
|
||||
|
||||
|
|
@ -281,9 +279,9 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co
|
|||
dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context);
|
||||
|
||||
mode_support->dml2_instance = dml_init->dml2_instance;
|
||||
DC_FP_START();
|
||||
dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx);
|
||||
dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming;
|
||||
DC_FP_START();
|
||||
is_supported = dml2_check_mode_supported(mode_support);
|
||||
DC_FP_END();
|
||||
if (!is_supported)
|
||||
|
|
@ -2,7 +2,6 @@
|
|||
//
|
||||
// Copyright 2024 Advanced Micro Devices, Inc.
|
||||
|
||||
|
||||
#ifndef __DML_DML_DCN4_SOC_BB__
|
||||
#define __DML_DML_DCN4_SOC_BB__
|
||||
|
||||
|
|
@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
|
|||
.writeback_interface_buffer_size_kbytes = 90,
|
||||
//Number of pipes after DCN Pipe harvesting
|
||||
.max_num_dpp = 4,
|
||||
.max_num_opp = 4,
|
||||
.max_num_otg = 4,
|
||||
.max_num_wb = 1,
|
||||
.max_dchub_pscl_bw_pix_per_clk = 4,
|
||||
|
|
@ -1303,6 +1303,7 @@ static double TruncToValidBPP(
|
|||
MinDSCBPP = 8;
|
||||
MaxDSCBPP = 16;
|
||||
} else {
|
||||
|
||||
if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
|
||||
NonDSCBPP0 = 24;
|
||||
NonDSCBPP1 = 24;
|
||||
|
|
@ -1320,6 +1321,7 @@ static double TruncToValidBPP(
|
|||
MaxDSCBPP = 16;
|
||||
}
|
||||
}
|
||||
|
||||
if (Output == dml2_dp2p0) {
|
||||
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
|
||||
} else if (DSCEnable && Output == dml2_dp) {
|
||||
|
|
@ -4047,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
|
|||
bool UseDSC,
|
||||
unsigned int NumberOfDSCSlices,
|
||||
unsigned int TotalNumberOfActiveDPP,
|
||||
unsigned int TotalNumberOfActiveOPP,
|
||||
unsigned int MaxNumDPP,
|
||||
unsigned int MaxNumOPP,
|
||||
double DISPCLKRequired,
|
||||
unsigned int NumberOfDPPRequired,
|
||||
unsigned int MaxHActiveForDSC,
|
||||
|
|
@ -4063,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
|
|||
|
||||
if (DISPCLKRequired > MaxDispclk)
|
||||
return false;
|
||||
if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP)
|
||||
if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP)
|
||||
return false;
|
||||
if (are_odm_segments_symmetrical) {
|
||||
if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle))
|
||||
|
|
@ -4109,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode(
|
|||
double MaxDispclk,
|
||||
bool DSCEnable,
|
||||
unsigned int TotalNumberOfActiveDPP,
|
||||
unsigned int TotalNumberOfActiveOPP,
|
||||
unsigned int MaxNumDPP,
|
||||
unsigned int MaxNumOPP,
|
||||
double PixelClock,
|
||||
unsigned int NumberOfDSCSlices,
|
||||
|
||||
|
|
@ -4179,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode(
|
|||
UseDSC,
|
||||
NumberOfDSCSlices,
|
||||
TotalNumberOfActiveDPP,
|
||||
TotalNumberOfActiveOPP,
|
||||
MaxNumDPP,
|
||||
MaxNumOPP,
|
||||
DISPCLKRequired,
|
||||
NumberOfDPPRequired,
|
||||
MaxHActiveForDSC,
|
||||
|
|
@ -8358,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
|
|||
CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
|
||||
|
||||
mode_lib->ms.TotalNumberOfActiveDPP = 0;
|
||||
mode_lib->ms.TotalNumberOfActiveOPP = 0;
|
||||
mode_lib->ms.support.TotalAvailablePipesSupport = true;
|
||||
|
||||
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
|
||||
|
|
@ -8393,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
|
|||
mode_lib->ms.max_dispclk_freq_mhz,
|
||||
false, // DSCEnable
|
||||
mode_lib->ms.TotalNumberOfActiveDPP,
|
||||
mode_lib->ms.TotalNumberOfActiveOPP,
|
||||
mode_lib->ip.max_num_dpp,
|
||||
mode_lib->ip.max_num_opp,
|
||||
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
|
||||
mode_lib->ms.support.NumberOfDSCSlices[k],
|
||||
|
||||
|
|
@ -8412,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
|
|||
mode_lib->ms.max_dispclk_freq_mhz,
|
||||
true, // DSCEnable
|
||||
mode_lib->ms.TotalNumberOfActiveDPP,
|
||||
mode_lib->ms.TotalNumberOfActiveOPP,
|
||||
mode_lib->ip.max_num_dpp,
|
||||
mode_lib->ip.max_num_opp,
|
||||
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
|
||||
mode_lib->ms.support.NumberOfDSCSlices[k],
|
||||
|
||||
|
|
@ -8516,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
|
|||
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
|
||||
mode_lib->ms.MPCCombine[k] = false;
|
||||
mode_lib->ms.NoOfDPP[k] = 1;
|
||||
mode_lib->ms.NoOfOPP[k] = 1;
|
||||
|
||||
if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
|
||||
mode_lib->ms.MPCCombine[k] = false;
|
||||
mode_lib->ms.NoOfDPP[k] = 4;
|
||||
mode_lib->ms.NoOfOPP[k] = 4;
|
||||
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
|
||||
mode_lib->ms.MPCCombine[k] = false;
|
||||
mode_lib->ms.NoOfDPP[k] = 3;
|
||||
mode_lib->ms.NoOfOPP[k] = 3;
|
||||
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
|
||||
mode_lib->ms.MPCCombine[k] = false;
|
||||
mode_lib->ms.NoOfDPP[k] = 2;
|
||||
mode_lib->ms.NoOfOPP[k] = 2;
|
||||
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
|
||||
mode_lib->ms.MPCCombine[k] = true;
|
||||
mode_lib->ms.NoOfDPP[k] = 2;
|
||||
mode_lib->ms.TotalNumberOfActiveDPP++;
|
||||
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
|
||||
mode_lib->ms.MPCCombine[k] = false;
|
||||
mode_lib->ms.NoOfDPP[k] = 1;
|
||||
|
|
@ -8540,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
|
|||
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
|
||||
mode_lib->ms.MPCCombine[k] = true;
|
||||
mode_lib->ms.NoOfDPP[k] = 2;
|
||||
mode_lib->ms.TotalNumberOfActiveDPP++;
|
||||
}
|
||||
}
|
||||
#if defined(__DML_VBA_DEBUG__)
|
||||
|
|
@ -8548,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
|
|||
#endif
|
||||
}
|
||||
|
||||
mode_lib->ms.TotalNumberOfActiveDPP = 0;
|
||||
mode_lib->ms.TotalNumberOfActiveOPP = 0;
|
||||
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
|
||||
mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k];
|
||||
mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k];
|
||||
}
|
||||
if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
|
||||
mode_lib->ms.support.TotalAvailablePipesSupport = false;
|
||||
if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp)
|
||||
mode_lib->ms.support.TotalAvailablePipesSupport = false;
|
||||
|
||||
|
||||
mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
|
||||
|
|
@ -36,6 +36,7 @@ struct dml2_core_ip_params {
|
|||
unsigned int max_line_buffer_lines;
|
||||
unsigned int writeback_interface_buffer_size_kbytes;
|
||||
unsigned int max_num_dpp;
|
||||
unsigned int max_num_opp;
|
||||
unsigned int max_num_otg;
|
||||
unsigned int TDLUT_33cube_count;
|
||||
unsigned int max_num_wb;
|
||||
|
|
@ -570,6 +571,7 @@ struct dml2_core_internal_mode_support {
|
|||
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
|
||||
unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES];
|
||||
unsigned int NoOfDPP[DML2_MAX_PLANES];
|
||||
unsigned int NoOfOPP[DML2_MAX_PLANES];
|
||||
bool MPCCombine[DML2_MAX_PLANES];
|
||||
double dcfclk_deepsleep;
|
||||
double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES];
|
||||
|
|
@ -580,6 +582,7 @@ struct dml2_core_internal_mode_support {
|
|||
bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES];
|
||||
bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES];
|
||||
unsigned int TotalNumberOfActiveDPP;
|
||||
unsigned int TotalNumberOfActiveOPP;
|
||||
unsigned int TotalNumberOfSingleDPPSurfaces;
|
||||
unsigned int TotalNumberOfDCCActiveDPP;
|
||||
unsigned int Total3dlutActive;
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue