Beyond Display related:

- Switch to use kernel standard fault injection in i915 (Juha-Pekka)
 
  Display uAPI related:
  - Display uapi vs. hw state fixes (Ville)
  - Expose sharpness only if num_scalers is >= 2 (Nemesa)
 
  Display related:
  - More display driver refactor and clean-ups, specially towards separation (Jani)
  - Add initial support Xe3p_LPD for NVL (Gustavo, Sai, )
  - BMG FBC W/a (Vinod)
  - RPM fix (Dibin)
  - Add MTL+ platforms to support dpll framework (Mika, Imre)
  - Other PLL related fixes (Imre)
  - Fix DIMM_S DRAM decoding on ICL (Ville)
  - Async flip refactor (Ville, Jouni)
  - Go back to using AUX interrupts (Ville)
  - Reduce severity of failed DII FEC enabling (Grzelak)
  - Enable system cache support for FBC (Vinod)
  - Move PSR/Panel Replay sink data into intel_connector and other PSR changes (Jouni)
  - Detect AuxCCS support via display parent interface (Tvrtko)
  - Clean up link BW/DSC slice config computation(Imre)
  - Toggle powerdown states for C10 on HDMI (Gustavo)
  - Add parent interface for PC8 forcewake tricks (Ville)
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEbSBwaO7dZQkcLOKj+mJfZA7rE8oFAmlFtn0ACgkQ+mJfZA7r
 E8p8Awf9F/V7qCk8evJJV4gzj1OuUIf7vrn4+tHbRamx8iOSXNdwCJQLC0JRM5BM
 VCmHpyVEDjHdFvDKPefk5fznDs/OEOH3HHIzNztggj4/gFpOBLlPtW1fwdm82qS2
 J+1HedK66BwJSkbFdkdGAnWnkL+Uc/LcaMCPZlOU/F94Gqx61w47omRNaO0nf09f
 k3AFO8piUQjZdsrWdIdVcCvJ9iZ7jjbXhPlYlUpLmrbAaDDLt1EvgR2lHDeH2v66
 drFQ2CIPpidnY+qPiSr+SgQCapVQeVf9e8+MvLRDuBrIp9g+T9xU2L1ZYLOMjByE
 hSlB6C07I2oppD2cd8XVk2vbLSAl0w==
 =MtQI
 -----END PGP SIGNATURE-----

Merge tag 'drm-intel-next-2025-12-19' of https://gitlab.freedesktop.org/drm/i915/kernel into drm-next

Beyond Display related:
 - Switch to use kernel standard fault injection in i915 (Juha-Pekka)

 Display uAPI related:
 - Display uapi vs. hw state fixes (Ville)
 - Expose sharpness only if num_scalers is >= 2 (Nemesa)

 Display related:
 - More display driver refactor and clean-ups, especially towards separation (Jani)
 - Add initial Xe3p_LPD support for NVL (Gustavo, Sai)
 - BMG FBC W/a (Vinod)
 - RPM fix (Dibin)
 - Add MTL+ platforms to support dpll framework (Mika, Imre)
 - Other PLL related fixes (Imre)
 - Fix DIMM_S DRAM decoding on ICL (Ville)
 - Async flip refactor (Ville, Jouni)
 - Go back to using AUX interrupts (Ville)
 - Reduce severity of failed DII FEC enabling (Grzelak)
 - Enable system cache support for FBC (Vinod)
 - Move PSR/Panel Replay sink data into intel_connector and other PSR changes (Jouni)
 - Detect AuxCCS support via display parent interface (Tvrtko)
 - Clean up link BW/DSC slice config computation (Imre)
 - Toggle powerdown states for C10 on HDMI (Gustavo)
 - Add parent interface for PC8 forcewake tricks (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/aUW3bVDdE63aSFOJ@intel.com
This commit is contained in:
Dave Airlie 2025-12-27 16:25:56 +10:00
commit c5fb82d113
176 changed files with 3877 additions and 2587 deletions

View file

@ -2704,6 +2704,71 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
}
EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr);
/**
* drm_dp_dsc_slice_count_to_mask() - Convert a slice count to a slice count mask
* @slice_count: slice count
*
* Convert @slice_count to a slice count mask.
*
* Returns the slice count mask.
*/
u32 drm_dp_dsc_slice_count_to_mask(int slice_count)
{
return BIT(slice_count - 1);
}
EXPORT_SYMBOL(drm_dp_dsc_slice_count_to_mask);
/**
* drm_dp_dsc_sink_slice_count_mask() - Get the mask of valid DSC sink slice counts
* @dsc_dpcd: the sink's DSC DPCD capabilities
* @is_edp: %true for an eDP sink
*
* Get the mask of supported slice counts from the sink's DSC DPCD register.
*
* Returns:
* Mask of slice counts supported by the DSC sink:
* - > 0: bit#0,1,3,5..,23 set if the sink supports 1,2,4,6..,24 slices
* - 0: if the sink doesn't support any slices
*/
u32 drm_dp_dsc_sink_slice_count_mask(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp)
{
u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
u32 mask = 0;
if (!is_edp) {
/* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */
u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(24);
if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(20);
if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(16);
}
/* DP, eDP v1.5+ */
if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(12);
if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(10);
if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(8);
if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(6);
/* DP, eDP v1.4+ */
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(4);
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(2);
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
mask |= drm_dp_dsc_slice_count_to_mask(1);
return mask;
}
EXPORT_SYMBOL(drm_dp_dsc_sink_slice_count_mask);
/**
* drm_dp_dsc_sink_max_slice_count() - Get the max slice count
* supported by the DSC sink.
@ -2723,43 +2788,7 @@ EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr);
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp)
{
u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
if (is_edp) {
/* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
return 4;
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
return 2;
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
return 1;
} else {
/* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */
u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
return 24;
if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
return 20;
if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
return 16;
if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
return 12;
if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
return 10;
if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
return 8;
if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
return 6;
if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
return 4;
if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
return 2;
if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
return 1;
}
return 0;
return fls(drm_dp_dsc_sink_slice_count_mask(dsc_dpcd, is_edp));
}
EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);

View file

@ -27,7 +27,10 @@ i915-y += \
i915_config.o \
i915_driver.o \
i915_drm_client.o \
i915_edram.o \
i915_freq.o \
i915_getparam.o \
i915_gmch.o \
i915_ioctl.o \
i915_irq.o \
i915_mitigations.o \
@ -54,12 +57,6 @@ i915-y += \
vlv_iosf_sb.o \
vlv_suspend.o
# core peripheral code
i915-y += \
soc/intel_dram.o \
soc/intel_gmch.o \
soc/intel_rom.o
# core library code
i915-y += \
i915_memcpy.o \
@ -77,6 +74,12 @@ i915-$(CONFIG_DEBUG_FS) += \
i915-$(CONFIG_PERF_EVENTS) += \
i915_pmu.o
# core display adaptation
i915-y += \
i915_display_pc8.o \
i915_hdcp_gsc.o \
i915_panic.o
# "Graphics Technology" (aka we talk to the gpu)
gt-y += \
gt/gen2_engine_cs.o \
@ -267,6 +270,7 @@ i915-y += \
display/intel_dpll_mgr.o \
display/intel_dpt.o \
display/intel_dpt_common.o \
display/intel_dram.o \
display/intel_drrs.o \
display/intel_dsb.o \
display/intel_dsb_buffer.o \
@ -280,7 +284,6 @@ i915-y += \
display/intel_frontbuffer.o \
display/intel_global_state.o \
display/intel_hdcp.o \
display/intel_hdcp_gsc.o \
display/intel_hdcp_gsc_message.o \
display/intel_hotplug.o \
display/intel_hotplug_irq.o \
@ -292,7 +295,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
display/intel_panic.o \
display/intel_parent.o \
display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
@ -301,6 +304,7 @@ i915-y += \
display/intel_pmdemand.o \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_rom.o \
display/intel_sbi.o \
display/intel_sprite.o \
display/intel_sprite_uapi.o \

View file

@ -302,7 +302,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
wakeref = intel_display_power_get_if_enabled(display,
@ -684,12 +684,11 @@ static void intel_enable_dp(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 dp_reg = intel_de_read(display, intel_dp->output_reg);
intel_wakeref_t wakeref;
if (drm_WARN_ON(display->drm, dp_reg & DP_PORT_EN))
return;
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
if (display->platform.valleyview || display->platform.cherryview)
vlv_pps_port_enable_unlocked(encoder, pipe_config);

View file

@ -68,7 +68,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
wakeref = intel_display_power_get_if_enabled(display,

View file

@ -22,7 +22,6 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_sprite.h"
@ -134,7 +133,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct intel_display *display,
enum i9xx_plane_id i9xx_plane)
{
if (i9xx_plane_has_fbc(display, i9xx_plane))
return display->fbc[INTEL_FBC_A];
return display->fbc.instances[INTEL_FBC_A];
else
return NULL;
}
@ -724,7 +723,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
u32 val;
@ -819,7 +818,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
if (intel_plane_can_async_flip(plane, fb->format, fb->modifier))
return 256 * 1024;
/* FIXME undocumented so not sure what's actually needed */
@ -843,7 +842,7 @@ static unsigned int g4x_primary_min_alignment(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
if (intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
if (intel_plane_can_async_flip(plane, fb->format, fb->modifier))
return 256 * 1024;
if (intel_scanout_needs_vtd_wa(display))

View file

@ -7,8 +7,6 @@
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
@ -19,6 +17,7 @@
#include "intel_display.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_dram.h"
#include "intel_fb.h"
#include "intel_mchbar_regs.h"
#include "intel_wm.h"
@ -91,7 +90,7 @@ static const struct cxsr_latency cxsr_latency_table[] = {
static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct dram_info *dram_info = intel_dram_info(display);
bool is_ddr3 = dram_info->type == INTEL_DRAM_DDR3;
int i;

View file

@ -1411,7 +1411,7 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
intel_display_power_put(display,
@ -1722,7 +1722,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum transcoder dsi_trans;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
enum port port;
bool ret = false;
u32 tmp;

View file

@ -326,11 +326,9 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
if (intel_dp->as_sdp_supported) {
u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP)
if (crtc_state->link_off_after_as_sdp_when_pr_active)
pr_alpm_ctl |= PR_ALPM_CTL_ALLOW_LINK_OFF_BETWEEN_AS_SDP_AND_SU;
if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR))
if (crtc_state->disable_as_sdp_when_pr_active)
pr_alpm_ctl |= PR_ALPM_CTL_AS_SDP_TRANSMISSION_IN_ACTIVE_DISABLE;
intel_de_write(display, PR_ALPM_CTL(display, cpu_transcoder),

View file

@ -1042,10 +1042,10 @@ int intel_audio_min_cdclk(const struct intel_crtc_state *crtc_state)
static unsigned long intel_audio_component_get_power(struct device *kdev)
{
struct intel_display *display = to_intel_display(kdev);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
/* Catch potential impedance mismatches before they occur! */
BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
BUILD_BUG_ON(sizeof(wakeref) > sizeof(unsigned long));
wakeref = intel_display_power_get(display, POWER_DOMAIN_AUDIO_PLAYBACK);
@ -1074,7 +1074,7 @@ static void intel_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
struct intel_display *display = to_intel_display(kdev);
intel_wakeref_t wakeref = (intel_wakeref_t)cookie;
struct ref_tracker *wakeref = (struct ref_tracker *)cookie;
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--display->audio.power_refcount == 0)

View file

@ -34,14 +34,13 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include "soc/intel_rom.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_gmbus.h"
#include "intel_rom.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@ -2529,6 +2528,54 @@ intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata
return devdata->child.edp_data_rate_override & edp_rate_override_mask(rate);
}
static void sanitize_dedicated_external(struct intel_bios_encoder_data *devdata,
enum port port)
{
struct intel_display *display = devdata->display;
if (!intel_bios_encoder_is_dedicated_external(devdata))
return;
/*
* Since dedicated_external is for ports connected to PHYs outside of
* the Type-C subsystem, clear bits that would only make sense for ports
* with PHYs in the Type-C subsystem.
*/
/*
* Bit dp_usb_type_c is marked as "don't care" in Bspec when
* dedicated_external is set.
*/
if (devdata->child.dp_usb_type_c) {
drm_dbg_kms(display->drm,
"VBT claims Port %c supports USB Type-C, but the port is dedicated external, ignoring\n",
port_name(port));
devdata->child.dp_usb_type_c = 0;
}
/*
* Bit tbt is marked as "don't care" in Bspec when dedicated_external is
* set.
*/
if (devdata->child.tbt) {
drm_dbg_kms(display->drm,
"VBT claims Port %c supports TBT, but the port is dedicated external, ignoring\n",
port_name(port));
devdata->child.tbt = 0;
}
/*
* DDI allocation for TC capable ports only make sense for PHYs in the
* Type-C subsystem.
*/
if (devdata->child.dyn_port_over_tc) {
drm_dbg_kms(display->drm,
"VBT claims Port %c supports dynamic DDI allocation in TCSS, but the port is dedicated external, ignoring\n",
port_name(port));
devdata->child.dyn_port_over_tc = 0;
}
}
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@ -2693,6 +2740,16 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata)
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
if (intel_bios_encoder_is_dedicated_external(devdata))
drm_dbg_kms(display->drm,
"Port %c is dedicated external\n",
port_name(port));
if (intel_bios_encoder_supports_dyn_port_over_tc(devdata))
drm_dbg_kms(display->drm,
"Port %c supports dynamic DDI allocation in TCSS\n",
port_name(port));
hdmi_level_shift = intel_bios_hdmi_level_shift(devdata);
if (hdmi_level_shift >= 0) {
drm_dbg_kms(display->drm,
@ -2750,6 +2807,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
return;
}
sanitize_dedicated_external(devdata, port);
sanitize_device_type(devdata, port);
sanitize_hdmi_level_shift(devdata, port);
}
@ -2777,7 +2835,7 @@ static int child_device_expected_size(u16 version)
{
BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
if (version > 263)
if (version > 264)
return -ENOENT;
else if (version >= 263)
return 44;
@ -3721,6 +3779,18 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
return devdata->display->vbt.version >= 209 && devdata->child.tbt;
}
bool intel_bios_encoder_is_dedicated_external(const struct intel_bios_encoder_data *devdata)
{
return devdata->display->vbt.version >= 264 &&
devdata->child.dedicated_external;
}
bool intel_bios_encoder_supports_dyn_port_over_tc(const struct intel_bios_encoder_data *devdata)
{
return devdata->display->vbt.version >= 264 &&
devdata->child.dyn_port_over_tc;
}
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata)
{
return devdata && devdata->child.lane_reversal;

View file

@ -79,6 +79,8 @@ bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdat
bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_is_dedicated_external(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dyn_port_over_tc(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata);

View file

@ -6,6 +6,7 @@
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
#include "pxp/intel_pxp.h"
#include "i915_debugfs.h"
#include "intel_bo.h"
@ -29,6 +30,11 @@ bool intel_bo_is_protected(struct drm_gem_object *obj)
return i915_gem_object_is_protected(to_intel_bo(obj));
}
int intel_bo_key_check(struct drm_gem_object *obj)
{
return intel_pxp_key_check(obj, false);
}
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
return i915_gem_fb_mmap(to_intel_bo(obj), vma);

View file

@ -16,6 +16,7 @@ bool intel_bo_is_tiled(struct drm_gem_object *obj);
bool intel_bo_is_userptr(struct drm_gem_object *obj);
bool intel_bo_is_shmem(struct drm_gem_object *obj);
bool intel_bo_is_protected(struct drm_gem_object *obj);
int intel_bo_key_check(struct drm_gem_object *obj);
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);

View file

@ -6,8 +6,6 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_bw.h"
@ -16,6 +14,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
@ -800,7 +799,7 @@ static unsigned int icl_qgv_bw(struct intel_display *display,
void intel_bw_init_hw(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct dram_info *dram_info = intel_dram_info(display);
if (!HAS_DISPLAY(display))
return;

View file

@ -28,10 +28,7 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@ -42,11 +39,13 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_step.h"
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
@ -668,7 +667,7 @@ static void vlv_set_cdclk(struct intel_display *display,
{
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int ret;
switch (cdclk) {
@ -758,7 +757,7 @@ static void chv_set_cdclk(struct intel_display *display,
{
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int ret;
switch (cdclk) {
@ -3738,10 +3737,8 @@ static int pch_rawclk(struct intel_display *display)
static int i9xx_hrawclk(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
/* hrawclock is 1/4 the FSB frequency */
return DIV_ROUND_CLOSEST(intel_fsb_freq(i915), 4);
return DIV_ROUND_CLOSEST(intel_fsb_freq(display), 4);
}
/**

View file

@ -85,7 +85,6 @@ static bool intel_cmtg_transcoder_is_secondary(struct intel_display *display,
enum transcoder trans)
{
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
u32 val = 0;
if (!HAS_TRANSCODER(display, trans))
@ -93,7 +92,7 @@ static bool intel_cmtg_transcoder_is_secondary(struct intel_display *display,
power_domain = POWER_DOMAIN_TRANSCODER(trans);
with_intel_display_power_if_enabled(display, power_domain, wakeref)
with_intel_display_power_if_enabled(display, power_domain)
val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, trans));
return val & CMTG_SECONDARY_MODE;

View file

@ -2,7 +2,9 @@
/*
* Copyright © 2025 Intel Corporation
*/
#include "intel_colorop.h"
#include "intel_display_types.h"
struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
{

View file

@ -6,7 +6,9 @@
#ifndef __INTEL_COLOROP_H__
#define __INTEL_COLOROP_H__
#include "intel_display_types.h"
enum intel_color_block;
struct drm_colorop;
struct intel_colorop;
struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
struct intel_colorop *intel_colorop_alloc(void);

View file

@ -156,27 +156,17 @@ void intel_connector_destroy(struct drm_connector *connector)
int intel_connector_register(struct drm_connector *_connector)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
ret = intel_panel_register(connector);
if (ret)
goto err;
if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
goto err_panel;
}
return ret;
intel_connector_debugfs_add(connector);
return 0;
err_panel:
intel_panel_unregister(connector);
err:
return ret;
}
ALLOW_ERROR_INJECTION(intel_connector_register, ERRNO);
void intel_connector_unregister(struct drm_connector *_connector)
{

View file

@ -109,7 +109,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
wakeref = intel_display_power_get_if_enabled(display,
@ -847,7 +847,7 @@ intel_crt_detect(struct drm_connector *connector,
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
struct drm_atomic_state *state;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int status;
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] force=%d\n",
@ -936,7 +936,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct intel_display *display = to_intel_display(connector->dev);
struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector));
struct intel_encoder *encoder = &crt->base;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
struct i2c_adapter *ddc;
int ret;

View file

@ -13,8 +13,6 @@
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_atomic.h"
@ -28,6 +26,7 @@
#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_parent.h"
#include "intel_pipe_crc.h"
#include "intel_plane.h"
#include "intel_psr.h"
@ -309,7 +308,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
int intel_crtc_init(struct intel_display *display, enum pipe pipe)
static int __intel_crtc_init(struct intel_display *display, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
const struct drm_crtc_funcs *funcs;
@ -396,7 +395,7 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
if (HAS_CASF(display))
if (HAS_CASF(display) && crtc->num_scalers >= 2)
drm_crtc_create_sharpness_strength_property(&crtc->base);
return 0;
@ -407,6 +406,23 @@ fail:
return ret;
}
int intel_crtc_init(struct intel_display *display)
{
enum pipe pipe;
int ret;
drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
INTEL_NUM_PIPES(display), str_plural(INTEL_NUM_PIPES(display)));
for_each_pipe(display, pipe) {
ret = __intel_crtc_init(display, pipe);
if (ret)
return ret;
}
return 0;
}
int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@ -553,7 +569,7 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (old_plane_state->uapi.crtc == &crtc->base)
if (old_plane_state->hw.crtc == &crtc->base)
intel_plane_init_cursor_vblank_work(old_plane_state,
new_plane_state);
}
@ -671,7 +687,6 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
drm_WARN_ON(display->drm, new_crtc_state->use_dsb);
@ -706,7 +721,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
int i;
for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
if (old_plane_state->uapi.crtc == &crtc->base &&
if (old_plane_state->hw.crtc == &crtc->base &&
old_plane_state->unpin_work.vblank) {
drm_vblank_work_schedule(&old_plane_state->unpin_work,
drm_crtc_accurate_vblank_count(&crtc->base) + 1,
@ -737,7 +752,7 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
local_irq_enable();
if (intel_vgpu_active(dev_priv))
if (intel_parent_vgpu_active(display))
goto out;
if (crtc->debug.start_vbl_count &&

View file

@ -37,7 +37,7 @@ void intel_crtc_arm_vblank_event(struct intel_crtc_state *crtc_state);
void intel_crtc_prepare_vblank_event(struct intel_crtc_state *crtc_state,
struct drm_pending_vblank_event **event);
u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state);
int intel_crtc_init(struct intel_display *display, enum pipe pipe);
int intel_crtc_init(struct intel_display *display);
int intel_crtc_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc);

View file

@ -324,7 +324,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
@ -727,7 +727,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
u32 val;
@ -974,6 +974,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = intel_cursor_format_mod_supported,
.format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static void intel_cursor_add_size_hints_property(struct intel_plane *plane)

File diff suppressed because it is too large Load diff

View file

@ -11,6 +11,7 @@
#define MB_WRITE_COMMITTED true
#define MB_WRITE_UNCOMMITTED false
struct drm_printer;
enum icl_port_dpll_id;
struct intel_atomic_state;
struct intel_c10pll_state;
@ -19,6 +20,8 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_cx0pll_state;
struct intel_display;
struct intel_dpll;
struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_hdmi;
@ -26,22 +29,30 @@ void intel_clear_response_ready_flag(struct intel_encoder *encoder,
int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state);
void intel_mtl_pll_disable(struct intel_encoder *encoder);
enum icl_port_dpll_id
intel_mtl_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mtl_pll_enable_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_mtl_pll_disable_clock(struct intel_encoder *encoder);
void intel_mtl_pll_disable_clock(struct intel_encoder *encoder);
void intel_mtl_tbt_pll_enable_clock(struct intel_encoder *encoder,
int port_clock);
void intel_mtl_tbt_pll_disable_clock(struct intel_encoder *encoder);
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder);
void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
int intel_cx0pll_calc_state(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *hw_state);
bool intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_cx0pll_state *pll_state);
int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state);
void intel_cx0pll_dump_hw_state(struct intel_display *display,
void intel_cx0pll_dump_hw_state(struct drm_printer *p,
const struct intel_cx0pll_state *hw_state);
void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
@ -59,7 +70,13 @@ void intel_cx0_write(struct intel_encoder *encoder,
int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
int command, int lane, u32 *val);
void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane);
void intel_mtl_tbt_pll_calc_state(struct intel_dpll_hw_state *hw_state);
bool intel_mtl_tbt_pll_readout_hw_state(struct intel_display *display,
struct intel_dpll *pll,
struct intel_dpll_hw_state *hw_state);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);

View file

@ -89,6 +89,8 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
struct intel_dpll;
static const u8 index_to_dp_signal_levels[] = {
[0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,
[1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1,
@ -726,7 +728,7 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
bool enable, u32 hdcp_mask)
{
struct intel_display *display = to_intel_display(intel_encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int ret = 0;
wakeref = intel_display_power_get_if_enabled(display,
@ -747,7 +749,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
enum pipe pipe = 0;
u32 ddi_mode;
bool ret;
@ -803,7 +805,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
enum pipe p;
u32 tmp;
u8 mst_pipe_mask = 0, dp128b132b_pipe_mask = 0;
@ -846,7 +848,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
for_each_pipe(display, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
u32 port_mask, ddi_select, ddi_mode;
intel_wakeref_t trans_wakeref;
struct ref_tracker *trans_wakeref;
trans_wakeref = intel_display_power_get_if_enabled(display,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
@ -1000,7 +1002,7 @@ main_link_aux_power_domain_put(struct intel_digital_port *dig_port,
struct intel_display *display = to_intel_display(dig_port);
enum intel_display_power_domain domain =
intel_ddi_main_link_aux_domain(dig_port, crtc_state);
intel_wakeref_t wf;
struct ref_tracker *wf;
wf = fetch_and_zero(&dig_port->aux_wakeref);
if (!wf)
@ -2446,7 +2448,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
return;
}
drm_err(display->drm, "Failed to enable FEC after retries\n");
drm_dbg_kms(display->drm, "Failed to enable FEC after retries\n");
}
static void intel_ddi_disable_fec(struct intel_encoder *encoder,
@ -3128,7 +3130,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &dig_port->dp;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
@ -3196,7 +3198,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
@ -3667,8 +3669,8 @@ void intel_ddi_update_active_dpll(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc *pipe_crtc;
/* FIXME: Add MTL pll_mgr */
if (DISPLAY_VER(display) >= 14 || !intel_encoder_is_tc(encoder))
/* FIXME: Add NVL+ and DG2 pll_mgr */
if (!intel_encoder_is_tc(encoder) || !display->dpll.mgr)
return;
for_each_intel_crtc_in_pipe_mask(display->drm, pipe_crtc,
@ -3963,7 +3965,7 @@ static void bdw_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
for_each_cpu_transcoder_masked(display, cpu_transcoder, transcoders) {
enum intel_display_power_domain power_domain;
intel_wakeref_t trans_wakeref;
struct ref_tracker *trans_wakeref;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
trans_wakeref = intel_display_power_get_if_enabled(display,
@ -4255,19 +4257,70 @@ static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
intel_cx0pll_readout_hw_state(encoder, &crtc_state->dpll_hw_state.cx0pll);
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
if (crtc_state->dpll_hw_state.cx0pll.tbt_mode)
static void mtl_ddi_cx0_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
enum icl_port_dpll_id port_dpll_id,
enum intel_dpll_id pll_id)
{
struct intel_display *display = to_intel_display(encoder);
struct icl_port_dpll *port_dpll;
struct intel_dpll *pll;
bool pll_active;
port_dpll = &crtc_state->icl_port_dplls[port_dpll_id];
pll = intel_get_dpll_by_id(display, pll_id);
if (drm_WARN_ON(display->drm, !pll))
return;
port_dpll->pll = pll;
pll_active = intel_dpll_get_hw_state(display, pll, &port_dpll->hw_state);
drm_WARN_ON(display->drm, !pll_active);
icl_set_active_port_dpll(crtc_state, port_dpll_id);
if (icl_ddi_tc_pll_is_tbt(crtc_state->intel_dpll))
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
else
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->port_clock = intel_dpll_get_freq(display, crtc_state->intel_dpll,
&crtc_state->dpll_hw_state);
intel_ddi_get_config(encoder, crtc_state);
}
/*
* Get the configuration for either a port using a C10 PHY PLL, or a port using a
* C20 PHY PLL in the cases of:
* - BMG port A/B
* - PTL port B eDP over TypeC PHY
*/
static void mtl_ddi_non_tc_phy_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
mtl_ddi_cx0_get_config(encoder, crtc_state, ICL_PORT_DPLL_DEFAULT,
mtl_port_to_pll_id(display, encoder->port));
}
static void mtl_ddi_tc_phy_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
mtl_ddi_cx0_get_config(encoder, crtc_state, ICL_PORT_DPLL_DEFAULT,
DPLL_ID_ICL_TBTPLL);
else
mtl_ddi_cx0_get_config(encoder, crtc_state, ICL_PORT_DPLL_MG_PHY,
mtl_port_to_pll_id(display, encoder->port));
}
static void dg2_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@ -4305,11 +4358,6 @@ static void icl_ddi_combo_get_config(struct intel_encoder *encoder,
intel_ddi_get_config(encoder, crtc_state);
}
static bool icl_ddi_tc_pll_is_tbt(const struct intel_dpll *pll)
{
return pll->info->id == DPLL_ID_ICL_TBTPLL;
}
static enum icl_port_dpll_id
icl_ddi_tc_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
@ -5100,7 +5148,7 @@ static const char *intel_ddi_encoder_name(struct intel_display *display,
port_name(port - PORT_D_XELPD + PORT_D),
phy_name(phy));
} else if (DISPLAY_VER(display) >= 12) {
enum tc_port tc_port = intel_port_to_tc(display, port);
enum tc_port tc_port = intel_tc_phy_port_to_tc(display, port);
seq_buf_printf(s, "DDI %s%c/PHY %s%c",
port >= PORT_TC1 ? "TC" : "",
@ -5108,7 +5156,7 @@ static const char *intel_ddi_encoder_name(struct intel_display *display,
tc_port != TC_PORT_NONE ? "TC" : "",
tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
} else if (DISPLAY_VER(display) >= 11) {
enum tc_port tc_port = intel_port_to_tc(display, port);
enum tc_port tc_port = intel_tc_phy_port_to_tc(display, port);
seq_buf_printf(s, "DDI %c%s/PHY %s%c",
port_name(port),
@ -5252,10 +5300,13 @@ void intel_ddi_init(struct intel_display *display,
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = xe3plpd_ddi_get_config;
} else if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
encoder->get_config = mtl_ddi_get_config;
encoder->enable_clock = intel_mtl_pll_enable_clock;
encoder->disable_clock = intel_mtl_pll_disable_clock;
encoder->port_pll_type = icl_ddi_tc_port_pll_type;
if (intel_encoder_is_tc(encoder))
encoder->get_config = mtl_ddi_tc_phy_get_config;
else
encoder->get_config = mtl_ddi_non_tc_phy_get_config;
} else if (display->platform.dg2) {
encoder->enable_clock = intel_mpllb_enable;
encoder->disable_clock = intel_mpllb_disable;
@ -5372,6 +5423,17 @@ void intel_ddi_init(struct intel_display *display,
goto err;
}
/*
* FIXME: We currently need to store dedicated_external because devdata
* does not live long enough for when intel_encoder_is_tc() is called on
* the unbind path. This needs to be fixed by making sure that the VBT
* data is kept long enough, so that
* intel_bios_encoder_is_dedicated_external() can be called directly
* from intel_encoder_is_tc().
*/
if (intel_bios_encoder_is_dedicated_external(devdata))
dig_port->dedicated_external = true;
if (intel_encoder_is_tc(encoder)) {
bool is_legacy =
!intel_bios_encoder_supports_typec_usb(devdata) &&

View file

@ -372,7 +372,7 @@ void assert_transcoder(struct intel_display *display,
{
bool cur_state;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
/* we keep both pipes enabled on 830 */
if (display->platform.i830)
@ -1810,7 +1810,17 @@ bool intel_phy_is_combo(struct intel_display *display, enum phy phy)
return false;
}
/* Prefer intel_encoder_is_tc() */
/*
* This function returns true if the DDI port respective to the PHY enumeration
* is a Type-C capable port.
*
* Depending on the VBT, the port might be configured
* as a "dedicated external" port, meaning that actual physical PHY is outside
* of the Type-C subsystem and, as such, not really a "Type-C PHY".
*
* Prefer intel_encoder_is_tc(), especially if you really need to know if we
* are dealing with Type-C connections.
*/
bool intel_phy_is_tc(struct intel_display *display, enum phy phy)
{
/*
@ -1859,17 +1869,32 @@ enum phy intel_port_to_phy(struct intel_display *display, enum port port)
}
/* Prefer intel_encoder_to_tc() */
/*
* Return TC_PORT_1..I915_MAX_TC_PORTS for any TypeC DDI port. The function
* can be also called for TypeC DDI ports not connected to a TypeC PHY such as
* the PORT_TC1..4 ports on RKL/ADLS/BMG.
*/
enum tc_port intel_port_to_tc(struct intel_display *display, enum port port)
{
if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
return TC_PORT_NONE;
if (DISPLAY_VER(display) >= 12)
return TC_PORT_1 + port - PORT_TC1;
else
return TC_PORT_1 + port - PORT_C;
}
/*
* Return TC_PORT_1..I915_MAX_TC_PORTS for TypeC DDI ports connected to a TypeC PHY.
* Note that on RKL, ADLS, BMG the PORT_TC1..4 ports are connected to a non-TypeC
* PHY, so on those platforms the function returns TC_PORT_NONE.
*/
enum tc_port intel_tc_phy_port_to_tc(struct intel_display *display, enum port port)
{
if (!intel_phy_is_tc(display, intel_port_to_phy(display, port)))
return TC_PORT_NONE;
return intel_port_to_tc(display, port);
}
enum phy intel_encoder_to_phy(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
@ -1894,6 +1919,10 @@ bool intel_encoder_is_snps(struct intel_encoder *encoder)
bool intel_encoder_is_tc(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (dig_port && dig_port->dedicated_external)
return false;
return intel_phy_is_tc(display, intel_encoder_to_phy(encoder));
}
@ -1902,7 +1931,7 @@ enum tc_port intel_encoder_to_tc(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
return intel_port_to_tc(display, encoder->port);
return intel_tc_phy_port_to_tc(display, encoder->port);
}
enum intel_display_power_domain
@ -3020,7 +3049,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret = false;
u32 tmp;
@ -3364,7 +3393,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
enum transcoder cpu_transcoder = (enum transcoder)crtc->pipe;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret = false;
u32 tmp;
@ -3454,12 +3483,11 @@ static bool transcoder_ddi_func_is_enabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
u32 tmp = 0;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
with_intel_display_power_if_enabled(display, power_domain, wakeref)
with_intel_display_power_if_enabled(display, power_domain)
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
@ -3481,10 +3509,9 @@ static void enabled_uncompressed_joiner_pipes(struct intel_display *display,
joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
with_intel_display_power_if_enabled(display, power_domain, wakeref) {
with_intel_display_power_if_enabled(display, power_domain) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (tmp & UNCOMPRESSED_JOINER_PRIMARY)
@ -3510,10 +3537,9 @@ static void enabled_bigjoiner_pipes(struct intel_display *display,
joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
with_intel_display_power_if_enabled(display, power_domain, wakeref) {
with_intel_display_power_if_enabled(display, power_domain) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & BIG_JOINER_ENABLE))
@ -3580,10 +3606,9 @@ static void enabled_ultrajoiner_pipes(struct intel_display *display,
joiner_pipes(display)) {
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
power_domain = intel_dsc_power_domain(crtc, (enum transcoder)pipe);
with_intel_display_power_if_enabled(display, power_domain, wakeref) {
with_intel_display_power_if_enabled(display, power_domain) {
u32 tmp = intel_de_read(display, ICL_PIPE_DSS_CTL1(pipe));
if (!(tmp & ULTRA_JOINER_ENABLE))
@ -3741,12 +3766,11 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
for_each_cpu_transcoder_masked(display, cpu_transcoder,
panel_transcoder_mask) {
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
enum pipe trans_pipe;
u32 tmp = 0;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
with_intel_display_power_if_enabled(display, power_domain, wakeref)
with_intel_display_power_if_enabled(display, power_domain)
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, cpu_transcoder));
@ -4977,24 +5001,6 @@ pipe_config_pll_mismatch(struct drm_printer *p, bool fastset,
intel_dpll_dump_hw_state(display, p, b);
}
static void
pipe_config_cx0pll_mismatch(struct drm_printer *p, bool fastset,
const struct intel_crtc *crtc,
const char *name,
const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b)
{
struct intel_display *display = to_intel_display(crtc);
char *chipname = a->use_c10 ? "C10" : "C20";
pipe_config_mismatch(p, fastset, crtc, name, chipname);
drm_printf(p, "expected:\n");
intel_cx0pll_dump_hw_state(display, a);
drm_printf(p, "found:\n");
intel_cx0pll_dump_hw_state(display, b);
}
static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
@ -5146,16 +5152,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
#define PIPE_CONF_CHECK_PLL_CX0(name) do { \
if (!intel_cx0pll_compare_hw_state(&current_config->name, \
&pipe_config->name)) { \
pipe_config_cx0pll_mismatch(&p, fastset, crtc, __stringify(name), \
&current_config->name, \
&pipe_config->name); \
ret = false; \
} \
} while (0)
#define PIPE_CONF_CHECK_PLL_LT(name) do { \
if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
&pipe_config->name)) { \
@ -5395,8 +5391,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
/* FIXME convert MTL+ platforms over to dpll_mgr */
if (HAS_LT_PHY(display))
PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);
else if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
@ -6032,14 +6026,6 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}
/* FIXME: selective fetch should be disabled for async flips */
if (new_crtc_state->enable_psr2_sel_fetch) {
drm_dbg_kms(display->drm,
"[CRTC:%d:%s] async flip disallowed with PSR2 selective fetch\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)
@ -6130,7 +6116,7 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
if (!plane->async_flip)
continue;
if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format->format,
if (!intel_plane_can_async_flip(plane, new_plane_state->hw.fb->format,
new_plane_state->hw.fb->modifier)) {
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] pixel format %p4cc / modifier 0x%llx does not support async flip\n",
@ -7399,7 +7385,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
struct intel_crtc_state *new_crtc_state, *old_crtc_state;
struct intel_crtc *crtc;
struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = NULL;
struct ref_tracker *wakeref = NULL;
int i;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)

View file

@ -451,6 +451,7 @@ bool intel_phy_is_combo(struct intel_display *display, enum phy phy);
bool intel_phy_is_tc(struct intel_display *display, enum phy phy);
bool intel_phy_is_snps(struct intel_display *display, enum phy phy);
enum tc_port intel_port_to_tc(struct intel_display *display, enum port port);
enum tc_port intel_tc_phy_port_to_tc(struct intel_display *display, enum port port);
enum phy intel_encoder_to_phy(struct intel_encoder *encoder);
bool intel_encoder_is_combo(struct intel_encoder *encoder);

View file

@ -386,7 +386,7 @@ struct intel_display {
struct {
struct intel_dmc *dmc;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
} dmc;
struct {
@ -394,6 +394,21 @@ struct intel_display {
u32 mmio_base;
} dsi;
struct {
const struct dram_info *info;
} dram;
struct {
struct intel_fbc *instances[I915_MAX_FBCS];
/* xe3p_lpd+: FBC instance utilizing the system cache */
struct sys_cache_cfg {
/* Protect concurrecnt access to system cache configuration */
struct mutex lock;
enum intel_fbc_id id;
} sys_cache;
} fbc;
struct {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@ -611,7 +626,6 @@ struct intel_display {
struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
struct intel_audio audio;
struct intel_dpll_global dpll;
struct intel_fbc *fbc[I915_MAX_FBCS];
struct intel_frontbuffer_tracking fb_tracking;
struct intel_hotplug hotplug;
struct intel_opregion *opregion;

View file

@ -86,7 +86,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct intel_display *display = node_to_intel_display(m->private);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool sr_enabled = false;
wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);

View file

@ -1420,6 +1420,10 @@ static const struct platform_desc ptl_desc = {
}
};
static const struct platform_desc nvl_desc = {
PLATFORM(novalake),
};
__diag_pop();
/*
@ -1495,6 +1499,7 @@ static const struct {
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
INTEL_NVLS_IDS(INTEL_DISPLAY_DEVICE, &nvl_desc),
};
static const struct {

View file

@ -103,7 +103,9 @@ struct pci_dev;
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
func(pantherlake) \
func(pantherlake_wildcatlake)
func(pantherlake_wildcatlake) \
/* Display ver 35 (based on GMD ID) */ \
func(novalake)
#define __MEMBER(name) unsigned long name:1;
@ -147,7 +149,7 @@ struct intel_display_platforms {
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_AUX_CCS(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake)
#define HAS_AUX_DIST(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
#define HAS_CASF(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
@ -173,6 +175,7 @@ struct intel_display_platforms {
#define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display))
#define HAS_FBC(__display) (DISPLAY_RUNTIME_INFO(__display)->fbc_mask != 0)
#define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_FBC_SYS_CACHE(__display) (DISPLAY_VER(__display) >= 35 && !(__display)->platform.dgfx)
#define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg)
#define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3)
#define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake)
@ -185,6 +188,7 @@ struct intel_display_platforms {
#define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell)
#define HAS_LRR(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_LSPCON(__display) (IS_DISPLAY_VER(__display, 9, 10))
#define HAS_LT_PHY(__display) ((__display)->platform.novalake)
#define HAS_MBUS_JOINING(__display) ((__display)->platform.alderlake_p || DISPLAY_VER(__display) >= 14)
#define HAS_MSO(__display) (DISPLAY_VER(__display) >= 12)
#define HAS_OVERLAY(__display) (DISPLAY_INFO(__display)->has_overlay)
@ -197,6 +201,7 @@ struct intel_display_platforms {
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
BIT(trans)) != 0)
#define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13)
#define HAS_UNDERRUN_DBG_INFO(__display) (DISPLAY_VER(__display) >= 35)
#define HAS_ULTRAJOINER(__display) (((__display)->platform.dgfx && \
DISPLAY_VER(__display) == 14) && HAS_DSC(__display))
#define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11)

View file

@ -199,12 +199,8 @@ void intel_display_driver_early_probe(struct intel_display *display)
/* part #1: call before irq install */
int intel_display_driver_probe_noirq(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
if (i915_inject_probe_failure(i915))
return -ENODEV;
if (HAS_DISPLAY(display)) {
ret = drm_vblank_init(display->drm,
INTEL_NUM_PIPES(display));
@ -317,6 +313,7 @@ cleanup_bios:
return ret;
}
ALLOW_ERROR_INJECTION(intel_display_driver_probe_noirq, ERRNO);
static void set_display_access(struct intel_display *display,
bool any_task_allowed,
@ -452,7 +449,6 @@ bool intel_display_driver_check_access(struct intel_display *display)
/* part #2: call after irq install, but before gem init */
int intel_display_driver_probe_nogem(struct intel_display *display)
{
enum pipe pipe;
int ret;
if (!HAS_DISPLAY(display))
@ -466,15 +462,9 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_gmbus_setup(display);
drm_dbg_kms(display->drm, "%d display pipe%s available.\n",
INTEL_NUM_PIPES(display),
INTEL_NUM_PIPES(display) > 1 ? "s" : "");
for_each_pipe(display, pipe) {
ret = intel_crtc_init(display, pipe);
if (ret)
goto err_mode_config;
}
ret = intel_crtc_init(display);
if (ret)
goto err_mode_config;
intel_plane_possible_crtcs_init(display);
intel_dpll_init(display);

View file

@ -6,8 +6,6 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
@ -19,57 +17,83 @@
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dmc_wl.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_fdi_regs.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug_irq.h"
#include "intel_parent.h"
#include "intel_pipe_crc_regs.h"
#include "intel_plane.h"
#include "intel_pmdemand.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_uncore.h"
static void
intel_display_irq_regs_init(struct intel_display *display, struct i915_irq_regs regs,
u32 imr_val, u32 ier_val)
static void irq_reset(struct intel_display *display, struct i915_irq_regs regs)
{
intel_dmc_wl_get(display, regs.imr);
intel_dmc_wl_get(display, regs.ier);
intel_dmc_wl_get(display, regs.iir);
intel_de_write(display, regs.imr, 0xffffffff);
intel_de_posting_read(display, regs.imr);
gen2_irq_init(to_intel_uncore(display->drm), regs, imr_val, ier_val);
intel_de_write(display, regs.ier, 0);
intel_dmc_wl_put(display, regs.iir);
intel_dmc_wl_put(display, regs.ier);
intel_dmc_wl_put(display, regs.imr);
/* IIR can theoretically queue up two events. Be paranoid. */
intel_de_write(display, regs.iir, 0xffffffff);
intel_de_posting_read(display, regs.iir);
intel_de_write(display, regs.iir, 0xffffffff);
intel_de_posting_read(display, regs.iir);
}
static void
intel_display_irq_regs_reset(struct intel_display *display, struct i915_irq_regs regs)
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
static void assert_iir_is_zero(struct intel_display *display, i915_reg_t reg)
{
intel_dmc_wl_get(display, regs.imr);
intel_dmc_wl_get(display, regs.ier);
intel_dmc_wl_get(display, regs.iir);
u32 val = intel_de_read(display, reg);
gen2_irq_reset(to_intel_uncore(display->drm), regs);
if (val == 0)
return;
intel_dmc_wl_put(display, regs.iir);
intel_dmc_wl_put(display, regs.ier);
intel_dmc_wl_put(display, regs.imr);
drm_WARN(display->drm, 1,
"Interrupt register 0x%x is not zero: 0x%08x\n",
i915_mmio_reg_offset(reg), val);
intel_de_write(display, reg, 0xffffffff);
intel_de_posting_read(display, reg);
intel_de_write(display, reg, 0xffffffff);
intel_de_posting_read(display, reg);
}
static void
intel_display_irq_regs_assert_irr_is_zero(struct intel_display *display, i915_reg_t reg)
static void irq_init(struct intel_display *display, struct i915_irq_regs regs,
u32 imr_val, u32 ier_val)
{
intel_dmc_wl_get(display, reg);
assert_iir_is_zero(display, regs.iir);
gen2_assert_iir_is_zero(to_intel_uncore(display->drm), reg);
intel_de_write(display, regs.ier, ier_val);
intel_de_write(display, regs.imr, imr_val);
intel_de_posting_read(display, regs.imr);
}
intel_dmc_wl_put(display, reg);
static void error_reset(struct intel_display *display, struct i915_error_regs regs)
{
intel_de_write(display, regs.emr, 0xffffffff);
intel_de_posting_read(display, regs.emr);
intel_de_write(display, regs.eir, 0xffffffff);
intel_de_posting_read(display, regs.eir);
intel_de_write(display, regs.eir, 0xffffffff);
intel_de_posting_read(display, regs.eir);
}
static void error_init(struct intel_display *display, struct i915_error_regs regs,
u32 emr_val)
{
intel_de_write(display, regs.eir, 0xffffffff);
intel_de_posting_read(display, regs.eir);
intel_de_write(display, regs.eir, 0xffffffff);
intel_de_posting_read(display, regs.eir);
intel_de_write(display, regs.emr, emr_val);
intel_de_posting_read(display, regs.emr);
}
struct pipe_fault_handler {
@ -135,7 +159,6 @@ intel_handle_vblank(struct intel_display *display, enum pipe pipe)
void ilk_update_display_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
lockdep_assert_held(&display->irq.lock);
@ -146,7 +169,7 @@ void ilk_update_display_irq(struct intel_display *display,
new_val |= (~enabled_irq_mask & interrupt_mask);
if (new_val != display->irq.ilk_de_imr_mask &&
!drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
!drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display))) {
display->irq.ilk_de_imr_mask = new_val;
intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
intel_de_posting_read(display, DEIMR);
@ -172,7 +195,6 @@ void ilk_disable_display_irq(struct intel_display *display, u32 bits)
void bdw_update_port_irq(struct intel_display *display,
u32 interrupt_mask, u32 enabled_irq_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
u32 old_val;
@ -180,7 +202,7 @@ void bdw_update_port_irq(struct intel_display *display,
drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
if (drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display)))
return;
old_val = intel_de_read(display, GEN8_DE_PORT_IMR);
@ -206,14 +228,13 @@ static void bdw_update_pipe_irq(struct intel_display *display,
enum pipe pipe, u32 interrupt_mask,
u32 enabled_irq_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 new_val;
lockdep_assert_held(&display->irq.lock);
drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
if (drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display)))
return;
new_val = display->irq.de_pipe_imr_mask[pipe];
@ -249,7 +270,6 @@ void ibx_display_interrupt_update(struct intel_display *display,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 sdeimr = intel_de_read(display, SDEIMR);
sdeimr &= ~interrupt_mask;
@ -259,7 +279,7 @@ void ibx_display_interrupt_update(struct intel_display *display,
lockdep_assert_held(&display->irq.lock);
if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
if (drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display)))
return;
intel_de_write(display, SDEIMR, sdeimr);
@ -323,7 +343,6 @@ out:
void i915_enable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
@ -332,7 +351,7 @@ void i915_enable_pipestat(struct intel_display *display,
pipe_name(pipe), status_mask);
lockdep_assert_held(&display->irq.lock);
drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display));
if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
@ -347,7 +366,6 @@ void i915_enable_pipestat(struct intel_display *display,
void i915_disable_pipestat(struct intel_display *display,
enum pipe pipe, u32 status_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t reg = PIPESTAT(display, pipe);
u32 enable_mask;
@ -356,7 +374,7 @@ void i915_disable_pipestat(struct intel_display *display,
pipe_name(pipe), status_mask);
lockdep_assert_held(&display->irq.lock);
drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
drm_WARN_ON(display->drm, !intel_parent_irq_enabled(display));
if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0)
return;
@ -1918,15 +1936,14 @@ static void _vlv_display_irq_reset(struct intel_display *display)
else
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
gen2_error_reset(to_intel_uncore(display->drm),
VLV_ERROR_REGS);
error_reset(display, VLV_ERROR_REGS);
i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0);
intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0);
i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
irq_reset(display, VLV_IRQ_REGS);
display->irq.vlv_imr_mask = ~0u;
}
@ -2014,8 +2031,7 @@ static void _vlv_display_irq_postinstall(struct intel_display *display)
DPINVGTT_STATUS_MASK_VLV |
DPINVGTT_EN_MASK_VLV);
gen2_error_init(to_intel_uncore(display->drm),
VLV_ERROR_REGS, ~vlv_error_mask());
error_init(display, VLV_ERROR_REGS, ~vlv_error_mask());
pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
@ -2038,7 +2054,7 @@ static void _vlv_display_irq_postinstall(struct intel_display *display)
display->irq.vlv_imr_mask = ~enable_mask;
intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
irq_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
}
void vlv_display_irq_postinstall(struct intel_display *display)
@ -2054,7 +2070,7 @@ static void ibx_display_irq_reset(struct intel_display *display)
if (HAS_PCH_NOP(display))
return;
gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
irq_reset(display, SDE_IRQ_REGS);
if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
intel_de_write(display, SERR_INT, 0xffffffff);
@ -2062,9 +2078,7 @@ static void ibx_display_irq_reset(struct intel_display *display)
void ilk_display_irq_reset(struct intel_display *display)
{
struct intel_uncore *uncore = to_intel_uncore(display->drm);
gen2_irq_reset(uncore, DE_IRQ_REGS);
irq_reset(display, DE_IRQ_REGS);
display->irq.ilk_de_imr_mask = ~0u;
if (DISPLAY_VER(display) == 7)
@ -2091,10 +2105,10 @@ void gen8_display_irq_reset(struct intel_display *display)
for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
irq_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
irq_reset(display, GEN8_DE_PORT_IRQ_REGS);
irq_reset(display, GEN8_DE_MISC_IRQ_REGS);
if (HAS_PCH_SPLIT(display))
ibx_display_irq_reset(display);
@ -2136,39 +2150,38 @@ void gen11_display_irq_reset(struct intel_display *display)
for_each_pipe(display, pipe)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
irq_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
irq_reset(display, GEN8_DE_PORT_IRQ_REGS);
irq_reset(display, GEN8_DE_MISC_IRQ_REGS);
if (DISPLAY_VER(display) >= 14)
intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS);
irq_reset(display, PICAINTERRUPT_IRQ_REGS);
else
intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS);
irq_reset(display, GEN11_DE_HPD_IRQ_REGS);
if (INTEL_PCH_TYPE(display) >= PCH_ICP)
intel_display_irq_regs_reset(display, SDE_IRQ_REGS);
irq_reset(display, SDE_IRQ_REGS);
}
void gen8_irq_power_well_post_enable(struct intel_display *display,
u8 pipe_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN |
gen8_de_pipe_flip_done_mask(display);
enum pipe pipe;
spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
if (!intel_parent_irq_enabled(display)) {
spin_unlock_irq(&display->irq.lock);
return;
}
for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
display->irq.de_pipe_imr_mask[pipe],
~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
irq_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
display->irq.de_pipe_imr_mask[pipe],
~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
spin_unlock_irq(&display->irq.lock);
}
@ -2176,23 +2189,22 @@ void gen8_irq_power_well_post_enable(struct intel_display *display,
void gen8_irq_power_well_pre_disable(struct intel_display *display,
u8 pipe_mask)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
enum pipe pipe;
spin_lock_irq(&display->irq.lock);
if (!intel_irqs_enabled(dev_priv)) {
if (!intel_parent_irq_enabled(display)) {
spin_unlock_irq(&display->irq.lock);
return;
}
for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
irq_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe));
spin_unlock_irq(&display->irq.lock);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
intel_parent_irq_synchronize(display);
}
/*
@ -2220,13 +2232,11 @@ static void ibx_irq_postinstall(struct intel_display *display)
else
mask = SDE_GMBUS_CPT;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
irq_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void valleyview_enable_display_irqs(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
spin_lock_irq(&display->irq.lock);
if (display->irq.vlv_display_irqs_enabled)
@ -2234,7 +2244,7 @@ void valleyview_enable_display_irqs(struct intel_display *display)
display->irq.vlv_display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv)) {
if (intel_parent_irq_enabled(display)) {
_vlv_display_irq_reset(display);
_vlv_display_irq_postinstall(display);
}
@ -2245,8 +2255,6 @@ out:
void valleyview_disable_display_irqs(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
spin_lock_irq(&display->irq.lock);
if (!display->irq.vlv_display_irqs_enabled)
@ -2254,7 +2262,7 @@ void valleyview_disable_display_irqs(struct intel_display *display)
display->irq.vlv_display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
if (intel_parent_irq_enabled(display))
_vlv_display_irq_reset(display);
out:
spin_unlock_irq(&display->irq.lock);
@ -2286,7 +2294,7 @@ void ilk_de_irq_postinstall(struct intel_display *display)
}
if (display->platform.haswell) {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
assert_iir_is_zero(display, EDP_PSR_IIR);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@ -2297,8 +2305,8 @@ void ilk_de_irq_postinstall(struct intel_display *display)
ibx_irq_postinstall(display);
intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
display_mask | extra_mask);
irq_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
display_mask | extra_mask);
}
static void mtp_irq_postinstall(struct intel_display *display);
@ -2374,11 +2382,10 @@ void gen8_de_irq_postinstall(struct intel_display *display)
if (!intel_display_power_is_enabled(display, domain))
continue;
intel_display_irq_regs_assert_irr_is_zero(display,
TRANS_PSR_IIR(display, trans));
assert_iir_is_zero(display, TRANS_PSR_IIR(display, trans));
}
} else {
intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR);
assert_iir_is_zero(display, EDP_PSR_IIR);
}
for_each_pipe(display, pipe) {
@ -2386,44 +2393,50 @@ void gen8_de_irq_postinstall(struct intel_display *display)
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
display->irq.de_pipe_imr_mask[pipe],
de_pipe_enables);
irq_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
display->irq.de_pipe_imr_mask[pipe],
de_pipe_enables);
}
intel_display_irq_regs_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked,
de_port_enables);
intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked,
de_misc_masked);
irq_init(display, GEN8_DE_PORT_IRQ_REGS, ~de_port_masked, de_port_enables);
irq_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked);
if (IS_DISPLAY_VER(display, 11, 13)) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
intel_display_irq_regs_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked,
de_hpd_enables);
irq_init(display, GEN11_DE_HPD_IRQ_REGS, ~de_hpd_masked, de_hpd_enables);
}
}
u32 xelpdp_pica_aux_mask(struct intel_display *display)
{
u32 mask = XELPDP_AUX_TC_MASK;
if (DISPLAY_VER(display) >= 20)
mask |= XE2LPD_AUX_DDI_MASK;
return mask;
}
static void mtp_irq_postinstall(struct intel_display *display)
{
u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
u32 de_hpd_mask = xelpdp_pica_aux_mask(display);
u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
XELPDP_TBT_HOTPLUG_MASK;
intel_display_irq_regs_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask,
de_hpd_enables);
irq_init(display, PICAINTERRUPT_IRQ_REGS, ~de_hpd_mask, de_hpd_enables);
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
irq_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff);
}
static void icp_irq_postinstall(struct intel_display *display)
{
u32 mask = SDE_GMBUS_ICP;
intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
irq_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff);
}
void gen11_de_irq_postinstall(struct intel_display *display)

View file

@ -16,6 +16,8 @@ struct drm_printer;
struct intel_display;
struct intel_display_irq_snapshot;
u32 xelpdp_pica_aux_mask(struct intel_display *display);
void valleyview_enable_display_irqs(struct intel_display *display);
void valleyview_disable_display_irqs(struct intel_display *display);

View file

@ -8,10 +8,7 @@
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
@ -26,7 +23,9 @@
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_parent.h"
#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pmdemand.h"
@ -545,8 +544,8 @@ __intel_display_power_get_domain(struct intel_display *display,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
struct ref_tracker *intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &display->power.domains;
struct ref_tracker *wakeref;
@ -572,7 +571,7 @@ intel_wakeref_t intel_display_power_get(struct intel_display *display,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
intel_wakeref_t
struct ref_tracker *
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain)
{
@ -639,7 +638,7 @@ static void __intel_display_power_put(struct intel_display *display,
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
intel_wakeref_t wakeref,
struct ref_tracker *wakeref,
int delay_ms)
{
struct intel_display *display = container_of(power_domains,
@ -741,7 +740,7 @@ out_verify:
*/
void __intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
struct ref_tracker *wakeref,
int delay_ms)
{
struct i915_power_domains *power_domains = &display->power.domains;
@ -800,7 +799,7 @@ void intel_display_power_flush_work(struct intel_display *display)
{
struct i915_power_domains *power_domains = &display->power.domains;
struct intel_power_domain_mask async_put_mask;
intel_wakeref_t work_wakeref;
struct ref_tracker *work_wakeref;
mutex_lock(&power_domains->lock);
@ -854,7 +853,7 @@ intel_display_power_flush_work_sync(struct intel_display *display)
*/
void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
struct ref_tracker *wakeref)
{
__intel_display_power_put(display, domain);
intel_display_rpm_put(display, wakeref);
@ -886,7 +885,7 @@ intel_display_power_get_in_set(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
intel_wakeref_t __maybe_unused wf;
struct ref_tracker *__maybe_unused wf;
drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
@ -902,7 +901,7 @@ intel_display_power_get_in_set_if_enabled(struct intel_display *display,
struct intel_display_power_domain_set *power_domain_set,
enum intel_display_power_domain domain)
{
intel_wakeref_t wf;
struct ref_tracker *wf;
drm_WARN_ON(display->drm, test_bit(domain, power_domain_set->mask.bits));
@ -929,7 +928,7 @@ intel_display_power_put_mask_in_set(struct intel_display *display,
!bitmap_subset(mask->bits, power_domain_set->mask.bits, POWER_DOMAIN_NUM));
for_each_power_domain(domain, mask) {
intel_wakeref_t __maybe_unused wf = INTEL_WAKEREF_DEF;
struct ref_tracker *__maybe_unused wf = INTEL_WAKEREF_DEF;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
wf = fetch_and_zero(&power_domain_set->wakerefs[domain]);
@ -1202,7 +1201,6 @@ static void hsw_assert_cdclk(struct intel_display *display)
static void assert_can_disable_lcpll(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_crtc *crtc;
for_each_intel_crtc(display->drm, crtc)
@ -1247,7 +1245,7 @@ static void assert_can_disable_lcpll(struct intel_display *display)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
INTEL_DISPLAY_STATE_WARN(display, intel_irqs_enabled(dev_priv),
INTEL_DISPLAY_STATE_WARN(display, intel_parent_irq_enabled(display),
"IRQs enabled\n");
}
@ -1341,10 +1339,10 @@ static void hsw_restore_lcpll(struct intel_display *display)
return;
/*
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
* Make sure we're not on PC8 state before disabling
* PC8, otherwise we'll hang the machine.
*/
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
intel_parent_pc8_block(display);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@ -1374,7 +1372,7 @@ static void hsw_restore_lcpll(struct intel_display *display)
"Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_parent_pc8_unblock(display);
intel_update_cdclk(display);
intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK");
@ -1417,8 +1415,6 @@ static void hsw_enable_pc8(struct intel_display *display)
static void hsw_disable_pc8(struct intel_display *display)
{
struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
drm_dbg_kms(display->drm, "Disabling package C8+\n");
hsw_restore_lcpll(display);
@ -1426,7 +1422,7 @@ static void hsw_disable_pc8(struct intel_display *display)
/* Many display registers don't survive PC8+ */
#ifdef I915 /* FIXME */
intel_clock_gating_init(dev_priv);
intel_clock_gating_init(display->drm);
#endif
}
@ -1618,7 +1614,7 @@ static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
static void tgl_bw_buddy_init(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct dram_info *dram_info = intel_dram_info(display);
const struct buddy_page_mask *table;
unsigned long abox_mask = DISPLAY_INFO(display)->abox_mask;
int config, i;
@ -2006,7 +2002,7 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume)
*/
void intel_power_domains_driver_remove(struct intel_display *display)
{
intel_wakeref_t wakeref __maybe_unused =
struct ref_tracker *wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
@ -2067,7 +2063,7 @@ void intel_power_domains_sanitize_state(struct intel_display *display)
*/
void intel_power_domains_enable(struct intel_display *display)
{
intel_wakeref_t wakeref __maybe_unused =
struct ref_tracker *wakeref __maybe_unused =
fetch_and_zero(&display->power.domains.init_wakeref);
intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
@ -2106,7 +2102,7 @@ void intel_power_domains_disable(struct intel_display *display)
void intel_power_domains_suspend(struct intel_display *display, bool s2idle)
{
struct i915_power_domains *power_domains = &display->power.domains;
intel_wakeref_t wakeref __maybe_unused =
struct ref_tracker *wakeref __maybe_unused =
fetch_and_zero(&power_domains->init_wakeref);
intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);

View file

@ -9,15 +9,17 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "intel_wakeref.h"
enum aux_ch;
enum port;
struct i915_power_well;
struct intel_display;
struct intel_encoder;
struct ref_tracker;
struct seq_file;
/* -ENOENT means we got the ref, but there's no tracking */
#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT)
/*
* Keep the pipe, transcoder, port (DDI_LANES,DDI_IO,AUX) domain instances
* consecutive, so that the pipe,transcoder,port -> power domain macros
@ -142,14 +144,14 @@ struct i915_power_domains {
u32 target_dc_state;
u32 allowed_dc_mask;
intel_wakeref_t init_wakeref;
intel_wakeref_t disable_wakeref;
struct ref_tracker *init_wakeref;
struct ref_tracker *disable_wakeref;
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
struct delayed_work async_put_work;
intel_wakeref_t async_put_wakeref;
struct ref_tracker *async_put_wakeref;
struct intel_power_domain_mask async_put_domains[2];
int async_put_next_delay;
@ -159,7 +161,7 @@ struct i915_power_domains {
struct intel_display_power_domain_set {
struct intel_power_domain_mask mask;
#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
struct ref_tracker *wakerefs[POWER_DOMAIN_NUM];
#endif
};
@ -187,24 +189,24 @@ u32 intel_display_power_get_current_dc_state(struct intel_display *display);
bool intel_display_power_is_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
intel_wakeref_t intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain);
intel_wakeref_t
struct ref_tracker *intel_display_power_get(struct intel_display *display,
enum intel_display_power_domain domain);
struct ref_tracker *
intel_display_power_get_if_enabled(struct intel_display *display,
enum intel_display_power_domain domain);
void __intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
struct ref_tracker *wakeref,
int delay_ms);
void intel_display_power_flush_work(struct intel_display *display);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref);
struct ref_tracker *wakeref);
static inline void
intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
struct ref_tracker *wakeref)
{
__intel_display_power_put_async(display, domain, wakeref, -1);
}
@ -212,7 +214,7 @@ intel_display_power_put_async(struct intel_display *display,
static inline void
intel_display_power_put_async_delay(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
struct ref_tracker *wakeref,
int delay_ms)
{
__intel_display_power_put_async(display, domain, wakeref, delay_ms);
@ -224,7 +226,7 @@ void intel_display_power_put_unchecked(struct intel_display *display,
static inline void
intel_display_power_put(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
struct ref_tracker *wakeref)
{
intel_display_power_put_unchecked(display, domain);
}
@ -232,7 +234,7 @@ intel_display_power_put(struct intel_display *display,
static inline void
intel_display_power_put_async(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
struct ref_tracker *wakeref)
{
__intel_display_power_put_async(display, domain, INTEL_WAKEREF_DEF, -1);
}
@ -240,7 +242,7 @@ intel_display_power_put_async(struct intel_display *display,
static inline void
intel_display_power_put_async_delay(struct intel_display *display,
enum intel_display_power_domain domain,
intel_wakeref_t wakeref,
struct ref_tracker *wakeref,
int delay_ms)
{
__intel_display_power_put_async(display, domain, INTEL_WAKEREF_DEF, delay_ms);
@ -297,12 +299,18 @@ enum dbuf_slice {
void gen9_dbuf_slices_update(struct intel_display *display,
u8 req_slices);
#define with_intel_display_power(display, domain, wf) \
for ((wf) = intel_display_power_get((display), (domain)); (wf); \
#define __with_intel_display_power(display, domain, wf) \
for (struct ref_tracker *(wf) = intel_display_power_get((display), (domain)); (wf); \
intel_display_power_put_async((display), (domain), (wf)), (wf) = NULL)
#define with_intel_display_power_if_enabled(display, domain, wf) \
for ((wf) = intel_display_power_get_if_enabled((display), (domain)); (wf); \
#define with_intel_display_power(display, domain) \
__with_intel_display_power(display, domain, __UNIQUE_ID(wakeref))
#define __with_intel_display_power_if_enabled(display, domain, wf) \
for (struct ref_tracker *(wf) = intel_display_power_get_if_enabled((display), (domain)); (wf); \
intel_display_power_put_async((display), (domain), (wf)), (wf) = NULL)
#define with_intel_display_power_if_enabled(display, domain) \
__with_intel_display_power_if_enabled(display, domain, __UNIQUE_ID(wakeref))
#endif /* __INTEL_DISPLAY_POWER_H__ */

View file

@ -7,8 +7,6 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
@ -28,6 +26,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_parent.h"
#include "intel_pcode.h"
#include "intel_pps.h"
#include "intel_psr.h"
@ -258,8 +257,9 @@ aux_ch_to_digital_port(struct intel_display *display,
return NULL;
}
static enum phy icl_aux_pw_to_phy(struct intel_display *display,
const struct i915_power_well *power_well)
static struct intel_encoder *
icl_aux_pw_to_encoder(struct intel_display *display,
const struct i915_power_well *power_well)
{
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(display, aux_ch);
@ -271,7 +271,23 @@ static enum phy icl_aux_pw_to_phy(struct intel_display *display,
* as HDMI-only and routed to a combo PHY, the encoder either won't be
* present at all or it will not have an aux_ch assigned.
*/
return dig_port ? intel_encoder_to_phy(&dig_port->base) : PHY_NONE;
return dig_port ? &dig_port->base : NULL;
}
static enum phy icl_aux_pw_to_phy(struct intel_display *display,
const struct i915_power_well *power_well)
{
struct intel_encoder *encoder = icl_aux_pw_to_encoder(display, power_well);
return encoder ? intel_encoder_to_phy(encoder) : PHY_NONE;
}
static bool icl_aux_pw_is_tc_phy(struct intel_display *display,
const struct i915_power_well *power_well)
{
struct intel_encoder *encoder = icl_aux_pw_to_encoder(display, power_well);
return encoder && intel_encoder_is_tc(encoder);
}
static void hsw_wait_for_power_well_enable(struct intel_display *display,
@ -570,9 +586,7 @@ static void
icl_aux_power_well_enable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(display, phy))
if (icl_aux_pw_is_tc_phy(display, power_well))
return icl_tc_phy_aux_power_well_enable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_enable(display,
@ -585,9 +599,7 @@ static void
icl_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(display, phy))
if (icl_aux_pw_is_tc_phy(display, power_well))
return hsw_power_well_disable(display, power_well);
else if (display->platform.icelake)
return icl_combo_phy_aux_power_well_disable(display,
@ -628,8 +640,6 @@ static bool hsw_power_well_enabled(struct intel_display *display,
static void assert_can_enable_dc9(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
drm_WARN_ONCE(display->drm,
(intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
@ -641,7 +651,7 @@ static void assert_can_enable_dc9(struct intel_display *display)
intel_de_read(display, HSW_PWR_WELL_CTL2) &
HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
"Power well 2 on.\n");
drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
drm_WARN_ONCE(display->drm, intel_parent_irq_enabled(display),
"Interrupts not disabled yet.\n");
/*
@ -655,9 +665,7 @@ static void assert_can_enable_dc9(struct intel_display *display)
static void assert_can_disable_dc9(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
drm_WARN_ONCE(display->drm, intel_irqs_enabled(dev_priv),
drm_WARN_ONCE(display->drm, intel_parent_irq_enabled(display),
"Interrupts not disabled yet.\n");
drm_WARN_ONCE(display->drm,
intel_de_read(display, DC_STATE_EN) &
@ -1281,12 +1289,10 @@ static void vlv_display_power_well_init(struct intel_display *display)
static void vlv_display_power_well_deinit(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
valleyview_disable_display_irqs(display);
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
intel_parent_irq_synchronize(display);
vlv_pps_reset_all(display);
@ -1852,7 +1858,7 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
enum phy phy = icl_aux_pw_to_phy(display, power_well);
if (intel_phy_is_tc(display, phy))
if (icl_aux_pw_is_tc_phy(display, power_well))
icl_tc_port_assert_ref_held(display, power_well,
aux_ch_to_digital_port(display, aux_ch));
@ -1860,19 +1866,19 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST);
/*
* The power status flag cannot be used to determine whether aux
* power wells have finished powering up. Instead we're
* expected to just wait a fixed 600us after raising the request
* bit.
*/
if (DISPLAY_VER(display) >= 35) {
if (HAS_LT_PHY(display)) {
if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
drm_warn(display->drm,
"Timeout waiting for PHY %c AUX channel power to be up\n",
phy_name(phy));
} else {
/*
* The power status flag cannot be used to determine whether aux
* power wells have finished powering up. Instead we're
* expected to just wait a fixed 600us after raising the request
* bit.
*/
usleep_range(600, 1200);
}
}
@ -1887,7 +1893,7 @@ static void xelpdp_aux_power_well_disable(struct intel_display *display,
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
if (DISPLAY_VER(display) >= 35) {
if (HAS_LT_PHY(display)) {
if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
drm_warn(display->drm,

View file

@ -882,6 +882,21 @@
#define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */
#define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id))
#define _UNDERRUN_DBG1_A 0x70064
#define _UNDERRUN_DBG1_B 0x71064
#define UNDERRUN_DBG1(pipe) _MMIO_PIPE(pipe, _UNDERRUN_DBG1_A, _UNDERRUN_DBG1_B)
#define UNDERRUN_DBUF_BLOCK_NOT_VALID_MASK REG_GENMASK(29, 24)
#define UNDERRUN_DDB_EMPTY_MASK REG_GENMASK(21, 16)
#define UNDERRUN_DBUF_NOT_FILLED_MASK REG_GENMASK(13, 8)
#define UNDERRUN_BELOW_WM0_MASK REG_GENMASK(5, 0)
#define _UNDERRUN_DBG2_A 0x70068
#define _UNDERRUN_DBG2_B 0x71068
#define UNDERRUN_DBG2(pipe) _MMIO_PIPE(pipe, _UNDERRUN_DBG2_A, _UNDERRUN_DBG2_B)
#define UNDERRUN_FRAME_LINE_COUNTERS_FROZEN REG_BIT(31)
#define UNDERRUN_PIPE_FRAME_COUNT_MASK REG_GENMASK(30, 20)
#define UNDERRUN_LINE_COUNT_MASK REG_GENMASK(19, 0)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
@ -1416,6 +1431,7 @@
#define GEN12_DCPR_STATUS_1 _MMIO(0x46440)
#define XELPDP_PMDEMAND_INFLIGHT_STATUS REG_BIT(26)
#define XE3P_UNDERRUN_PKGC REG_BIT(21)
#define FUSE_STRAP _MMIO(0x42014)
#define ILK_INTERNAL_GRAPHICS_DISABLE REG_BIT(31)
@ -2349,8 +2365,13 @@ enum skl_power_gate {
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES REG_BIT(4)
#define DDI_PORT_WIDTH_MASK REG_GENMASK(3, 1)
#define DDI_PORT_WIDTH_ENCODE(width) ((width) == 3 ? 4 : (width) - 1)
#define DDI_PORT_WIDTH_DECODE(regval) ((regval) == 4 ? 3 : (regval) + 1)
#define DDI_PORT_WIDTH(width) REG_FIELD_PREP(DDI_PORT_WIDTH_MASK, \
((width) == 3 ? 4 : (width) - 1))
DDI_PORT_WIDTH_ENCODE(width))
#define DDI_PORT_WIDTH_GET(regval) DDI_PORT_WIDTH_DECODE(REG_FIELD_GET(DDI_PORT_WIDTH_MASK, \
(regval)))
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED REG_BIT(0)

View file

@ -6,13 +6,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "intel_clock_gating.h"
#include "intel_cx0_phy.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_reset.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_hotplug.h"
#include "intel_pps.h"
@ -79,7 +79,6 @@ bool intel_display_reset_prepare(struct intel_display *display,
void intel_display_reset_finish(struct intel_display *display, bool test_only)
{
struct drm_i915_private *i915 = to_i915(display->drm);
struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
@ -107,7 +106,7 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only)
*/
intel_pps_unlock_regs_wa(display);
intel_display_driver_init_hw(display);
intel_clock_gating_init(i915);
intel_clock_gating_init(display->drm);
intel_cx0_pll_power_save_wa(display);
intel_hpd_init(display);

View file

@ -3,38 +3,39 @@
* Copyright © 2023 Intel Corporation
*/
#include <linux/dma-fence.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>
#include "gt/intel_rps.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
#include "intel_parent.h"
struct wait_rps_boost {
struct wait_queue_entry wait;
struct drm_crtc *crtc;
struct i915_request *request;
struct dma_fence *fence;
};
static int do_rps_boost(struct wait_queue_entry *_wait,
unsigned mode, int sync, void *key)
{
struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
struct i915_request *rq = wait->request;
struct intel_display *display = to_intel_display(wait->crtc->dev);
/*
* If we missed the vblank, but the request is already running it
* is reasonable to assume that it will complete before the next
* vblank without our intervention, so leave RPS alone.
* vblank without our intervention, so leave RPS alone if not started.
*/
if (!i915_request_started(rq))
intel_rps_boost(rq);
i915_request_put(rq);
intel_parent_rps_boost_if_not_started(display, wait->fence);
dma_fence_put(wait->fence);
drm_crtc_vblank_put(wait->crtc);
@ -49,7 +50,7 @@ void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct intel_display *display = to_intel_display(crtc->dev);
struct wait_rps_boost *wait;
if (!dma_fence_is_i915(fence))
if (!intel_parent_rps_available(display))
return;
if (DISPLAY_VER(display) < 6)
@ -64,7 +65,7 @@ void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
return;
}
wait->request = to_request(dma_fence_get(fence));
wait->fence = dma_fence_get(fence);
wait->crtc = crtc;
wait->wait.func = do_rps_boost;
@ -77,12 +78,14 @@ void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive)
{
struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_parent_rps_available(display))
return;
if (state->rps_interactive == interactive)
return;
intel_rps_mark_interactive(&to_gt(i915)->rps, interactive);
intel_parent_rps_mark_interactive(display, interactive);
state->rps_interactive = interactive;
}
@ -102,7 +105,5 @@ void ilk_display_rps_disable(struct intel_display *display)
void ilk_display_rps_irq_handler(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
gen5_rps_irq_handler(&to_gt(i915)->rps);
intel_parent_rps_ilk_irq_handler(display);
}

View file

@ -13,7 +13,6 @@ struct drm_crtc;
struct intel_atomic_state;
struct intel_display;
#ifdef I915
void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence);
void intel_display_rps_mark_interactive(struct intel_display *display,
@ -22,25 +21,5 @@ void intel_display_rps_mark_interactive(struct intel_display *display,
void ilk_display_rps_enable(struct intel_display *display);
void ilk_display_rps_disable(struct intel_display *display);
void ilk_display_rps_irq_handler(struct intel_display *display);
#else
static inline void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
}
static inline void intel_display_rps_mark_interactive(struct intel_display *display,
struct intel_atomic_state *state,
bool interactive)
{
}
static inline void ilk_display_rps_enable(struct intel_display *display)
{
}
static inline void ilk_display_rps_disable(struct intel_display *display)
{
}
static inline void ilk_display_rps_irq_handler(struct intel_display *display)
{
}
#endif
#endif /* __INTEL_DISPLAY_RPS_H__ */

View file

@ -509,6 +509,12 @@ struct intel_hdcp {
bool force_hdcp14;
};
enum intel_panel_replay_dsc_support {
INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED,
INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY,
INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE,
};
struct intel_connector {
struct drm_connector base;
/*
@ -561,6 +567,30 @@ struct intel_connector {
} overall_throughput;
int max_line_width;
} dsc_branch_caps;
struct {
u8 dpcd[DP_PANEL_REPLAY_CAP_SIZE];
#define INTEL_PR_DPCD_INDEX(pr_dpcd_register) ((pr_dpcd_register) - DP_PANEL_REPLAY_CAP_SUPPORT)
bool support;
bool su_support;
enum intel_panel_replay_dsc_support dsc_support;
u16 su_w_granularity;
u16 su_y_granularity;
} panel_replay_caps;
struct {
u8 dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
bool support;
bool su_support;
u16 su_w_granularity;
u16 su_y_granularity;
u8 sync_latency;
} psr_caps;
} dp;
struct {
@ -955,12 +985,6 @@ struct intel_csc_matrix {
u16 postoff[3];
};
enum intel_panel_replay_dsc_support {
INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED,
INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY,
INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE,
};
struct scaler_filter_coeff {
u16 sign;
u16 exp;
@ -1152,6 +1176,8 @@ struct intel_crtc_state {
bool enable_psr2_su_region_et;
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool link_off_after_as_sdp_when_pr_active;
bool disable_as_sdp_when_pr_active;
bool wm_level_disabled;
bool pkg_c_latency_used;
/* Only used for state verification. */
@ -1662,7 +1688,7 @@ struct intel_pps {
unsigned long last_power_on;
unsigned long last_backlight_off;
ktime_t panel_power_off_time;
intel_wakeref_t vdd_wakeref;
struct ref_tracker *vdd_wakeref;
union {
/*
@ -1716,14 +1742,12 @@ struct intel_psr {
bool active;
struct work_struct work;
unsigned int busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool sel_update_enabled;
bool psr2_sel_fetch_enabled;
bool psr2_sel_fetch_cff_enabled;
bool su_region_et_enabled;
bool req_psr2_sdp_prior_scanline;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
bool sink_not_reliable;
@ -1732,8 +1756,6 @@ struct intel_psr {
u16 su_y_granularity;
bool source_panel_replay_support;
bool sink_panel_replay_support;
bool sink_panel_replay_su_support;
enum intel_panel_replay_dsc_support sink_panel_replay_dsc_support;
bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
@ -1760,9 +1782,6 @@ struct intel_dp {
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
u8 pr_dpcd[DP_PANEL_REPLAY_CAP_SIZE];
#define INTEL_PR_DPCD_INDEX(pr_dpcd_register) ((pr_dpcd_register) - DP_PANEL_REPLAY_CAP_SUPPORT)
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
@ -1940,12 +1959,13 @@ struct intel_digital_port {
bool lane_reversal;
bool ddi_a_4_lanes;
bool release_cl2_override;
bool dedicated_external;
u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
intel_wakeref_t ddi_io_wakeref;
intel_wakeref_t aux_wakeref;
struct ref_tracker *ddi_io_wakeref;
struct ref_tracker *aux_wakeref;
struct intel_tc_port *tc;

View file

@ -70,6 +70,10 @@ bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa,
return DISPLAY_VER(display) == 13;
case INTEL_DISPLAY_WA_22014263786:
return IS_DISPLAY_VERx100(display, 1100, 1400);
case INTEL_DISPLAY_WA_15018326506:
return display->platform.battlemage;
case INTEL_DISPLAY_WA_14025769978:
return DISPLAY_VER(display) == 35;
default:
drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
break;

View file

@ -26,6 +26,8 @@ enum intel_display_wa {
INTEL_DISPLAY_WA_16025573575,
INTEL_DISPLAY_WA_14011503117,
INTEL_DISPLAY_WA_22014263786,
INTEL_DISPLAY_WA_15018326506,
INTEL_DISPLAY_WA_14025769978,
};
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);

View file

@ -1322,7 +1322,7 @@ static void intel_dmc_runtime_pm_get(struct intel_display *display)
static void intel_dmc_runtime_pm_put(struct intel_display *display)
{
intel_wakeref_t wakeref __maybe_unused =
struct ref_tracker *wakeref __maybe_unused =
fetch_and_zero(&display->dmc.wakeref);
intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);

View file

@ -424,18 +424,44 @@ static int intel_dp_min_lane_count(struct intel_dp *intel_dp)
return 1;
}
int intel_dp_link_bw_overhead(int link_clock, int lane_count, int hdisplay,
int dsc_slice_count, int bpp_x16, unsigned long flags)
{
int overhead;
WARN_ON(flags & ~(DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK |
DRM_DP_BW_OVERHEAD_FEC));
if (drm_dp_is_uhbr_rate(link_clock))
flags |= DRM_DP_BW_OVERHEAD_UHBR;
if (dsc_slice_count)
flags |= DRM_DP_BW_OVERHEAD_DSC;
overhead = drm_dp_bw_overhead(lane_count, hdisplay,
dsc_slice_count,
bpp_x16,
flags);
/*
* TODO: clarify whether a minimum required by the fixed FEC overhead
* in the bspec audio programming sequence is required here.
*/
return max(overhead, intel_dp_bw_fec_overhead(flags & DRM_DP_BW_OVERHEAD_FEC));
}
/*
* The required data bandwidth for a mode with given pixel clock and bpp. This
* is the required net bandwidth independent of the data bandwidth efficiency.
*
* TODO: check if callers of this functions should use
* intel_dp_effective_data_rate() instead.
*/
int
intel_dp_link_required(int pixel_clock, int bpp)
int intel_dp_link_required(int link_clock, int lane_count,
int mode_clock, int mode_hdisplay,
int link_bpp_x16, unsigned long bw_overhead_flags)
{
/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
return DIV_ROUND_UP(pixel_clock * bpp, 8);
int bw_overhead = intel_dp_link_bw_overhead(link_clock, lane_count, mode_hdisplay,
0, link_bpp_x16, bw_overhead_flags);
return intel_dp_effective_data_rate(mode_clock, link_bpp_x16, bw_overhead);
}
/**
@ -520,7 +546,8 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_encoder_is_c10phy(encoder))
if (intel_encoder_is_c10phy(encoder) ||
display->platform.pantherlake_wildcatlake)
return 810000;
if (DISPLAY_VERx100(display) == 1401)
@ -1013,6 +1040,8 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int num_joined_pipes)
{
struct intel_display *display = to_intel_display(connector);
u32 sink_slice_count_mask =
drm_dp_dsc_sink_slice_count_mask(connector->dp.dsc_dpcd, false);
u8 min_slice_count, i;
int max_slice_width;
int tp_rgb_yuv444;
@ -1084,9 +1113,9 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
(!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4))
continue;
if (test_slice_count >
drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, false))
break;
if (!(drm_dp_dsc_slice_count_to_mask(test_slice_count) &
sink_slice_count_mask))
continue;
/*
* Bigjoiner needs small joiner to be enabled.
@ -1103,8 +1132,14 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
return test_slice_count;
}
drm_dbg_kms(display->drm, "Unsupported Slice Count %d\n",
min_slice_count);
/* Print slice count 1,2,4,..24 if bit#0,1,3,..23 is set in the mask. */
sink_slice_count_mask <<= 1;
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Unsupported slice count (min: %d, sink supported: %*pbl)\n",
connector->base.base.id, connector->base.name,
min_slice_count,
(int)BITS_PER_TYPE(sink_slice_count_mask), &sink_slice_count_mask);
return 0;
}
@ -1226,7 +1261,7 @@ int intel_dp_min_bpp(enum intel_output_format output_format)
return 8 * 3;
}
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
int intel_dp_output_format_link_bpp_x16(enum intel_output_format output_format, int pipe_bpp)
{
/*
* bpp value was assumed to RGB format. And YCbCr 4:2:0 output
@ -1234,9 +1269,9 @@ int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
* of bytes of RGB pixel.
*/
if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
bpp /= 2;
pipe_bpp /= 2;
return bpp;
return fxp_q4_from_int(pipe_bpp);
}
static enum intel_output_format
@ -1252,8 +1287,8 @@ intel_dp_sink_format(struct intel_connector *connector,
}
static int
intel_dp_mode_min_output_bpp(struct intel_connector *connector,
const struct drm_display_mode *mode)
intel_dp_mode_min_link_bpp_x16(struct intel_connector *connector,
const struct drm_display_mode *mode)
{
enum intel_output_format output_format, sink_format;
@ -1261,7 +1296,8 @@ intel_dp_mode_min_output_bpp(struct intel_connector *connector,
output_format = intel_dp_output_format(connector, sink_format);
return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
return intel_dp_output_format_link_bpp_x16(output_format,
intel_dp_min_bpp(output_format));
}
static bool intel_dp_hdisplay_bad(struct intel_display *display,
@ -1333,11 +1369,11 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
/* If PCON supports FRL MODE, check FRL bandwidth constraints */
if (intel_dp->dfp.pcon_max_frl_bw) {
int link_bpp_x16 = intel_dp_mode_min_link_bpp_x16(connector, mode);
int target_bw;
int max_frl_bw;
int bpp = intel_dp_mode_min_output_bpp(connector, mode);
target_bw = bpp * target_clock;
target_bw = fxp_q4_to_int_roundup(link_bpp_x16) * target_clock;
max_frl_bw = intel_dp->dfp.pcon_max_frl_bw;
@ -1452,6 +1488,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
enum drm_mode_status status;
bool dsc = false;
int num_joined_pipes;
int link_bpp_x16;
status = intel_cpu_transcoder_mode_valid(display, mode);
if (status != MODE_OK)
@ -1494,8 +1531,10 @@ intel_dp_mode_valid(struct drm_connector *_connector,
max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock,
intel_dp_mode_min_output_bpp(connector, mode));
link_bpp_x16 = intel_dp_mode_min_link_bpp_x16(connector, mode);
mode_rate = intel_dp_link_required(max_link_clock, max_lanes,
target_clock, mode->hdisplay,
link_bpp_x16, 0);
if (intel_dp_has_dsc(connector)) {
int pipe_bpp;
@ -1802,14 +1841,13 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
const struct link_config_limits *limits)
{
int bpp, i, lane_count, clock = intel_dp_mode_clock(pipe_config, conn_state);
int mode_rate, link_rate, link_avail;
int link_rate, link_avail;
for (bpp = fxp_q4_to_int(limits->link.max_bpp_x16);
bpp >= fxp_q4_to_int(limits->link.min_bpp_x16);
bpp -= 2 * 3) {
int link_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
mode_rate = intel_dp_link_required(clock, link_bpp);
int link_bpp_x16 =
intel_dp_output_format_link_bpp_x16(pipe_config->output_format, bpp);
for (i = 0; i < intel_dp->num_common_rates; i++) {
link_rate = intel_dp_common_rate(intel_dp, i);
@ -1820,11 +1858,17 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
int mode_rate =
intel_dp_link_required(link_rate, lane_count,
clock, adjusted_mode->hdisplay,
link_bpp_x16, 0);
link_avail = intel_dp_max_link_data_rate(intel_dp,
link_rate,
lane_count);
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
@ -1982,17 +2026,21 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
static bool is_bw_sufficient_for_dsc_config(int dsc_bpp_x16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
static bool is_bw_sufficient_for_dsc_config(struct intel_dp *intel_dp,
int link_clock, int lane_count,
int mode_clock, int mode_hdisplay,
int dsc_slice_count, int link_bpp_x16,
unsigned long bw_overhead_flags)
{
u32 available_bw, required_bw;
int available_bw;
int required_bw;
available_bw = (link_clock * lane_count * timeslots * 16) / 8;
required_bw = dsc_bpp_x16 * (intel_dp_mode_to_fec_clock(mode_clock));
available_bw = intel_dp_max_link_data_rate(intel_dp, link_clock, lane_count);
required_bw = intel_dp_link_required(link_clock, lane_count,
mode_clock, mode_hdisplay,
link_bpp_x16, bw_overhead_flags);
return available_bw > required_bw;
return available_bw >= required_bw;
}
static int dsc_compute_link_config(struct intel_dp *intel_dp,
@ -2038,10 +2086,16 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
if (ret)
continue;
} else {
if (!is_bw_sufficient_for_dsc_config(dsc_bpp_x16, link_rate,
lane_count, adjusted_mode->clock,
pipe_config->output_format,
timeslots))
unsigned long bw_overhead_flags =
pipe_config->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
if (!is_bw_sufficient_for_dsc_config(intel_dp,
link_rate, lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->hdisplay,
pipe_config->dsc.slice_count,
dsc_bpp_x16,
bw_overhead_flags))
continue;
}
@ -2192,24 +2246,17 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
const struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int output_bpp;
int min_bpp_x16, max_bpp_x16, bpp_step_x16;
int dsc_joiner_max_bpp;
int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
int link_bpp_x16;
int bpp_x16;
int ret;
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(display, adjusted_mode->clock,
adjusted_mode->hdisplay,
num_joined_pipes);
max_bpp_x16 = min(fxp_q4_from_int(dsc_joiner_max_bpp), limits->link.max_bpp_x16);
max_bpp_x16 = limits->link.max_bpp_x16;
bpp_step_x16 = intel_dp_dsc_bpp_step_x16(connector);
/* Compressed BPP should be less than the Input DSC bpp */
output_bpp = intel_dp_output_bpp(pipe_config->output_format, pipe_bpp);
max_bpp_x16 = min(max_bpp_x16, fxp_q4_from_int(output_bpp) - bpp_step_x16);
link_bpp_x16 = intel_dp_output_format_link_bpp_x16(pipe_config->output_format, pipe_bpp);
max_bpp_x16 = min(max_bpp_x16, link_bpp_x16 - bpp_step_x16);
drm_WARN_ON(display->drm, !is_power_of_2(bpp_step_x16));
min_bpp_x16 = round_up(limits->link.min_bpp_x16, bpp_step_x16);
@ -2560,6 +2607,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
int throughput_max_bpp_x16;
int joiner_max_bpp;
dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
@ -2567,17 +2615,20 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.min_bpp_x16 = fxp_q4_from_int(dsc_min_bpp);
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
joiner_max_bpp =
get_max_compressed_bpp_with_joiner(display,
adjusted_mode->crtc_clock,
adjusted_mode->hdisplay,
intel_crtc_num_joined_pipes(crtc_state));
dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
crtc_state,
limits->pipe.max_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ?
min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_max_bpp = min(dsc_sink_max_bpp, dsc_src_max_bpp);
dsc_max_bpp = min(dsc_max_bpp, joiner_max_bpp);
max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
throughput_max_bpp_x16 = dsc_throughput_quirk_max_bpp_x16(connector, crtc_state);
throughput_max_bpp_x16 = clamp(throughput_max_bpp_x16,
limits->link.min_bpp_x16, max_link_bpp_x16);
if (throughput_max_bpp_x16 < max_link_bpp_x16) {
max_link_bpp_x16 = throughput_max_bpp_x16;
@ -2592,7 +2643,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->link.max_bpp_x16 = max_link_bpp_x16;
drm_dbg_kms(display->drm,
"[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d max link_bpp " FXP_Q4_FMT "\n",
"[ENCODER:%d:%s][CRTC:%d:%s] DP link limits: pixel clock %d kHz DSC %s max lanes %d max rate %d max pipe_bpp %d min link_bpp " FXP_Q4_FMT " max link_bpp " FXP_Q4_FMT "\n",
encoder->base.base.id, encoder->base.name,
crtc->base.base.id, crtc->base.name,
adjusted_mode->crtc_clock,
@ -2600,21 +2651,40 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
limits->max_lane_count,
limits->max_rate,
limits->pipe.max_bpp,
FXP_Q4_ARGS(limits->link.min_bpp_x16),
FXP_Q4_ARGS(limits->link.max_bpp_x16));
if (limits->link.min_bpp_x16 <= 0 ||
limits->link.min_bpp_x16 > limits->link.max_bpp_x16)
return false;
return true;
}
static void
intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
static bool
intel_dp_dsc_compute_pipe_bpp_limits(struct intel_connector *connector,
struct link_config_limits *limits)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_display *display = to_intel_display(connector);
const struct link_config_limits orig_limits = *limits;
int dsc_min_bpc = intel_dp_dsc_min_src_input_bpc();
int dsc_max_bpc = intel_dp_dsc_max_src_input_bpc(display);
limits->pipe.max_bpp = clamp(limits->pipe.max_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
limits->pipe.min_bpp = clamp(limits->pipe.min_bpp, dsc_min_bpc * 3, dsc_max_bpc * 3);
limits->pipe.min_bpp = max(limits->pipe.min_bpp, dsc_min_bpc * 3);
limits->pipe.max_bpp = min(limits->pipe.max_bpp, dsc_max_bpc * 3);
if (limits->pipe.min_bpp <= 0 ||
limits->pipe.min_bpp > limits->pipe.max_bpp) {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] Invalid DSC src/sink input BPP (src:%d-%d pipe:%d-%d)\n",
connector->base.base.id, connector->base.name,
dsc_min_bpc * 3, dsc_max_bpc * 3,
orig_limits.pipe.min_bpp, orig_limits.pipe.max_bpp);
return false;
}
return true;
}
bool
@ -2654,8 +2724,8 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
respect_downstream_limits);
}
if (dsc)
intel_dp_dsc_compute_pipe_bpp_limits(intel_dp, limits);
if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits))
return false;
if (is_mst || intel_dp->use_max_params) {
/*
@ -2686,11 +2756,13 @@ int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp = crtc_state->dsc.compression_enable ?
fxp_q4_to_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
crtc_state->pipe_bpp;
int link_bpp_x16 = crtc_state->dsc.compression_enable ?
crtc_state->dsc.compressed_bpp_x16 :
fxp_q4_from_int(crtc_state->pipe_bpp);
return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
return intel_dp_link_required(crtc_state->port_clock, crtc_state->lane_count,
adjusted_mode->crtc_clock, adjusted_mode->hdisplay,
link_bpp_x16, 0);
}
bool intel_dp_joiner_needs_dsc(struct intel_display *display,
@ -3259,8 +3331,8 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
if (crtc_state->dsc.compression_enable)
link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
else
link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
crtc_state->pipe_bpp));
link_bpp_x16 = intel_dp_output_format_link_bpp_x16(crtc_state->output_format,
crtc_state->pipe_bpp);
/* Calculate min Hblank Link Layer Symbol Cycle Count for 8b/10b MST & 128b/132b */
hactive_sym_cycles = drm_dp_link_symbol_cycles(max_lane_count,
@ -3370,8 +3442,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (pipe_config->dsc.compression_enable)
link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
else
link_bpp_x16 = fxp_q4_from_int(intel_dp_output_bpp(pipe_config->output_format,
pipe_config->pipe_bpp));
link_bpp_x16 = intel_dp_output_format_link_bpp_x16(pipe_config->output_format,
pipe_config->pipe_bpp);
if (intel_dp->mso_link_count) {
int n = intel_dp->mso_link_count;
@ -4562,7 +4634,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
*/
intel_psr_init_dpcd(intel_dp);
intel_psr_init_dpcd(intel_dp, connector);
intel_edp_set_sink_rates(intel_dp);
intel_dp_set_max_sink_lane_count(intel_dp);
@ -5791,9 +5863,8 @@ bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_glitch_free = intel_tc_port_handles_hpd_glitches(dig_port);
bool is_connected = false;
intel_wakeref_t wakeref;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
poll_timeout_us(is_connected = dig_port->connected(encoder),
is_connected || is_glitch_free,
30, 4000, false);
@ -6049,10 +6120,19 @@ intel_dp_detect(struct drm_connector *_connector,
if (status == connector_status_disconnected) {
intel_dp_test_reset(intel_dp);
/*
* FIXME: Resetting these caps here cause
* state computation fail if the connector need to be
* modeset after sink disconnect. Move resetting them
* to where new sink is connected.
*/
memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
memset(connector->dp.panel_replay_caps.dpcd, 0,
sizeof(connector->dp.panel_replay_caps.dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
intel_dp->psr.sink_panel_replay_dsc_support =
connector->dp.panel_replay_caps.support = false;
connector->dp.panel_replay_caps.su_support = false;
connector->dp.panel_replay_caps.dsc_support =
INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
intel_dp_mst_disconnect(intel_dp);
@ -6075,7 +6155,7 @@ intel_dp_detect(struct drm_connector *_connector,
connector->base.epoch_counter++;
if (!intel_dp_is_edp(intel_dp))
intel_psr_init_dpcd(intel_dp);
intel_psr_init_dpcd(intel_dp, connector);
intel_dp_detect_dsc_caps(intel_dp, connector);

View file

@ -117,7 +117,11 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
bool intel_dp_source_supports_tps3(struct intel_display *display);
bool intel_dp_source_supports_tps4(struct intel_display *display);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_link_bw_overhead(int link_clock, int lane_count, int hdisplay,
int dsc_slice_count, int bpp_x16, unsigned long flags);
int intel_dp_link_required(int link_clock, int lane_count,
int mode_clock, int mode_hdisplay,
int link_bpp_x16, unsigned long bw_overhead_flags);
int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
int bw_overhead);
int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
@ -193,7 +197,8 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
void intel_dp_invalidate_source_oui(struct intel_dp *intel_dp);
void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
int intel_dp_output_format_link_bpp_x16(enum intel_output_format output_format,
int pipe_bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state,

View file

@ -6,6 +6,7 @@
#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
@ -60,16 +61,17 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
const unsigned int timeout_ms = 10;
u32 status;
int ret;
bool done;
ret = intel_de_wait_ms(display, ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY, 0,
timeout_ms, &status);
#define C (((status = intel_de_read_notrace(display, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
done = wait_event_timeout(display->gmbus.wait_queue, C,
msecs_to_jiffies_timeout(timeout_ms));
if (ret == -ETIMEDOUT)
if (!done)
drm_err(display->drm,
"%s: did not complete or timeout within %ums (status 0x%08x)\n",
intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
}
@ -245,8 +247,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain;
intel_wakeref_t aux_wakeref;
intel_wakeref_t pps_wakeref = NULL;
struct ref_tracker *aux_wakeref;
struct ref_tracker *pps_wakeref = NULL;
int i, ret, recv_bytes;
int try, clock = 0;
u32 status;

View file

@ -1195,7 +1195,9 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
intel_panel_preferred_fixed_mode(intel_dp->attached_connector);
int mode_rate, max_rate;
mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
mode_rate = intel_dp_link_required(link_rate, lane_count,
fixed_mode->clock, fixed_mode->hdisplay,
fxp_q4_from_int(18), 0);
max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
if (mode_rate > max_rate)
return false;

View file

@ -180,26 +180,16 @@ static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
int overhead;
flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
if (dsc_slice_count)
flags |= DRM_DP_BW_OVERHEAD_DSC;
overhead = drm_dp_bw_overhead(crtc_state->lane_count,
adjusted_mode->hdisplay,
dsc_slice_count,
bpp_x16,
flags);
/*
* TODO: clarify whether a minimum required by the fixed FEC overhead
* in the bspec audio programming sequence is required here.
*/
return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
return intel_dp_link_bw_overhead(crtc_state->port_clock,
crtc_state->lane_count,
adjusted_mode->hdisplay,
dsc_slice_count,
bpp_x16,
flags);
}
static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
@ -344,8 +334,8 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
}
link_bpp_x16 = dsc ? bpp_x16 :
fxp_q4_from_int(intel_dp_output_bpp(crtc_state->output_format,
fxp_q4_to_int(bpp_x16)));
intel_dp_output_format_link_bpp_x16(crtc_state->output_format,
fxp_q4_to_int(bpp_x16));
local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state,
false, dsc_slice_count, link_bpp_x16);
@ -1468,6 +1458,8 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
const int min_bpp = 18;
int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
unsigned long bw_overhead_flags =
DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK;
int ret;
bool dsc = false;
u16 dsc_max_compressed_bpp = 0;
@ -1499,7 +1491,10 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
max_rate = intel_dp_max_link_data_rate(intel_dp,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, min_bpp);
mode_rate = intel_dp_link_required(max_link_clock, max_lanes,
mode->clock, mode->hdisplay,
fxp_q4_from_int(min_bpp),
bw_overhead_flags);
/*
* TODO:

View file

@ -1212,27 +1212,6 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
int ret;
ret = intel_cx0pll_calc_state(crtc_state, encoder);
if (ret)
return ret;
/* TODO: Do the readback via intel_dpll_compute() */
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->dpll_hw_state.cx0pll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
return 0;
}
static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@ -1719,7 +1698,8 @@ static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
};
static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
.crtc_compute_clock = hsw_crtc_compute_clock,
.crtc_get_dpll = hsw_crtc_get_dpll,
};
static const struct intel_dpll_global_funcs dg2_dpll_funcs = {

View file

@ -203,6 +203,22 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
return tc_port - TC_PORT_1 + DPLL_ID_ICL_MGPLL1;
}
enum intel_dpll_id mtl_port_to_pll_id(struct intel_display *display, enum port port)
{
if (port >= PORT_TC1)
return icl_tc_port_to_pll_id(intel_port_to_tc(display, port));
switch (port) {
case PORT_A:
return DPLL_ID_ICL_DPLL0;
case PORT_B:
return DPLL_ID_ICL_DPLL1;
default:
MISSING_CASE(port);
return DPLL_ID_ICL_DPLL0;
}
}
static i915_reg_t
intel_combo_pll_enable_reg(struct intel_display *display,
struct intel_dpll *pll)
@ -531,7 +547,7 @@ static bool ibx_pch_dpll_get_hw_state(struct intel_display *display,
{
struct i9xx_dpll_hw_state *hw_state = &dpll_hw_state->i9xx;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u32 val;
wakeref = intel_display_power_get_if_enabled(display,
@ -752,7 +768,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct intel_display *display,
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u32 val;
wakeref = intel_display_power_get_if_enabled(display,
@ -773,7 +789,7 @@ static bool hsw_ddi_spll_get_hw_state(struct intel_display *display,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct hsw_dpll_hw_state *hw_state = &dpll_hw_state->hsw;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u32 val;
wakeref = intel_display_power_get_if_enabled(display,
@ -1431,7 +1447,7 @@ static bool skl_ddi_pll_get_hw_state(struct intel_display *display,
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
u32 val;
@ -1469,7 +1485,7 @@ static bool skl_ddi_dpll0_get_hw_state(struct intel_display *display,
struct skl_dpll_hw_state *hw_state = &dpll_hw_state->skl;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u32 val;
bool ret;
@ -2172,7 +2188,7 @@ static bool bxt_ddi_pll_get_hw_state(struct intel_display *display,
{
struct bxt_dpll_hw_state *hw_state = &dpll_hw_state->bxt;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
enum dpio_phy phy;
enum dpio_channel ch;
u32 val;
@ -3490,6 +3506,37 @@ err_unreference_tbt_pll:
return ret;
}
/*
* Get the PLL for either a port using a C10 PHY PLL, or for a port using a
* C20 PHY PLL in the cases of:
* - BMG port A/B
* - PTL port B eDP over TypeC PHY
*/
static int mtl_get_non_tc_phy_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
enum intel_dpll_id pll_id = mtl_port_to_pll_id(display, encoder->port);
port_dpll->pll = intel_find_dpll(state, crtc,
&port_dpll->hw_state,
BIT(pll_id));
if (!port_dpll->pll)
return -EINVAL;
intel_reference_dpll(state, crtc,
port_dpll->pll, &port_dpll->hw_state);
icl_update_active_dpll(state, crtc, encoder);
return 0;
}
static int icl_compute_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@ -3551,7 +3598,7 @@ static bool mg_pll_get_hw_state(struct intel_display *display,
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret = false;
u32 val;
@ -3618,7 +3665,7 @@ static bool dkl_pll_get_hw_state(struct intel_display *display,
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
enum tc_port tc_port = icl_pll_id_to_tc_port(id);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret = false;
u32 val;
@ -3690,7 +3737,7 @@ static bool icl_pll_get_hw_state(struct intel_display *display,
{
struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret = false;
u32 val;
@ -3753,9 +3800,9 @@ static bool combo_pll_get_hw_state(struct intel_display *display,
return icl_pll_get_hw_state(display, pll, dpll_hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct intel_display *display,
struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
static bool icl_tbt_pll_get_hw_state(struct intel_display *display,
struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
return icl_pll_get_hw_state(display, pll, dpll_hw_state, TBT_PLL_ENABLE);
}
@ -3984,9 +4031,9 @@ static void combo_pll_enable(struct intel_display *display,
/* DVFS post sequence would be here. See the comment above. */
}
static void tbt_pll_enable(struct intel_display *display,
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
static void icl_tbt_pll_enable(struct intel_display *display,
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
const struct icl_dpll_hw_state *hw_state = &dpll_hw_state->icl;
@ -4069,8 +4116,8 @@ static void combo_pll_disable(struct intel_display *display,
icl_pll_disable(display, pll, enable_reg);
}
static void tbt_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
static void icl_tbt_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
{
icl_pll_disable(display, pll, TBT_PLL_ENABLE);
}
@ -4142,10 +4189,10 @@ static const struct intel_dpll_funcs combo_pll_funcs = {
.get_freq = icl_ddi_combo_pll_get_freq,
};
static const struct intel_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
static const struct intel_dpll_funcs icl_tbt_pll_funcs = {
.enable = icl_tbt_pll_enable,
.disable = icl_tbt_pll_disable,
.get_hw_state = icl_tbt_pll_get_hw_state,
.get_freq = icl_ddi_tbt_pll_get_freq,
};
@ -4159,7 +4206,7 @@ static const struct intel_dpll_funcs mg_pll_funcs = {
static const struct dpll_info icl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
{ .name = "TBT PLL", .funcs = &icl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, },
{ .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
@ -4207,7 +4254,7 @@ static const struct intel_dpll_funcs dkl_pll_funcs = {
static const struct dpll_info tgl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
{ .name = "TBT PLL", .funcs = &icl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
@ -4285,7 +4332,7 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
static const struct dpll_info adlp_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
{ .name = "TBT PLL", .funcs = &icl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, },
{ .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
@ -4305,6 +4352,224 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
.compare_hw_state = icl_compare_hw_state,
};
static struct intel_encoder *get_intel_encoder(struct intel_display *display,
const struct intel_dpll *pll)
{
struct intel_encoder *encoder;
enum intel_dpll_id mtl_id;
for_each_intel_encoder(display->drm, encoder) {
mtl_id = mtl_port_to_pll_id(display, encoder->port);
if (mtl_id == pll->info->id)
return encoder;
}
return NULL;
}
static bool mtl_pll_get_hw_state(struct intel_display *display,
struct intel_dpll *pll,
struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_encoder *encoder = get_intel_encoder(display, pll);
if (!encoder)
return false;
return intel_cx0pll_readout_hw_state(encoder, &dpll_hw_state->cx0pll);
}
static int mtl_pll_get_freq(struct intel_display *display,
const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_encoder *encoder = get_intel_encoder(display, pll);
if (drm_WARN_ON(display->drm, !encoder))
return -EINVAL;
return intel_cx0pll_calc_port_clock(encoder, &dpll_hw_state->cx0pll);
}
static void mtl_pll_enable(struct intel_display *display,
struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
struct intel_encoder *encoder = get_intel_encoder(display, pll);
if (drm_WARN_ON(display->drm, !encoder))
return;
intel_mtl_pll_enable(encoder, pll, dpll_hw_state);
}
static void mtl_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
{
struct intel_encoder *encoder = get_intel_encoder(display, pll);
if (drm_WARN_ON(display->drm, !encoder))
return;
intel_mtl_pll_disable(encoder);
}
static const struct intel_dpll_funcs mtl_pll_funcs = {
.enable = mtl_pll_enable,
.disable = mtl_pll_disable,
.get_hw_state = mtl_pll_get_hw_state,
.get_freq = mtl_pll_get_freq,
};
static void mtl_tbt_pll_enable(struct intel_display *display,
struct intel_dpll *pll,
const struct intel_dpll_hw_state *hw_state)
{
}
static void mtl_tbt_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
{
}
static int mtl_tbt_pll_get_freq(struct intel_display *display,
const struct intel_dpll *pll,
const struct intel_dpll_hw_state *dpll_hw_state)
{
/*
* The PLL outputs multiple frequencies at the same time, selection is
* made at DDI clock mux level.
*/
drm_WARN_ON(display->drm, 1);
return 0;
}
static const struct intel_dpll_funcs mtl_tbt_pll_funcs = {
.enable = mtl_tbt_pll_enable,
.disable = mtl_tbt_pll_disable,
.get_hw_state = intel_mtl_tbt_pll_readout_hw_state,
.get_freq = mtl_tbt_pll_get_freq,
};
static const struct dpll_info mtl_plls[] = {
{ .name = "DPLL 0", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
{ .name = "TBT PLL", .funcs = &mtl_tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
.is_alt_port_dpll = true, .always_on = true },
{ .name = "TC PLL 1", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
{ .name = "TC PLL 2", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
{ .name = "TC PLL 3", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
{ .name = "TC PLL 4", .funcs = &mtl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
{}
};
/*
* Compute the state for either a C10 PHY PLL, or in the case of the PTL port B,
* eDP on TypeC PHY case for a C20 PHY PLL.
*/
static int mtl_compute_non_tc_phy_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
int ret;
ret = intel_cx0pll_calc_state(crtc_state, encoder, &port_dpll->hw_state);
if (ret)
return ret;
/* this is mainly for the fastset check */
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder,
&port_dpll->hw_state.cx0pll);
return 0;
}
static int mtl_compute_tc_phy_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll;
int ret;
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
intel_mtl_tbt_pll_calc_state(&port_dpll->hw_state);
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
ret = intel_cx0pll_calc_state(crtc_state, encoder, &port_dpll->hw_state);
if (ret)
return ret;
/* this is mainly for the fastset check */
if (old_crtc_state->intel_dpll &&
old_crtc_state->intel_dpll->info->id == DPLL_ID_ICL_TBTPLL)
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
else
icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder,
&port_dpll->hw_state.cx0pll);
return 0;
}
static int mtl_compute_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
if (intel_encoder_is_tc(encoder))
return mtl_compute_tc_phy_dplls(state, crtc, encoder);
else
return mtl_compute_non_tc_phy_dpll(state, crtc, encoder);
}
static int mtl_get_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
if (intel_encoder_is_tc(encoder))
return icl_get_tc_phy_dplls(state, crtc, encoder);
else
return mtl_get_non_tc_phy_dpll(state, crtc, encoder);
}
static void mtl_dump_hw_state(struct drm_printer *p,
const struct intel_dpll_hw_state *dpll_hw_state)
{
intel_cx0pll_dump_hw_state(p, &dpll_hw_state->cx0pll);
}
static bool mtl_compare_hw_state(const struct intel_dpll_hw_state *_a,
const struct intel_dpll_hw_state *_b)
{
const struct intel_cx0pll_state *a = &_a->cx0pll;
const struct intel_cx0pll_state *b = &_b->cx0pll;
return intel_cx0pll_compare_hw_state(a, b);
}
static const struct intel_dpll_mgr mtl_pll_mgr = {
.dpll_info = mtl_plls,
.compute_dplls = mtl_compute_dplls,
.get_dplls = mtl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
.update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = mtl_dump_hw_state,
.compare_hw_state = mtl_compare_hw_state,
};
/**
* intel_dpll_init - Initialize DPLLs
* @display: intel_display device
@ -4319,9 +4584,11 @@ void intel_dpll_init(struct intel_display *display)
mutex_init(&display->dpll.lock);
if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
/* No shared DPLLs on DG2; port PLLs are part of the PHY */
if (DISPLAY_VER(display) >= 35 || display->platform.dg2)
/* No shared DPLLs on NVL or DG2; port PLLs are part of the PHY */
dpll_mgr = NULL;
else if (DISPLAY_VER(display) >= 14)
dpll_mgr = &mtl_pll_mgr;
else if (display->platform.alderlake_p)
dpll_mgr = &adlp_pll_mgr;
else if (display->platform.alderlake_s)
@ -4675,11 +4942,18 @@ verify_single_dpll_state(struct intel_display *display,
"%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
pll->info->name, pipe_mask, pll->state.pipe_mask);
INTEL_DISPLAY_STATE_WARN(display,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"%s: pll hw state mismatch\n",
pll->info->name);
if (INTEL_DISPLAY_STATE_WARN(display,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"%s: pll hw state mismatch\n",
pll->info->name)) {
struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
drm_printf(&p, "PLL %s HW state:\n", pll->info->name);
intel_dpll_dump_hw_state(display, &p, &dpll_hw_state);
drm_printf(&p, "PLL %s SW state:\n", pll->info->name);
intel_dpll_dump_hw_state(display, &p, &pll->state.hw_state);
}
}
static bool has_alt_port_dpll(const struct intel_dpll *old_pll,

View file

@ -28,7 +28,6 @@
#include <linux/types.h>
#include "intel_display_power.h"
#include "intel_wakeref.h"
#define for_each_dpll(__display, __pll, __i) \
for ((__i) = 0; (__i) < (__display)->dpll.num_dpll && \
@ -42,6 +41,7 @@ struct intel_crtc_state;
struct intel_dpll_funcs;
struct intel_encoder;
struct intel_shared_dpll;
struct ref_tracker;
/**
* enum intel_dpll_id - possible DPLL ids
@ -255,6 +255,11 @@ struct intel_c20pll_state {
u16 mplla[10];
u16 mpllb[11];
};
struct intel_c20pll_vdr_state {
u8 custom_width;
u8 serdes_rate;
u8 hdmi_rate;
} vdr;
};
struct intel_cx0pll_state {
@ -262,6 +267,7 @@ struct intel_cx0pll_state {
struct intel_c10pll_state c10;
struct intel_c20pll_state c20;
};
int lane_count;
bool ssc_enabled;
bool use_c10;
bool tbt_mode;
@ -390,7 +396,7 @@ struct intel_dpll {
* @wakeref: In some platforms a device-level runtime pm reference may
* need to be grabbed to disable DC states while this DPLL is enabled
*/
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
};
#define SKL_DPLL0 0
@ -444,6 +450,7 @@ bool intel_dpll_compare_hw_state(struct intel_display *display,
const struct intel_dpll_hw_state *a,
const struct intel_dpll_hw_state *b);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
enum intel_dpll_id mtl_port_to_pll_id(struct intel_display *display, enum port port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
void intel_dpll_state_verify(struct intel_atomic_state *state,

View file

@ -8,11 +8,10 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include "../display/intel_display_core.h" /* FIXME */
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_display_core.h"
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
@ -57,14 +56,17 @@ const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
static enum intel_dram_type pnv_dram_type(struct drm_i915_private *i915)
static enum intel_dram_type pnv_dram_type(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3 ?
INTEL_DRAM_DDR3 : INTEL_DRAM_DDR2;
}
static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv)
static unsigned int pnv_mem_freq(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 tmp;
tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG);
@ -81,8 +83,9 @@ static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv)
return 0;
}
static unsigned int ilk_mem_freq(struct drm_i915_private *dev_priv)
static unsigned int ilk_mem_freq(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u16 ddrpll;
ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1);
@ -96,19 +99,19 @@ static unsigned int ilk_mem_freq(struct drm_i915_private *dev_priv)
case 0x18:
return 1600000;
default:
drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
drm_dbg_kms(display->drm, "unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
return 0;
}
}
static unsigned int chv_mem_freq(struct drm_i915_private *i915)
static unsigned int chv_mem_freq(struct intel_display *display)
{
u32 val;
vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_CCK));
val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_CCK));
vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK));
val = vlv_iosf_sb_read(display->drm, VLV_IOSF_SB_CCK, CCK_FUSE_REG);
vlv_iosf_sb_put(display->drm, BIT(VLV_IOSF_SB_CCK));
switch ((val >> 2) & 0x7) {
case 3:
@ -118,13 +121,13 @@ static unsigned int chv_mem_freq(struct drm_i915_private *i915)
}
}
static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
static unsigned int vlv_mem_freq(struct intel_display *display)
{
u32 val;
vlv_iosf_sb_get(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
val = vlv_iosf_sb_read(&i915->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
vlv_iosf_sb_put(&i915->drm, BIT(VLV_IOSF_SB_PUNIT));
vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_PUNIT));
val = vlv_iosf_sb_read(display->drm, VLV_IOSF_SB_PUNIT, PUNIT_REG_GPU_FREQ_STS);
vlv_iosf_sb_put(display->drm, BIT(VLV_IOSF_SB_PUNIT));
switch ((val >> 6) & 3) {
case 0:
@ -139,22 +142,23 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
return 0;
}
unsigned int intel_mem_freq(struct drm_i915_private *i915)
unsigned int intel_mem_freq(struct intel_display *display)
{
if (IS_PINEVIEW(i915))
return pnv_mem_freq(i915);
else if (GRAPHICS_VER(i915) == 5)
return ilk_mem_freq(i915);
else if (IS_CHERRYVIEW(i915))
return chv_mem_freq(i915);
else if (IS_VALLEYVIEW(i915))
return vlv_mem_freq(i915);
if (display->platform.pineview)
return pnv_mem_freq(display);
else if (DISPLAY_VER(display) == 5)
return ilk_mem_freq(display);
else if (display->platform.cherryview)
return chv_mem_freq(display);
else if (display->platform.valleyview)
return vlv_mem_freq(display);
else
return 0;
}
static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
static unsigned int i9xx_fsb_freq(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
u32 fsb;
/*
@ -167,7 +171,7 @@ static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
*/
fsb = intel_uncore_read(&i915->uncore, CLKCFG) & CLKCFG_FSB_MASK;
if (IS_PINEVIEW(i915) || IS_MOBILE(i915)) {
if (display->platform.pineview || display->platform.mobile) {
switch (fsb) {
case CLKCFG_FSB_400:
return 400000;
@ -208,8 +212,9 @@ static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
}
}
static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv)
static unsigned int ilk_fsb_freq(struct intel_display *display)
{
struct drm_i915_private *dev_priv = to_i915(display->drm);
u16 fsb;
fsb = intel_uncore_read16(&dev_priv->uncore, CSIPLL0) & 0x3ff;
@ -230,33 +235,33 @@ static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv)
case 0x018:
return 6400000;
default:
drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", fsb);
drm_dbg_kms(display->drm, "unknown fsb frequency 0x%04x\n", fsb);
return 0;
}
}
unsigned int intel_fsb_freq(struct drm_i915_private *i915)
unsigned int intel_fsb_freq(struct intel_display *display)
{
if (GRAPHICS_VER(i915) == 5)
return ilk_fsb_freq(i915);
else if (GRAPHICS_VER(i915) == 3 || GRAPHICS_VER(i915) == 4)
return i9xx_fsb_freq(i915);
if (DISPLAY_VER(display) == 5)
return ilk_fsb_freq(display);
else if (IS_DISPLAY_VER(display, 3, 4))
return i9xx_fsb_freq(display);
else
return 0;
}
static int i915_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
static int i915_get_dram_info(struct intel_display *display, struct dram_info *dram_info)
{
dram_info->fsb_freq = intel_fsb_freq(i915);
dram_info->fsb_freq = intel_fsb_freq(display);
if (dram_info->fsb_freq)
drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq);
drm_dbg_kms(display->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq);
dram_info->mem_freq = intel_mem_freq(i915);
dram_info->mem_freq = intel_mem_freq(display);
if (dram_info->mem_freq)
drm_dbg(&i915->drm, "DDR speed: %d kHz\n", dram_info->mem_freq);
drm_dbg_kms(display->drm, "DDR speed: %d kHz\n", dram_info->mem_freq);
if (IS_PINEVIEW(i915))
dram_info->type = pnv_dram_type(i915);
if (display->platform.pineview)
dram_info->type = pnv_dram_type(display);
return 0;
}
@ -267,69 +272,121 @@ static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
}
/* Returns total Gb for the whole DIMM */
static int skl_get_dimm_size(u16 val)
static int skl_get_dimm_s_size(u32 val)
{
return (val & SKL_DRAM_SIZE_MASK) * 8;
return REG_FIELD_GET(SKL_DIMM_S_SIZE_MASK, val) * 8;
}
static int skl_get_dimm_width(u16 val)
static int skl_get_dimm_l_size(u32 val)
{
if (skl_get_dimm_size(val) == 0)
return REG_FIELD_GET(SKL_DIMM_L_SIZE_MASK, val) * 8;
}
static int skl_get_dimm_s_width(u32 val)
{
if (skl_get_dimm_s_size(val) == 0)
return 0;
switch (val & SKL_DRAM_WIDTH_MASK) {
case SKL_DRAM_WIDTH_X8:
case SKL_DRAM_WIDTH_X16:
case SKL_DRAM_WIDTH_X32:
val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
return 8 << val;
switch (val & SKL_DIMM_S_WIDTH_MASK) {
case SKL_DIMM_S_WIDTH_X8:
case SKL_DIMM_S_WIDTH_X16:
case SKL_DIMM_S_WIDTH_X32:
return 8 << REG_FIELD_GET(SKL_DIMM_S_WIDTH_MASK, val);
default:
MISSING_CASE(val);
return 0;
}
}
static int skl_get_dimm_ranks(u16 val)
static int skl_get_dimm_l_width(u32 val)
{
if (skl_get_dimm_size(val) == 0)
if (skl_get_dimm_l_size(val) == 0)
return 0;
val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
switch (val & SKL_DIMM_L_WIDTH_MASK) {
case SKL_DIMM_L_WIDTH_X8:
case SKL_DIMM_L_WIDTH_X16:
case SKL_DIMM_L_WIDTH_X32:
return 8 << REG_FIELD_GET(SKL_DIMM_L_WIDTH_MASK, val);
default:
MISSING_CASE(val);
return 0;
}
}
return val + 1;
static int skl_get_dimm_s_ranks(u32 val)
{
if (skl_get_dimm_s_size(val) == 0)
return 0;
return REG_FIELD_GET(SKL_DIMM_S_RANK_MASK, val) + 1;
}
static int skl_get_dimm_l_ranks(u32 val)
{
if (skl_get_dimm_l_size(val) == 0)
return 0;
return REG_FIELD_GET(SKL_DIMM_L_RANK_MASK, val) + 1;
}
/* Returns total Gb for the whole DIMM */
static int icl_get_dimm_size(u16 val)
static int icl_get_dimm_s_size(u32 val)
{
return (val & ICL_DRAM_SIZE_MASK) * 8 / 2;
return REG_FIELD_GET(ICL_DIMM_S_SIZE_MASK, val) * 8 / 2;
}
static int icl_get_dimm_width(u16 val)
static int icl_get_dimm_l_size(u32 val)
{
if (icl_get_dimm_size(val) == 0)
return REG_FIELD_GET(ICL_DIMM_L_SIZE_MASK, val) * 8 / 2;
}
static int icl_get_dimm_s_width(u32 val)
{
if (icl_get_dimm_s_size(val) == 0)
return 0;
switch (val & ICL_DRAM_WIDTH_MASK) {
case ICL_DRAM_WIDTH_X8:
case ICL_DRAM_WIDTH_X16:
case ICL_DRAM_WIDTH_X32:
val = (val & ICL_DRAM_WIDTH_MASK) >> ICL_DRAM_WIDTH_SHIFT;
return 8 << val;
switch (val & ICL_DIMM_S_WIDTH_MASK) {
case ICL_DIMM_S_WIDTH_X8:
case ICL_DIMM_S_WIDTH_X16:
case ICL_DIMM_S_WIDTH_X32:
return 8 << REG_FIELD_GET(ICL_DIMM_S_WIDTH_MASK, val);
default:
MISSING_CASE(val);
return 0;
}
}
static int icl_get_dimm_ranks(u16 val)
static int icl_get_dimm_l_width(u32 val)
{
if (icl_get_dimm_size(val) == 0)
if (icl_get_dimm_l_size(val) == 0)
return 0;
val = (val & ICL_DRAM_RANK_MASK) >> ICL_DRAM_RANK_SHIFT;
switch (val & ICL_DIMM_L_WIDTH_MASK) {
case ICL_DIMM_L_WIDTH_X8:
case ICL_DIMM_L_WIDTH_X16:
case ICL_DIMM_L_WIDTH_X32:
return 8 << REG_FIELD_GET(ICL_DIMM_L_WIDTH_MASK, val);
default:
MISSING_CASE(val);
return 0;
}
}
return val + 1;
static int icl_get_dimm_s_ranks(u32 val)
{
if (icl_get_dimm_s_size(val) == 0)
return 0;
return REG_FIELD_GET(ICL_DIMM_S_RANK_MASK, val) + 1;
}
static int icl_get_dimm_l_ranks(u32 val)
{
if (icl_get_dimm_l_size(val) == 0)
return 0;
return REG_FIELD_GET(ICL_DIMM_L_RANK_MASK, val) + 1;
}
static bool
@ -340,38 +397,62 @@ skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
}
static void
skl_dram_get_dimm_info(struct drm_i915_private *i915,
struct dram_dimm_info *dimm,
int channel, char dimm_name, u16 val)
skl_dram_print_dimm_info(struct intel_display *display,
struct dram_dimm_info *dimm,
int channel, char dimm_name)
{
if (GRAPHICS_VER(i915) >= 11) {
dimm->size = icl_get_dimm_size(val);
dimm->width = icl_get_dimm_width(val);
dimm->ranks = icl_get_dimm_ranks(val);
} else {
dimm->size = skl_get_dimm_size(val);
dimm->width = skl_get_dimm_width(val);
dimm->ranks = skl_get_dimm_ranks(val);
}
drm_dbg_kms(&i915->drm,
drm_dbg_kms(display->drm,
"CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb+ DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
str_yes_no(skl_is_16gb_dimm(dimm)));
}
static void
skl_dram_get_dimm_l_info(struct intel_display *display,
struct dram_dimm_info *dimm,
int channel, u32 val)
{
if (DISPLAY_VER(display) >= 11) {
dimm->size = icl_get_dimm_l_size(val);
dimm->width = icl_get_dimm_l_width(val);
dimm->ranks = icl_get_dimm_l_ranks(val);
} else {
dimm->size = skl_get_dimm_l_size(val);
dimm->width = skl_get_dimm_l_width(val);
dimm->ranks = skl_get_dimm_l_ranks(val);
}
skl_dram_print_dimm_info(display, dimm, channel, 'L');
}
static void
skl_dram_get_dimm_s_info(struct intel_display *display,
struct dram_dimm_info *dimm,
int channel, u32 val)
{
if (DISPLAY_VER(display) >= 11) {
dimm->size = icl_get_dimm_s_size(val);
dimm->width = icl_get_dimm_s_width(val);
dimm->ranks = icl_get_dimm_s_ranks(val);
} else {
dimm->size = skl_get_dimm_s_size(val);
dimm->width = skl_get_dimm_s_width(val);
dimm->ranks = skl_get_dimm_s_ranks(val);
}
skl_dram_print_dimm_info(display, dimm, channel, 'S');
}
static int
skl_dram_get_channel_info(struct drm_i915_private *i915,
skl_dram_get_channel_info(struct intel_display *display,
struct dram_channel_info *ch,
int channel, u32 val)
{
skl_dram_get_dimm_info(i915, &ch->dimm_l,
channel, 'L', val & 0xffff);
skl_dram_get_dimm_info(i915, &ch->dimm_s,
channel, 'S', val >> 16);
skl_dram_get_dimm_l_info(display, &ch->dimm_l, channel, val);
skl_dram_get_dimm_s_info(display, &ch->dimm_s, channel, val);
if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
drm_dbg_kms(display->drm, "CH%u not populated\n", channel);
return -EINVAL;
}
@ -385,7 +466,7 @@ skl_dram_get_channel_info(struct drm_i915_private *i915,
ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
skl_is_16gb_dimm(&ch->dimm_s);
drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n",
drm_dbg_kms(display->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n",
channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
return 0;
@ -401,8 +482,9 @@ intel_is_dram_symmetric(const struct dram_channel_info *ch0,
}
static int
skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram_info)
skl_dram_get_channels_info(struct intel_display *display, struct dram_info *dram_info)
{
struct drm_i915_private *i915 = to_i915(display->drm);
struct dram_channel_info ch0 = {}, ch1 = {};
u32 val;
int ret;
@ -412,23 +494,23 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
ret = skl_dram_get_channel_info(display, &ch0, 0, val);
if (ret == 0)
dram_info->num_channels++;
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
ret = skl_dram_get_channel_info(display, &ch1, 1, val);
if (ret == 0)
dram_info->num_channels++;
if (dram_info->num_channels == 0) {
drm_info(&i915->drm, "Number of memory channels is zero\n");
drm_info(display->drm, "Number of memory channels is zero\n");
return -EINVAL;
}
if (ch0.ranks == 0 && ch1.ranks == 0) {
drm_info(&i915->drm, "couldn't get memory rank information\n");
drm_info(display->drm, "couldn't get memory rank information\n");
return -EINVAL;
}
@ -436,18 +518,19 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
drm_dbg_kms(display->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
drm_dbg_kms(&i915->drm, "16Gb+ DIMMs: %s\n",
drm_dbg_kms(display->drm, "16Gb+ DIMMs: %s\n",
str_yes_no(dram_info->has_16gb_dimms));
return 0;
}
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *i915)
skl_get_dram_type(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
u32 val;
val = intel_uncore_read(&i915->uncore,
@ -469,13 +552,13 @@ skl_get_dram_type(struct drm_i915_private *i915)
}
static int
skl_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
skl_get_dram_info(struct intel_display *display, struct dram_info *dram_info)
{
int ret;
dram_info->type = skl_get_dram_type(i915);
dram_info->type = skl_get_dram_type(display);
ret = skl_dram_get_channels_info(i915, dram_info);
ret = skl_dram_get_channels_info(display, dram_info);
if (ret)
return ret;
@ -560,8 +643,9 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}
static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
static int bxt_get_dram_info(struct intel_display *display, struct dram_info *dram_info)
{
struct drm_i915_private *i915 = to_i915(display->drm);
u32 val;
u8 valid_ranks = 0;
int i;
@ -582,11 +666,11 @@ static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dr
bxt_get_dimm_info(&dimm, val);
type = bxt_get_dimm_type(val);
drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
drm_WARN_ON(display->drm, type != INTEL_DRAM_UNKNOWN &&
dram_info->type != INTEL_DRAM_UNKNOWN &&
dram_info->type != type);
drm_dbg_kms(&i915->drm,
drm_dbg_kms(display->drm,
"CH%u DIMM size: %u Gb, width: X%u, ranks: %u\n",
i - BXT_D_CR_DRP0_DUNIT_START,
dimm.size, dimm.width, dimm.ranks);
@ -599,25 +683,25 @@ static int bxt_get_dram_info(struct drm_i915_private *i915, struct dram_info *dr
}
if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
drm_info(&i915->drm, "couldn't get memory information\n");
drm_info(display->drm, "couldn't get memory information\n");
return -EINVAL;
}
return 0;
}
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
static int icl_pcode_read_mem_global_info(struct intel_display *display,
struct dram_info *dram_info)
{
u32 val = 0;
int ret;
ret = intel_pcode_read(&dev_priv->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
if (GRAPHICS_VER(dev_priv) == 12) {
if (DISPLAY_VER(display) >= 12) {
switch (val & 0xf) {
case 0:
dram_info->type = INTEL_DRAM_DDR4;
@ -668,25 +752,25 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
return 0;
}
static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
static int gen11_get_dram_info(struct intel_display *display, struct dram_info *dram_info)
{
int ret;
ret = skl_dram_get_channels_info(i915, dram_info);
ret = skl_dram_get_channels_info(display, dram_info);
if (ret)
return ret;
return icl_pcode_read_mem_global_info(i915, dram_info);
return icl_pcode_read_mem_global_info(display, dram_info);
}
static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
static int gen12_get_dram_info(struct intel_display *display, struct dram_info *dram_info)
{
return icl_pcode_read_mem_global_info(i915, dram_info);
return icl_pcode_read_mem_global_info(display, dram_info);
}
static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
static int xelpdp_get_dram_info(struct intel_display *display, struct dram_info *dram_info)
{
struct intel_display *display = i915->display;
struct drm_i915_private *i915 = to_i915(display->drm);
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
@ -709,11 +793,11 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info
dram_info->type = INTEL_DRAM_LPDDR3;
break;
case 8:
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
drm_WARN_ON(display->drm, !display->platform.dgfx);
dram_info->type = INTEL_DRAM_GDDR;
break;
case 9:
drm_WARN_ON(&i915->drm, !IS_DGFX(i915));
drm_WARN_ON(display->drm, !display->platform.dgfx);
dram_info->type = INTEL_DRAM_GDDR_ECC;
break;
default:
@ -731,41 +815,40 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info
return 0;
}
int intel_dram_detect(struct drm_i915_private *i915)
int intel_dram_detect(struct intel_display *display)
{
struct intel_display *display = i915->display;
struct dram_info *dram_info;
int ret;
if (IS_DG2(i915) || !intel_display_device_present(display))
if (display->platform.dg2 || !HAS_DISPLAY(display))
return 0;
dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
dram_info = drmm_kzalloc(display->drm, sizeof(*dram_info), GFP_KERNEL);
if (!dram_info)
return -ENOMEM;
i915->dram_info = dram_info;
display->dram.info = dram_info;
if (DISPLAY_VER(display) >= 14)
ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 11)
ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
ret = bxt_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 9)
ret = skl_get_dram_info(i915, dram_info);
ret = xelpdp_get_dram_info(display, dram_info);
else if (DISPLAY_VER(display) >= 12)
ret = gen12_get_dram_info(display, dram_info);
else if (DISPLAY_VER(display) >= 11)
ret = gen11_get_dram_info(display, dram_info);
else if (display->platform.broxton || display->platform.geminilake)
ret = bxt_get_dram_info(display, dram_info);
else if (DISPLAY_VER(display) >= 9)
ret = skl_get_dram_info(display, dram_info);
else
ret = i915_get_dram_info(i915, dram_info);
ret = i915_get_dram_info(display, dram_info);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
drm_dbg_kms(display->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(display->drm, "DRAM channels: %u\n", dram_info->num_channels);
drm_dbg_kms(&i915->drm, "Num QGV points %u\n", dram_info->num_qgv_points);
drm_dbg_kms(&i915->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points);
drm_dbg_kms(display->drm, "Num QGV points %u\n", dram_info->num_qgv_points);
drm_dbg_kms(display->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points);
/* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
@ -779,45 +862,7 @@ int intel_dram_detect(struct drm_i915_private *i915)
* checks, and prefer not dereferencing on platforms that shouldn't look at dram
* info, to catch accidental and incorrect dram info checks.
*/
const struct dram_info *intel_dram_info(struct drm_device *drm)
const struct dram_info *intel_dram_info(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(drm);
return i915->dram_info;
}
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
{
static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
static const u8 sets[4] = { 1, 1, 2, 2 };
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
sets[EDRAM_SETS_IDX(cap)];
}
void intel_dram_edram_detect(struct drm_i915_private *i915)
{
u32 edram_cap = 0;
if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
return;
edram_cap = intel_uncore_read_fw(&i915->uncore, HSW_EDRAM_CAP);
/* NB: We can't write IDICR yet because we don't have gt funcs set up */
if (!(edram_cap & EDRAM_ENABLED))
return;
/*
* The needed capability bits for size calculation are not there with
* pre gen9 so return 128MB always.
*/
if (GRAPHICS_VER(i915) < 9)
i915->edram_size_mb = 128;
else
i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
return display->dram.info;
}

View file

@ -8,8 +8,7 @@
#include <linux/types.h>
struct drm_i915_private;
struct drm_device;
struct intel_display;
struct dram_info {
enum intel_dram_type {
@ -35,11 +34,10 @@ struct dram_info {
bool has_16gb_dimms;
};
void intel_dram_edram_detect(struct drm_i915_private *i915);
int intel_dram_detect(struct drm_i915_private *i915);
unsigned int intel_fsb_freq(struct drm_i915_private *i915);
unsigned int intel_mem_freq(struct drm_i915_private *i915);
const struct dram_info *intel_dram_info(struct drm_device *drm);
int intel_dram_detect(struct intel_display *display);
unsigned int intel_fsb_freq(struct intel_display *display);
unsigned int intel_mem_freq(struct intel_display *display);
const struct dram_info *intel_dram_info(struct intel_display *display);
const char *intel_dram_type_str(enum intel_dram_type type);
#endif /* __INTEL_DRAM_H__ */

View file

@ -26,7 +26,7 @@
struct intel_dsb {
enum intel_dsb_id id;
struct intel_dsb_buffer dsb_buf;
struct intel_dsb_buffer *dsb_buf;
struct intel_crtc *crtc;
/*
@ -211,10 +211,10 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(display->drm,
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
intel_dsb_buffer_read(&dsb->dsb_buf, i),
intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
intel_dsb_buffer_read(dsb->dsb_buf, i),
intel_dsb_buffer_read(dsb->dsb_buf, i + 1),
intel_dsb_buffer_read(dsb->dsb_buf, i + 2),
intel_dsb_buffer_read(dsb->dsb_buf, i + 3));
drm_dbg_kms(display->drm, "}\n");
}
@ -231,12 +231,12 @@ unsigned int intel_dsb_size(struct intel_dsb *dsb)
unsigned int intel_dsb_head(struct intel_dsb *dsb)
{
return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
}
static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
{
return intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + intel_dsb_size(dsb);
return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf) + intel_dsb_size(dsb);
}
static void intel_dsb_ins_align(struct intel_dsb *dsb)
@ -263,8 +263,8 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->ins[0] = ldw;
dsb->ins[1] = udw;
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
@ -335,13 +335,13 @@ void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
/* Update the count */
dsb->ins[0]++;
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 0,
intel_dsb_buffer_write(dsb->dsb_buf, dsb->ins_start_offset + 0,
dsb->ins[0]);
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, val);
/* if number of data words is odd, then the last dword should be 0.*/
if (dsb->free_pos & 0x1)
intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos, 0);
}
void intel_dsb_reg_write(struct intel_dsb *dsb,
@ -521,7 +521,7 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
@ -541,7 +541,7 @@ static void intel_dsb_gosub_align(struct intel_dsb *dsb)
* "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
*/
if (aligned_tail - tail <= 2 * 8)
intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
@ -606,14 +606,14 @@ void intel_dsb_gosub_finish(struct intel_dsb *dsb)
*/
intel_dsb_noop(dsb, 8);
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
intel_dsb_buffer_flush_map(dsb->dsb_buf);
}
void intel_dsb_finish(struct intel_dsb *dsb)
{
intel_dsb_align_tail(dsb);
intel_dsb_buffer_flush_map(&dsb->dsb_buf);
intel_dsb_buffer_flush_map(dsb->dsb_buf);
}
static u32 dsb_error_int_status(struct intel_display *display)
@ -888,7 +888,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
!is_busy,
100, 1000, false);
if (ret) {
u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
u32 offset = intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
@ -934,6 +934,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
unsigned int max_cmds)
{
struct intel_display *display = to_intel_display(state);
struct intel_dsb_buffer *dsb_buf;
struct ref_tracker *wakeref;
struct intel_dsb *dsb;
unsigned int size;
@ -953,9 +954,12 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
dsb_buf = intel_dsb_buffer_create(display->drm, size);
if (IS_ERR(dsb_buf))
goto out_put_rpm;
dsb->dsb_buf = dsb_buf;
intel_display_rpm_put(display, wakeref);
dsb->id = dsb_id;
@ -988,7 +992,7 @@ out:
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
intel_dsb_buffer_cleanup(&dsb->dsb_buf);
intel_dsb_buffer_cleanup(dsb->dsb_buf);
kfree(dsb);
}

View file

@ -7,9 +7,14 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dsb_buffer.h"
struct intel_dsb_buffer {
u32 *cmd_buf;
struct i915_vma *vma;
size_t buf_size;
};
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
return i915_ggtt_offset(dsb_buf->vma);
@ -32,48 +37,66 @@ void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val,
memset(&dsb_buf->cmd_buf[idx], val, size);
}
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct drm_i915_private *i915 = to_i915(drm);
struct intel_dsb_buffer *dsb_buf;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *buf;
int ret;
dsb_buf = kzalloc(sizeof(*dsb_buf), GFP_KERNEL);
if (!dsb_buf)
return ERR_PTR(-ENOMEM);
if (HAS_LMEM(i915)) {
obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
I915_BO_ALLOC_CONTIGUOUS);
if (IS_ERR(obj))
return false;
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err;
}
} else {
obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return false;
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err;
}
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
i915_gem_object_put(obj);
return false;
goto err;
}
buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
return false;
goto err;
}
dsb_buf->vma = vma;
dsb_buf->cmd_buf = buf;
dsb_buf->buf_size = size;
return true;
return dsb_buf;
err:
kfree(dsb_buf);
return ERR_PTR(ret);
}
void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
kfree(dsb_buf);
}
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)

View file

@ -8,21 +8,14 @@
#include <linux/types.h>
struct intel_crtc;
struct i915_vma;
struct intel_dsb_buffer {
u32 *cmd_buf;
struct i915_vma *vma;
size_t buf_size;
};
struct drm_device;
struct intel_dsb_buffer;
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf,
size_t size);
struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size);
void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);

View file

@ -29,6 +29,9 @@
#include "intel_display_types.h"
struct intel_dsi_host;
struct ref_tracker;
#define INTEL_DSI_VIDEO_MODE 0
#define INTEL_DSI_COMMAND_MODE 1
@ -37,13 +40,11 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
struct intel_dsi_host;
struct intel_dsi {
struct intel_encoder base;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
intel_wakeref_t io_wakeref[I915_MAX_PORTS];
struct ref_tracker *io_wakeref[I915_MAX_PORTS];
/* GPIO Desc for panel and backlight control */
struct gpio_desc *gpio_panel;

View file

@ -20,7 +20,7 @@
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
#include "intel_panic.h"
#include "intel_parent.h"
#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@ -558,7 +558,7 @@ static bool plane_has_modifier(struct intel_display *display,
* where supported.
*/
if (intel_fb_is_ccs_modifier(md->modifier) &&
HAS_AUX_CCS(display) != !!md->ccs.packed_aux_planes)
intel_parent_has_auxccs(display) != !!md->ccs.packed_aux_planes)
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
@ -2216,7 +2216,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ret;
int i;
intel_fb->panic = intel_panic_alloc();
intel_fb->panic = intel_parent_panic_alloc(display);
if (!intel_fb->panic)
return -ENOMEM;

View file

@ -45,12 +45,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_stolen.h"
#include "gt/intel_gt_types.h"
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
#include "intel_de.h"
@ -64,6 +58,8 @@
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
#include "intel_frontbuffer.h"
#include "intel_parent.h"
#include "intel_step.h"
#define for_each_fbc_id(__display, __fbc_id) \
for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \
@ -71,7 +67,9 @@
#define for_each_intel_fbc(__display, __fbc, __fbc_id) \
for_each_fbc_id((__display), (__fbc_id)) \
for_each_if((__fbc) = (__display)->fbc[(__fbc_id)])
for_each_if((__fbc) = (__display)->fbc.instances[(__fbc_id)])
#define FBC_SYS_CACHE_ID_NONE I915_MAX_FBCS
struct intel_fbc_funcs {
void (*activate)(struct intel_fbc *fbc);
@ -129,6 +127,19 @@ struct intel_fbc {
const char *no_fbc_reason;
};
static struct intel_fbc *intel_fbc_for_pipe(struct intel_display *display, enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
struct intel_plane *primary = NULL;
primary = to_intel_plane(crtc->base.primary);
if (drm_WARN_ON(display->drm, !primary))
return NULL;
return primary->fbc;
}
/* plane stride in pixels */
static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
{
@ -204,7 +215,7 @@ static unsigned int _intel_fbc_cfb_stride(struct intel_display *display,
static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
@ -235,7 +246,7 @@ static unsigned int _intel_fbc_cfb_size(struct intel_display *display,
static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
unsigned int height = drm_rect_height(&plane_state->uapi.src) >> 16;
return _intel_fbc_cfb_size(display, height, intel_fbc_cfb_stride(plane_state));
@ -243,7 +254,7 @@ static unsigned int intel_fbc_cfb_size(const struct intel_plane_state *plane_sta
static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
unsigned int stride_aligned = intel_fbc_cfb_stride(plane_state);
unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@ -267,9 +278,7 @@ static u16 intel_fbc_override_cfb_stride(const struct intel_plane_state *plane_s
static bool intel_fbc_has_fences(struct intel_display *display)
{
struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
return intel_gt_support_legacy_fencing(to_gt(i915));
return intel_parent_has_fenced_regions(display);
}
static u32 i8xx_fbc_ctl(struct intel_fbc *fbc)
@ -382,17 +391,17 @@ static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
drm_WARN_ON(display->drm,
range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
i915_gem_stolen_node_offset(fbc->compressed_fb),
range_end_overflows_t(u64, intel_parent_stolen_area_address(display),
intel_parent_stolen_node_offset(display, fbc->compressed_fb),
U32_MAX));
drm_WARN_ON(display->drm,
range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
i915_gem_stolen_node_offset(fbc->compressed_llb),
range_end_overflows_t(u64, intel_parent_stolen_area_address(display),
intel_parent_stolen_node_offset(display, fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
i915_gem_stolen_node_address(fbc->compressed_fb));
intel_parent_stolen_node_address(display, fbc->compressed_fb));
intel_de_write(display, FBC_LL_BASE,
i915_gem_stolen_node_address(fbc->compressed_llb));
intel_parent_stolen_node_address(display, fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@ -500,7 +509,7 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, DPFC_CB_BASE,
i915_gem_stolen_node_offset(fbc->compressed_fb));
intel_parent_stolen_node_offset(display, fbc->compressed_fb));
}
static const struct intel_fbc_funcs g4x_fbc_funcs = {
@ -569,7 +578,7 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
i915_gem_stolen_node_offset(fbc->compressed_fb));
intel_parent_stolen_node_offset(display, fbc->compressed_fb));
}
static const struct intel_fbc_funcs ilk_fbc_funcs = {
@ -808,7 +817,7 @@ static u64 intel_fbc_stolen_end(struct intel_display *display)
* underruns, even if that range is not reserved by the BIOS. */
if (display->platform.broadwell ||
(DISPLAY_VER(display) == 9 && !display->platform.broxton))
end = i915_gem_stolen_area_size(display->drm) - 8 * 1024 * 1024;
end = intel_parent_stolen_area_size(display) - 8 * 1024 * 1024;
else
end = U64_MAX;
@ -843,14 +852,14 @@ static int find_compression_limit(struct intel_fbc *fbc,
size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size <<= 1, 4096, 0, end);
ret = intel_parent_stolen_insert_node_in_range(display, fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return limit;
for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size >>= 1, 4096, 0, end);
ret = intel_parent_stolen_insert_node_in_range(display, fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
}
@ -865,12 +874,12 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
int ret;
drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(fbc->compressed_fb));
intel_parent_stolen_node_allocated(display, fbc->compressed_fb));
drm_WARN_ON(display->drm,
i915_gem_stolen_node_allocated(fbc->compressed_llb));
intel_parent_stolen_node_allocated(display, fbc->compressed_llb));
if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
ret = i915_gem_stolen_insert_node(fbc->compressed_llb, 4096, 4096);
ret = intel_parent_stolen_insert_node(display, fbc->compressed_llb, 4096, 4096);
if (ret)
goto err;
}
@ -886,14 +895,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_dbg_kms(display->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
i915_gem_stolen_node_size(fbc->compressed_fb), fbc->limit);
intel_parent_stolen_node_size(display, fbc->compressed_fb), fbc->limit);
return 0;
err_llb:
if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
i915_gem_stolen_remove_node(fbc->compressed_llb);
if (intel_parent_stolen_node_allocated(display, fbc->compressed_llb))
intel_parent_stolen_remove_node(display, fbc->compressed_llb);
err:
if (i915_gem_stolen_initialized(display->drm))
if (intel_parent_stolen_initialized(display))
drm_info_once(display->drm,
"not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
@ -945,15 +954,83 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
fbc_compressor_clkgate_disable_wa(fbc, true);
}
static void fbc_sys_cache_update_config(struct intel_display *display, u32 reg,
enum intel_fbc_id id)
{
if (!HAS_FBC_SYS_CACHE(display))
return;
lockdep_assert_held(&display->fbc.sys_cache.lock);
/*
* Wa_14025769978:
* Fixes: SoC hardware issue in read caching
* Workaround: disable cache read setting which is enabled by default.
*/
if (!intel_display_wa(display, 14025769978))
/* Cache read enable is set by default */
reg |= FBC_SYS_CACHE_READ_ENABLE;
intel_de_write(display, XE3P_LPD_FBC_SYS_CACHE_USAGE_CFG, reg);
display->fbc.sys_cache.id = id;
}
static void fbc_sys_cache_disable(const struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
struct sys_cache_cfg *sys_cache = &display->fbc.sys_cache;
mutex_lock(&sys_cache->lock);
/* clear only if "fbc" reserved the cache */
if (sys_cache->id == fbc->id)
fbc_sys_cache_update_config(display, 0, FBC_SYS_CACHE_ID_NONE);
mutex_unlock(&sys_cache->lock);
}
static int fbc_sys_cache_limit(struct intel_display *display)
{
if (DISPLAY_VER(display) == 35)
return 2 * 1024 * 1024;
return 0;
}
static void fbc_sys_cache_enable(const struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
struct sys_cache_cfg *sys_cache = &display->fbc.sys_cache;
int range, offset;
u32 cfg;
if (!HAS_FBC_SYS_CACHE(display))
return;
range = fbc_sys_cache_limit(display) / (64 * 1024);
offset = intel_parent_stolen_node_offset(display, fbc->compressed_fb) / (4 * 1024);
cfg = FBC_SYS_CACHE_TAG_USE_RES_SPACE | FBC_SYS_CACHEABLE_RANGE(range) |
FBC_SYS_CACHE_START_BASE(offset);
mutex_lock(&sys_cache->lock);
/* update sys cache config only if sys cache is unassigned */
if (sys_cache->id == FBC_SYS_CACHE_ID_NONE)
fbc_sys_cache_update_config(display, cfg, fbc->id);
mutex_unlock(&sys_cache->lock);
}
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
i915_gem_stolen_remove_node(fbc->compressed_llb);
if (i915_gem_stolen_node_allocated(fbc->compressed_fb))
i915_gem_stolen_remove_node(fbc->compressed_fb);
if (intel_parent_stolen_node_allocated(display, fbc->compressed_llb))
intel_parent_stolen_remove_node(display, fbc->compressed_llb);
if (intel_parent_stolen_node_allocated(display, fbc->compressed_fb))
intel_parent_stolen_remove_node(display, fbc->compressed_fb);
}
void intel_fbc_cleanup(struct intel_display *display)
@ -966,11 +1043,16 @@ void intel_fbc_cleanup(struct intel_display *display)
__intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
i915_gem_stolen_node_free(fbc->compressed_fb);
i915_gem_stolen_node_free(fbc->compressed_llb);
intel_parent_stolen_node_free(display, fbc->compressed_fb);
intel_parent_stolen_node_free(display, fbc->compressed_llb);
kfree(fbc);
}
mutex_lock(&display->fbc.sys_cache.lock);
drm_WARN_ON(display->drm,
display->fbc.sys_cache.id != FBC_SYS_CACHE_ID_NONE);
mutex_unlock(&display->fbc.sys_cache.lock);
}
static bool i8xx_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
@ -1016,7 +1098,7 @@ static bool icl_fbc_stride_is_valid(const struct intel_plane_state *plane_state)
static bool stride_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
if (DISPLAY_VER(display) >= 11)
return icl_fbc_stride_is_valid(plane_state);
@ -1032,7 +1114,7 @@ static bool stride_is_valid(const struct intel_plane_state *plane_state)
static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@ -1052,7 +1134,7 @@ static bool i8xx_fbc_pixel_format_is_valid(const struct intel_plane_state *plane
static bool g4x_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
switch (fb->format->format) {
@ -1131,7 +1213,7 @@ intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
if (DISPLAY_VER(display) >= 35)
return xe3p_lpd_fbc_pixel_format_is_valid(plane_state);
@ -1167,7 +1249,7 @@ static bool skl_fbc_rotation_is_valid(const struct intel_plane_state *plane_stat
static bool rotation_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
if (DISPLAY_VER(display) >= 9)
return skl_fbc_rotation_is_valid(plane_state);
@ -1206,7 +1288,7 @@ static void intel_fbc_max_surface_size(struct intel_display *display,
*/
static bool intel_fbc_surface_size_ok(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
unsigned int effective_w, effective_h, max_w, max_h;
intel_fbc_max_surface_size(display, &max_w, &max_h);
@ -1239,7 +1321,7 @@ static void intel_fbc_max_plane_size(struct intel_display *display,
static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
unsigned int w, h, max_w, max_h;
intel_fbc_max_plane_size(display, &max_w, &max_h);
@ -1264,7 +1346,7 @@ static bool skl_fbc_tiling_valid(const struct intel_plane_state *plane_state)
static bool tiling_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
if (DISPLAY_VER(display) >= 9)
return skl_fbc_tiling_valid(plane_state);
@ -1344,7 +1426,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state->base.dev);
struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
const struct intel_plane_state *plane_state =
@ -1377,7 +1459,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
struct intel_display *display = to_intel_display(plane_state);
/*
* The use of a CPU fence is one of two ways to detect writes by the
@ -1398,12 +1480,13 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct intel_fbc *fbc = plane->fbc;
return intel_fbc_min_limit(plane_state) <= fbc->limit &&
intel_fbc_cfb_size(plane_state) <= fbc->limit *
i915_gem_stolen_node_size(fbc->compressed_fb);
intel_parent_stolen_node_size(display, fbc->compressed_fb);
}
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@ -1484,8 +1567,7 @@ static int _intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state->base.dev);
struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_display *display = to_intel_display(state);
struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@ -1496,12 +1578,12 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (!fbc)
return 0;
if (!i915_gem_stolen_initialized(display->drm)) {
if (!intel_parent_stolen_initialized(display)) {
plane_state->no_fbc_reason = "stolen memory not initialised";
return 0;
}
if (intel_vgpu_active(i915)) {
if (intel_parent_vgpu_active(display)) {
plane_state->no_fbc_reason = "VGPU active";
return 0;
}
@ -1521,6 +1603,16 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
/*
* Wa_15018326506:
* Fixes: Underrun during media decode
* Workaround: Do not enable FBC
*/
if (intel_display_wa(display, 15018326506)) {
plane_state->no_fbc_reason = "Wa_15018326506";
return 0;
}
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
if (intel_display_vtd_active(display) &&
(display->platform.skylake || display->platform.broxton)) {
@ -1702,7 +1794,7 @@ static bool __intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state->base.dev);
struct intel_display *display = to_intel_display(state);
struct intel_fbc *fbc = plane->fbc;
bool need_vblank_wait = false;
@ -1775,6 +1867,8 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)
__intel_fbc_cleanup_cfb(fbc);
fbc_sys_cache_disable(fbc);
/* wa_18038517565 Enable DPFC clock gating after FBC disable */
if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
fbc_compressor_clkgate_disable_wa(fbc, false);
@ -1915,7 +2009,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_plane *plane)
{
struct intel_display *display = to_intel_display(state->base.dev);
struct intel_display *display = to_intel_display(state);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = plane->fbc;
@ -1967,6 +2061,8 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
intel_fbc_program_workarounds(fbc);
intel_fbc_program_cfb(fbc);
fbc_sys_cache_enable(fbc);
}
/**
@ -1977,7 +2073,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,
*/
void intel_fbc_disable(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc->base.dev);
struct intel_display *display = to_intel_display(crtc);
struct intel_plane *plane;
for_each_intel_plane(display->drm, plane) {
@ -2119,6 +2215,37 @@ void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display)
__intel_fbc_handle_fifo_underrun_irq(fbc);
}
/**
* intel_fbc_read_underrun_dbg_info - Read and log FBC-related FIFO underrun debug info
* @display: display device instance
* @pipe: the pipe possibly containing the FBC
* @log: log the info?
*
* If @pipe does not contain an FBC instance, this function bails early.
* Otherwise, FBC-related FIFO underrun is read and cleared, and then, if @log
* is true, printed with error level.
*/
void intel_fbc_read_underrun_dbg_info(struct intel_display *display,
enum pipe pipe, bool log)
{
struct intel_fbc *fbc = intel_fbc_for_pipe(display, pipe);
u32 val;
if (!fbc)
return;
val = intel_de_read(display, FBC_DEBUG_STATUS(fbc->id));
if (!(val & FBC_UNDERRUN_DECMPR))
return;
intel_de_write(display, FBC_DEBUG_STATUS(fbc->id), FBC_UNDERRUN_DECMPR);
if (log)
drm_err(display->drm,
"Pipe %c FIFO underrun info: FBC decompressing\n",
pipe_name(pipe));
}
/*
* The DDX driver changes its behavior depending on the value it reads from
* i915.enable_fbc, so sanitize it by translating the default value into either
@ -2156,10 +2283,10 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
if (!fbc)
return NULL;
fbc->compressed_fb = i915_gem_stolen_node_alloc(display->drm);
fbc->compressed_fb = intel_parent_stolen_node_alloc(display);
if (!fbc->compressed_fb)
goto err;
fbc->compressed_llb = i915_gem_stolen_node_alloc(display->drm);
fbc->compressed_llb = intel_parent_stolen_node_alloc(display);
if (!fbc->compressed_llb)
goto err;
@ -2184,8 +2311,8 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
return fbc;
err:
i915_gem_stolen_node_free(fbc->compressed_llb);
i915_gem_stolen_node_free(fbc->compressed_fb);
intel_parent_stolen_node_free(display, fbc->compressed_llb);
intel_parent_stolen_node_free(display, fbc->compressed_fb);
kfree(fbc);
return NULL;
@ -2206,7 +2333,10 @@ void intel_fbc_init(struct intel_display *display)
display->params.enable_fbc);
for_each_fbc_id(display, fbc_id)
display->fbc[fbc_id] = intel_fbc_create(display, fbc_id);
display->fbc.instances[fbc_id] = intel_fbc_create(display, fbc_id);
mutex_init(&display->fbc.sys_cache.lock);
display->fbc.sys_cache.id = FBC_SYS_CACHE_ID_NONE;
}
/**
@ -2226,6 +2356,11 @@ void intel_fbc_sanitize(struct intel_display *display)
if (intel_fbc_hw_is_active(fbc))
intel_fbc_hw_deactivate(fbc);
}
/* Ensure the sys cache usage config is clear as well */
mutex_lock(&display->fbc.sys_cache.lock);
fbc_sys_cache_update_config(display, 0, FBC_SYS_CACHE_ID_NONE);
mutex_unlock(&display->fbc.sys_cache.lock);
}
static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
@ -2244,6 +2379,11 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused)
seq_puts(m, "FBC enabled\n");
seq_printf(m, "Compressing: %s\n",
str_yes_no(intel_fbc_is_compressing(fbc)));
mutex_lock(&display->fbc.sys_cache.lock);
seq_printf(m, "Using system cache: %s\n",
str_yes_no(display->fbc.sys_cache.id == fbc->id));
mutex_unlock(&display->fbc.sys_cache.lock);
} else {
seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
}
@ -2325,7 +2465,7 @@ void intel_fbc_debugfs_register(struct intel_display *display)
{
struct intel_fbc *fbc;
fbc = display->fbc[INTEL_FBC_A];
fbc = display->fbc.instances[INTEL_FBC_A];
if (fbc)
intel_fbc_debugfs_add(fbc, display->drm->debugfs_root);
}

View file

@ -9,6 +9,7 @@
#include <linux/types.h>
enum fb_op_origin;
enum pipe;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
@ -46,6 +47,8 @@ void intel_fbc_flush(struct intel_display *display,
unsigned int frontbuffer_bits, enum fb_op_origin origin);
void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane);
void intel_fbc_handle_fifo_underrun_irq(struct intel_display *display);
void intel_fbc_read_underrun_dbg_info(struct intel_display *display,
enum pipe, bool log);
void intel_fbc_reset_underrun(struct intel_display *display);
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc);
void intel_fbc_debugfs_register(struct intel_display *display);

View file

@ -88,6 +88,8 @@
#define DPFC_FENCE_YOFF _MMIO(0x3218)
#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258)
#define DPFC_CHICKEN _MMIO(0x3224)
#define FBC_DEBUG_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43220, 0x43260)
#define FBC_UNDERRUN_DECMPR REG_BIT(27)
#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264)
#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
@ -126,4 +128,14 @@
#define FBC_REND_NUKE REG_BIT(2)
#define FBC_REND_CACHE_CLEAN REG_BIT(1)
#define XE3P_LPD_FBC_SYS_CACHE_USAGE_CFG _MMIO(0x1344E0)
#define FBC_SYS_CACHE_START_BASE_MASK REG_GENMASK(31, 16)
#define FBC_SYS_CACHE_START_BASE(base) REG_FIELD_PREP(FBC_SYS_CACHE_START_BASE_MASK, (base))
#define FBC_SYS_CACHEABLE_RANGE_MASK REG_GENMASK(15, 4)
#define FBC_SYS_CACHEABLE_RANGE(range) REG_FIELD_PREP(FBC_SYS_CACHEABLE_RANGE_MASK, (range))
#define FBC_SYS_CACHE_TAG_MASK REG_GENMASK(3, 2)
#define FBC_SYS_CACHE_TAG_DONT_CACHE REG_FIELD_PREP(FBC_SYS_CACHE_TAG_MASK, 0)
#define FBC_SYS_CACHE_TAG_USE_RES_SPACE REG_FIELD_PREP(FBC_SYS_CACHE_TAG_MASK, 3)
#define FBC_SYS_CACHE_READ_ENABLE REG_BIT(0)
#endif /* __INTEL_FBC_REGS__ */

View file

@ -25,6 +25,8 @@
*
*/
#include <linux/seq_buf.h>
#include <drm/drm_print.h>
#include "i915_reg.h"
@ -57,6 +59,100 @@
* The code also supports underrun detection on the PCH transcoder.
*/
#define UNDERRUN_DBG1_NUM_PLANES 6
static void log_underrun_dbg1(struct intel_display *display, enum pipe pipe,
unsigned long plane_mask, const char *info)
{
DECLARE_SEQ_BUF(planes_desc, 32);
unsigned int i;
if (!plane_mask)
return;
for_each_set_bit(i, &plane_mask, UNDERRUN_DBG1_NUM_PLANES) {
if (i == 0)
seq_buf_puts(&planes_desc, "[C]");
else
seq_buf_printf(&planes_desc, "[%d]", i);
}
drm_err(display->drm, "Pipe %c FIFO underrun info: %s on planes: %s\n",
pipe_name(pipe), info, seq_buf_str(&planes_desc));
drm_WARN_ON(display->drm, seq_buf_has_overflowed(&planes_desc));
}
static void read_underrun_dbg1(struct intel_display *display, enum pipe pipe, bool log)
{
u32 val = intel_de_read(display, UNDERRUN_DBG1(pipe));
if (!val)
return;
intel_de_write(display, UNDERRUN_DBG1(pipe), val);
if (!log)
return;
log_underrun_dbg1(display, pipe, REG_FIELD_GET(UNDERRUN_DBUF_BLOCK_NOT_VALID_MASK, val),
"DBUF block not valid");
log_underrun_dbg1(display, pipe, REG_FIELD_GET(UNDERRUN_DDB_EMPTY_MASK, val),
"DDB empty");
log_underrun_dbg1(display, pipe, REG_FIELD_GET(UNDERRUN_DBUF_NOT_FILLED_MASK, val),
"DBUF not completely filled");
log_underrun_dbg1(display, pipe, REG_FIELD_GET(UNDERRUN_BELOW_WM0_MASK, val),
"DBUF below WM0");
}
static void read_underrun_dbg2(struct intel_display *display, enum pipe pipe, bool log)
{
u32 val = intel_de_read(display, UNDERRUN_DBG2(pipe));
if (!(val & UNDERRUN_FRAME_LINE_COUNTERS_FROZEN))
return;
intel_de_write(display, UNDERRUN_DBG2(pipe), UNDERRUN_FRAME_LINE_COUNTERS_FROZEN);
if (log)
drm_err(display->drm,
"Pipe %c FIFO underrun info: frame count: %u, line count: %u\n",
pipe_name(pipe),
REG_FIELD_GET(UNDERRUN_PIPE_FRAME_COUNT_MASK, val),
REG_FIELD_GET(UNDERRUN_LINE_COUNT_MASK, val));
}
static void read_underrun_dbg_pkgc(struct intel_display *display, bool log)
{
u32 val = intel_de_read(display, GEN12_DCPR_STATUS_1);
if (!(val & XE3P_UNDERRUN_PKGC))
return;
/*
* Note: If there are multiple pipes enabled, only one of them will see
* XE3P_UNDERRUN_PKGC set.
*/
intel_de_write(display, GEN12_DCPR_STATUS_1, XE3P_UNDERRUN_PKGC);
if (log)
drm_err(display->drm,
"General FIFO underrun info: Package C-state blocking memory\n");
}
static void read_underrun_dbg_info(struct intel_display *display,
enum pipe pipe,
bool log)
{
if (!HAS_UNDERRUN_DBG_INFO(display))
return;
read_underrun_dbg1(display, pipe, log);
read_underrun_dbg2(display, pipe, log);
intel_fbc_read_underrun_dbg_info(display, pipe, log);
read_underrun_dbg_pkgc(display, log);
}
static bool ivb_can_enable_err_int(struct intel_display *display)
{
struct intel_crtc *crtc;
@ -262,6 +358,17 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct intel_display *displa
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
/*
* The debug bits get latched at the time of the FIFO underrun ISR bit
* getting set. That means that any non-zero debug bit that is read when
* handling a FIFO underrun interrupt has the potential to belong to
* another underrun event (past or future). To alleviate this problem,
* let's clear existing bits before enabling the interrupt, so that at
* least we don't get information that is too out-of-date.
*/
if (enable && !old)
read_underrun_dbg_info(display, pipe, false);
if (HAS_GMCH(display))
i9xx_set_fifo_underrun_reporting(display, pipe, enable, old);
else if (display->platform.ironlake || display->platform.sandybridge)
@ -379,6 +486,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct intel_display *display,
trace_intel_cpu_fifo_underrun(display, pipe);
drm_err(display->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe));
read_underrun_dbg_info(display, pipe, true);
}
intel_fbc_handle_fifo_underrun_irq(display);

View file

@ -35,8 +35,6 @@
#include <drm/drm_print.h>
#include <drm/display/drm_hdcp_helper.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_regs.h"
@ -44,6 +42,7 @@
#include "intel_display_wa.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
#include "intel_parent.h"
struct intel_gmbus {
struct i2c_adapter adapter;
@ -391,12 +390,11 @@ intel_gpio_setup(struct intel_gmbus *bus, i915_reg_t gpio_reg)
static bool has_gmbus_irq(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
/*
* encoder->shutdown() may want to use GMBUS
* after irqs have already been disabled.
*/
return HAS_GMBUS_IRQ(display) && intel_irqs_enabled(i915);
return HAS_GMBUS_IRQ(display) && intel_parent_irq_enabled(display);
}
static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en)
@ -791,7 +789,7 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct intel_display *display = bus->display;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int ret;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);
@ -831,7 +829,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
.buf = buf,
}
};
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int ret;
wakeref = intel_display_power_get(display, POWER_DOMAIN_GMBUS);

View file

@ -29,10 +29,10 @@
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_parent.h"
#include "intel_pcode.h"
#include "intel_step.h"
@ -258,7 +258,7 @@ static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
/* If MTL+ make sure gsc is loaded and proxy is setup */
if (USE_HDCP_GSC(display)) {
if (!intel_hdcp_gsc_check_status(display->drm))
if (!intel_parent_hdcp_gsc_check_status(display))
return false;
}

View file

@ -1,22 +0,0 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2023 Intel Corporation
*/
#ifndef __INTEL_HDCP_GSC_H__
#define __INTEL_HDCP_GSC_H__
#include <linux/types.h>
struct drm_device;
struct intel_hdcp_gsc_context;
ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
void *msg_in, size_t msg_in_len,
void *msg_out, size_t msg_out_len);
bool intel_hdcp_gsc_check_status(struct drm_device *drm);
struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm);
void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context);
#endif /* __INTEL_HDCP_GCS_H__ */

View file

@ -10,8 +10,8 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_gsc_message.h"
#include "intel_parent.h"
static int
intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
@ -44,10 +44,9 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
session_init_in.protocol = data->protocol;
byte = intel_hdcp_gsc_msg_send(gsc_context, &session_init_in,
sizeof(session_init_in),
&session_init_out,
sizeof(session_init_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&session_init_in, sizeof(session_init_in),
&session_init_out, sizeof(session_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -106,10 +105,9 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN);
memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN);
byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_rxcert_in,
sizeof(verify_rxcert_in),
&verify_rxcert_out,
sizeof(verify_rxcert_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&verify_rxcert_in, sizeof(verify_rxcert_in),
&verify_rxcert_out, sizeof(verify_rxcert_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed: %zd\n", byte);
return byte;
@ -169,10 +167,9 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
memcpy(send_hprime_in.h_prime, rx_hprime->h_prime,
HDCP_2_2_H_PRIME_LEN);
byte = intel_hdcp_gsc_msg_send(gsc_context, &send_hprime_in,
sizeof(send_hprime_in),
&send_hprime_out,
sizeof(send_hprime_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&send_hprime_in, sizeof(send_hprime_in),
&send_hprime_out, sizeof(send_hprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -220,10 +217,9 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km,
HDCP_2_2_E_KH_KM_LEN);
byte = intel_hdcp_gsc_msg_send(gsc_context, &pairing_info_in,
sizeof(pairing_info_in),
&pairing_info_out,
sizeof(pairing_info_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&pairing_info_in, sizeof(pairing_info_in),
&pairing_info_out, sizeof(pairing_info_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -269,8 +265,9 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
lc_init_in.port.physical_port = (u8)data->hdcp_ddi;
lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = intel_hdcp_gsc_msg_send(gsc_context, &lc_init_in, sizeof(lc_init_in),
&lc_init_out, sizeof(lc_init_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&lc_init_in, sizeof(lc_init_in),
&lc_init_out, sizeof(lc_init_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -321,10 +318,9 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime,
HDCP_2_2_L_PRIME_LEN);
byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_lprime_in,
sizeof(verify_lprime_in),
&verify_lprime_out,
sizeof(verify_lprime_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&verify_lprime_in, sizeof(verify_lprime_in),
&verify_lprime_out, sizeof(verify_lprime_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -370,8 +366,9 @@ intel_hdcp_gsc_get_session_key(struct device *dev,
get_skey_in.port.physical_port = (u8)data->hdcp_ddi;
get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = intel_hdcp_gsc_msg_send(gsc_context, &get_skey_in, sizeof(get_skey_in),
&get_skey_out, sizeof(get_skey_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&get_skey_in, sizeof(get_skey_in),
&get_skey_out, sizeof(get_skey_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -434,10 +431,9 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids,
HDCP_2_2_RECEIVER_IDS_MAX_LEN);
byte = intel_hdcp_gsc_msg_send(gsc_context, &verify_repeater_in,
sizeof(verify_repeater_in),
&verify_repeater_out,
sizeof(verify_repeater_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&verify_repeater_in, sizeof(verify_repeater_in),
&verify_repeater_out, sizeof(verify_repeater_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -504,9 +500,9 @@ intel_hdcp_gsc_verify_mprime(struct device *dev,
verify_mprime_in->k = cpu_to_be16(data->k);
byte = intel_hdcp_gsc_msg_send(gsc_context, verify_mprime_in, cmd_size,
&verify_mprime_out,
sizeof(verify_mprime_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
verify_mprime_in, cmd_size,
&verify_mprime_out, sizeof(verify_mprime_out));
kfree(verify_mprime_in);
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
@ -552,10 +548,9 @@ static int intel_hdcp_gsc_enable_authentication(struct device *dev,
enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
enable_auth_in.stream_type = data->streams[0].stream_type;
byte = intel_hdcp_gsc_msg_send(gsc_context, &enable_auth_in,
sizeof(enable_auth_in),
&enable_auth_out,
sizeof(enable_auth_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&enable_auth_in, sizeof(enable_auth_in),
&enable_auth_out, sizeof(enable_auth_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -599,10 +594,9 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
session_close_in.port.physical_port = (u8)data->hdcp_ddi;
session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder;
byte = intel_hdcp_gsc_msg_send(gsc_context, &session_close_in,
sizeof(session_close_in),
&session_close_out,
sizeof(session_close_out));
byte = intel_parent_hdcp_gsc_msg_send(display, gsc_context,
&session_close_in, sizeof(session_close_in),
&session_close_out, sizeof(session_close_out));
if (byte < 0) {
drm_dbg_kms(display->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte);
return byte;
@ -645,7 +639,7 @@ int intel_hdcp_gsc_init(struct intel_display *display)
mutex_lock(&display->hdcp.hdcp_mutex);
gsc_context = intel_hdcp_gsc_context_alloc(display->drm);
gsc_context = intel_parent_hdcp_gsc_context_alloc(display);
if (IS_ERR(gsc_context)) {
ret = PTR_ERR(gsc_context);
kfree(arbiter);
@ -665,7 +659,7 @@ out:
void intel_hdcp_gsc_fini(struct intel_display *display)
{
intel_hdcp_gsc_context_free(display->hdcp.gsc_context);
intel_parent_hdcp_gsc_context_free(display, display->hdcp.gsc_context);
display->hdcp.gsc_context = NULL;
kfree(display->hdcp.arbiter);
display->hdcp.arbiter = NULL;

View file

@ -2518,7 +2518,7 @@ intel_hdmi_set_edid(struct drm_connector *_connector)
struct intel_display *display = to_intel_display(connector);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct i2c_adapter *ddc = connector->base.ddc;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
const struct drm_edid *drm_edid;
bool connected = false;
@ -2561,7 +2561,7 @@ intel_hdmi_detect(struct drm_connector *_connector, bool force)
enum drm_connector_status status = connector_status_disconnected;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.base.id, connector->base.name);

View file

@ -27,8 +27,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_power.h"
@ -39,6 +37,7 @@
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
#include "intel_parent.h"
/**
* DOC: Hotplug
@ -786,7 +785,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
container_of(work, typeof(*display), hotplug.poll_init_work);
struct drm_connector_list_iter conn_iter;
struct intel_connector *connector;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool enabled;
mutex_lock(&display->drm->mode_config.mutex);
@ -1177,13 +1176,12 @@ bool intel_hpd_schedule_detection(struct intel_display *display)
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
struct intel_display *display = m->private;
struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_hotplug *hotplug = &display->hotplug;
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
intel_synchronize_irq(dev_priv);
intel_parent_irq_synchronize(display);
flush_work(&display->hotplug.dig_port_work);
flush_delayed_work(&display->hotplug.hotplug_work);

View file

@ -519,12 +519,9 @@ void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir)
{
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
u32 trigger_aux = iir & xelpdp_pica_aux_mask(display);
u32 pin_mask = 0, long_mask = 0;
if (DISPLAY_VER(display) >= 20)
trigger_aux |= iir & XE2LPD_AUX_DDI_MASK;
for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) {
u32 val;

View file

@ -71,7 +71,6 @@
#include <drm/drm_print.h>
#include <drm/intel/intel_lpe_audio.h>
#include "i915_irq.h"
#include "intel_audio_regs.h"
#include "intel_de.h"
#include "intel_lpe_audio.h"

View file

@ -1324,11 +1324,11 @@ intel_lt_phy_config_changed(struct intel_encoder *encoder,
return true;
}
static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
static struct ref_tracker *intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
intel_psr_pause(intel_dp);
wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
@ -1336,7 +1336,7 @@ static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *enco
return wakeref;
}
static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, struct ref_tracker *wakeref)
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@ -1932,7 +1932,7 @@ void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
enum port port = encoder->port;
intel_wakeref_t wakeref = 0;
struct ref_tracker *wakeref = 0;
u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
@ -2060,7 +2060,7 @@ void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
enum port port = encoder->port;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
? (XELPDP_LANE_PIPE_RESET(0) |
@ -2137,7 +2137,7 @@ void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
const struct intel_ddi_buf_trans *trans;
u8 owned_lane_mask;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int n_entries, ln;
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@ -2222,7 +2222,7 @@ void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
{
u8 owned_lane_mask;
u8 lane;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
int i, j, k;
pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
@ -2310,7 +2310,7 @@ void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port))
intel_mtl_tbt_pll_enable(encoder, crtc_state);
intel_mtl_tbt_pll_enable_clock(encoder, crtc_state->port_clock);
else
intel_lt_phy_pll_enable(encoder, crtc_state);
}
@ -2320,7 +2320,7 @@ void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port))
intel_mtl_tbt_pll_disable(encoder);
intel_mtl_tbt_pll_disable_clock(encoder);
else
intel_lt_phy_pll_disable(encoder);

View file

@ -42,6 +42,4 @@ void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
#define HAS_LT_PHY(display) (DISPLAY_VER(display) >= 35)
#endif /* __INTEL_LT_PHY_H__ */

View file

@ -105,7 +105,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
wakeref = intel_display_power_get_if_enabled(display, encoder->power_domain);

View file

@ -940,7 +940,7 @@ void intel_modeset_setup_hw_state(struct intel_display *display,
{
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);

View file

@ -246,7 +246,6 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
verify_crtc_state(state, crtc);
intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
intel_lt_phy_pll_state_verify(state, crtc);
}

View file

@ -1,27 +0,0 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
#include <drm/drm_panic.h>
#include "gem/i915_gem_object.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_panic.h"
struct intel_panic *intel_panic_alloc(void)
{
return i915_gem_object_alloc_panic();
}
int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
{
struct intel_framebuffer *fb = sb->private;
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
return i915_gem_object_panic_setup(panic, sb, obj, fb->panic_tiling);
}
void intel_panic_finish(struct intel_panic *panic)
{
return i915_gem_object_panic_finish(panic);
}

View file

@ -1,14 +0,0 @@
/* SPDX-License-Identifier: MIT */
/* Copyright © 2025 Intel Corporation */
#ifndef __INTEL_PANIC_H__
#define __INTEL_PANIC_H__
struct drm_scanout_buffer;
struct intel_panic;
struct intel_panic *intel_panic_alloc(void);
int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb);
void intel_panic_finish(struct intel_panic *panic);
#endif /* __INTEL_PANIC_H__ */

View file

@ -0,0 +1,214 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
/*
* Convenience wrapper functions to call the parent interface functions:
*
* - display->parent->SUBSTRUCT->FUNCTION()
* - display->parent->FUNCTION()
*
* All functions here should be named accordingly:
*
* - intel_parent_SUBSTRUCT_FUNCTION()
* - intel_parent_FUNCTION()
*
* These functions may use display driver specific types for parameters and
* return values, translating them to and from the generic types used in the
* function pointer interface.
*/
#include <drm/drm_print.h>
#include <drm/intel/display_parent_interface.h>
#include "intel_display_core.h"
#include "intel_parent.h"
/* hdcp */
ssize_t intel_parent_hdcp_gsc_msg_send(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context,
void *msg_in, size_t msg_in_len,
void *msg_out, size_t msg_out_len)
{
return display->parent->hdcp->gsc_msg_send(gsc_context, msg_in, msg_in_len, msg_out, msg_out_len);
}
bool intel_parent_hdcp_gsc_check_status(struct intel_display *display)
{
return display->parent->hdcp->gsc_check_status(display->drm);
}
struct intel_hdcp_gsc_context *intel_parent_hdcp_gsc_context_alloc(struct intel_display *display)
{
return display->parent->hdcp->gsc_context_alloc(display->drm);
}
void intel_parent_hdcp_gsc_context_free(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context)
{
display->parent->hdcp->gsc_context_free(gsc_context);
}
/* irq */
bool intel_parent_irq_enabled(struct intel_display *display)
{
return display->parent->irq->enabled(display->drm);
}
void intel_parent_irq_synchronize(struct intel_display *display)
{
display->parent->irq->synchronize(display->drm);
}
/* panic */
struct intel_panic *intel_parent_panic_alloc(struct intel_display *display)
{
return display->parent->panic->alloc();
}
int intel_parent_panic_setup(struct intel_display *display, struct intel_panic *panic, struct drm_scanout_buffer *sb)
{
return display->parent->panic->setup(panic, sb);
}
void intel_parent_panic_finish(struct intel_display *display, struct intel_panic *panic)
{
display->parent->panic->finish(panic);
}
/* pc8 */
void intel_parent_pc8_block(struct intel_display *display)
{
if (drm_WARN_ON_ONCE(display->drm, !display->parent->pc8))
return;
display->parent->pc8->block(display->drm);
}
void intel_parent_pc8_unblock(struct intel_display *display)
{
if (drm_WARN_ON_ONCE(display->drm, !display->parent->pc8))
return;
display->parent->pc8->unblock(display->drm);
}
/* rps */
bool intel_parent_rps_available(struct intel_display *display)
{
return display->parent->rps;
}
void intel_parent_rps_boost_if_not_started(struct intel_display *display, struct dma_fence *fence)
{
if (display->parent->rps)
display->parent->rps->boost_if_not_started(fence);
}
void intel_parent_rps_mark_interactive(struct intel_display *display, bool interactive)
{
if (display->parent->rps)
display->parent->rps->mark_interactive(display->drm, interactive);
}
void intel_parent_rps_ilk_irq_handler(struct intel_display *display)
{
if (display->parent->rps)
display->parent->rps->ilk_irq_handler(display->drm);
}
/* stolen */
int intel_parent_stolen_insert_node_in_range(struct intel_display *display,
struct intel_stolen_node *node, u64 size,
unsigned int align, u64 start, u64 end)
{
return display->parent->stolen->insert_node_in_range(node, size, align, start, end);
}
int intel_parent_stolen_insert_node(struct intel_display *display, struct intel_stolen_node *node, u64 size,
unsigned int align)
{
if (drm_WARN_ON_ONCE(display->drm, !display->parent->stolen->insert_node))
return -ENODEV;
return display->parent->stolen->insert_node(node, size, align);
}
void intel_parent_stolen_remove_node(struct intel_display *display,
struct intel_stolen_node *node)
{
display->parent->stolen->remove_node(node);
}
bool intel_parent_stolen_initialized(struct intel_display *display)
{
return display->parent->stolen->initialized(display->drm);
}
bool intel_parent_stolen_node_allocated(struct intel_display *display,
const struct intel_stolen_node *node)
{
return display->parent->stolen->node_allocated(node);
}
u32 intel_parent_stolen_node_offset(struct intel_display *display, struct intel_stolen_node *node)
{
return display->parent->stolen->node_offset(node);
}
u64 intel_parent_stolen_area_address(struct intel_display *display)
{
if (drm_WARN_ON_ONCE(display->drm, !display->parent->stolen->area_address))
return 0;
return display->parent->stolen->area_address(display->drm);
}
u64 intel_parent_stolen_area_size(struct intel_display *display)
{
if (drm_WARN_ON_ONCE(display->drm, !display->parent->stolen->area_size))
return 0;
return display->parent->stolen->area_size(display->drm);
}
u64 intel_parent_stolen_node_address(struct intel_display *display, struct intel_stolen_node *node)
{
return display->parent->stolen->node_address(node);
}
u64 intel_parent_stolen_node_size(struct intel_display *display, const struct intel_stolen_node *node)
{
return display->parent->stolen->node_size(node);
}
struct intel_stolen_node *intel_parent_stolen_node_alloc(struct intel_display *display)
{
return display->parent->stolen->node_alloc(display->drm);
}
/*
 * Free a node handle obtained from intel_parent_stolen_node_alloc().
 * Delegation only; whether a still-inserted node is legal here is the
 * parent op's contract — NOTE(review): confirm callers remove the node
 * first.
 */
void intel_parent_stolen_node_free(struct intel_display *display, const struct intel_stolen_node *node)
{
display->parent->stolen->node_free(node);
}
/* generic */
/*
 * Let the parent driver bump @fence's priority for display use.
 * The hook is optional and silently skipped when absent (best-effort,
 * no error reported) — matches how the other optional generic hooks
 * below are handled.
 */
void intel_parent_fence_priority_display(struct intel_display *display, struct dma_fence *fence)
{
if (display->parent->fence_priority_display)
display->parent->fence_priority_display(fence);
}
/*
 * Whether the parent reports AuxCCS support. Optional hook: a missing
 * hook short-circuits to false, so absence means "not supported".
 */
bool intel_parent_has_auxccs(struct intel_display *display)
{
return display->parent->has_auxccs && display->parent->has_auxccs(display->drm);
}
/*
 * Whether the parent reports fenced-region support. Same optional-hook
 * pattern as intel_parent_has_auxccs(): missing hook => false.
 */
bool intel_parent_has_fenced_regions(struct intel_display *display)
{
return display->parent->has_fenced_regions && display->parent->has_fenced_regions(display->drm);
}
/*
 * Whether the parent reports an active vGPU (virtualized GPU) session.
 * Optional hook: missing hook => false, i.e. assume bare metal.
 */
bool intel_parent_vgpu_active(struct intel_display *display)
{
return display->parent->vgpu_active && display->parent->vgpu_active(display->drm);
}

View file

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: MIT */
/* Copyright © 2025 Intel Corporation */
#ifndef __INTEL_PARENT_H__
#define __INTEL_PARENT_H__
#include <linux/types.h>
struct dma_fence;
struct drm_scanout_buffer;
struct intel_display;
struct intel_hdcp_gsc_context;
struct intel_panic;
struct intel_stolen_node;
/* hdcp */
ssize_t intel_parent_hdcp_gsc_msg_send(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context,
void *msg_in, size_t msg_in_len,
void *msg_out, size_t msg_out_len);
bool intel_parent_hdcp_gsc_check_status(struct intel_display *display);
struct intel_hdcp_gsc_context *intel_parent_hdcp_gsc_context_alloc(struct intel_display *display);
void intel_parent_hdcp_gsc_context_free(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context);
/* irq */
bool intel_parent_irq_enabled(struct intel_display *display);
void intel_parent_irq_synchronize(struct intel_display *display);
/* panic */
struct intel_panic *intel_parent_panic_alloc(struct intel_display *display);
int intel_parent_panic_setup(struct intel_display *display, struct intel_panic *panic, struct drm_scanout_buffer *sb);
void intel_parent_panic_finish(struct intel_display *display, struct intel_panic *panic);
/* pc8 */
void intel_parent_pc8_block(struct intel_display *display);
void intel_parent_pc8_unblock(struct intel_display *display);
/* rps */
bool intel_parent_rps_available(struct intel_display *display);
void intel_parent_rps_boost_if_not_started(struct intel_display *display, struct dma_fence *fence);
void intel_parent_rps_mark_interactive(struct intel_display *display, bool interactive);
void intel_parent_rps_ilk_irq_handler(struct intel_display *display);
/* stolen */
int intel_parent_stolen_insert_node_in_range(struct intel_display *display,
struct intel_stolen_node *node, u64 size,
unsigned int align, u64 start, u64 end);
int intel_parent_stolen_insert_node(struct intel_display *display, struct intel_stolen_node *node, u64 size,
unsigned int align);
void intel_parent_stolen_remove_node(struct intel_display *display,
struct intel_stolen_node *node);
bool intel_parent_stolen_initialized(struct intel_display *display);
bool intel_parent_stolen_node_allocated(struct intel_display *display,
const struct intel_stolen_node *node);
u32 intel_parent_stolen_node_offset(struct intel_display *display, struct intel_stolen_node *node);
u64 intel_parent_stolen_area_address(struct intel_display *display);
u64 intel_parent_stolen_area_size(struct intel_display *display);
u64 intel_parent_stolen_node_address(struct intel_display *display, struct intel_stolen_node *node);
u64 intel_parent_stolen_node_size(struct intel_display *display, const struct intel_stolen_node *node);
struct intel_stolen_node *intel_parent_stolen_node_alloc(struct intel_display *display);
void intel_parent_stolen_node_free(struct intel_display *display, const struct intel_stolen_node *node);
/* generic */
bool intel_parent_has_auxccs(struct intel_display *display);
bool intel_parent_has_fenced_regions(struct intel_display *display);
bool intel_parent_vgpu_active(struct intel_display *display);
void intel_parent_fence_priority_display(struct intel_display *display, struct dma_fence *fence);
#endif /* __INTEL_PARENT_H__ */

View file

@ -30,13 +30,12 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_atomic.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_parent.h"
#include "intel_pipe_crc.h"
#include "intel_pipe_crc_regs.h"
@ -589,7 +588,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *_crtc, const char *source_name)
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
bool enable;
@ -658,7 +657,6 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
enum pipe pipe = crtc->pipe;
@ -669,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
intel_de_write(display, PIPE_CRC_CTL(display, pipe), 0);
intel_de_posting_read(display, PIPE_CRC_CTL(display, pipe));
intel_synchronize_irq(dev_priv);
intel_parent_irq_synchronize(display);
}

View file

@ -45,7 +45,6 @@
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
#include "gem/i915_gem_object.h"
#include "i9xx_plane_regs.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
@ -56,7 +55,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "intel_panic.h"
#include "intel_parent.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "skl_scaler.h"
@ -179,25 +178,29 @@ bool intel_plane_needs_physical(struct intel_plane *plane)
DISPLAY_INFO(display)->cursor_needs_physical;
}
bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
bool intel_plane_can_async_flip(struct intel_plane *plane,
const struct drm_format_info *info,
u64 modifier)
{
if (intel_format_info_is_yuv_semiplanar(drm_format_info(format), modifier) ||
format == DRM_FORMAT_C8)
if (intel_format_info_is_yuv_semiplanar(info, modifier) ||
info->format == DRM_FORMAT_C8)
return false;
return plane->can_async_flip && plane->can_async_flip(modifier);
}
bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
u32 format,
u64 modifier)
bool intel_plane_format_mod_supported_async(struct drm_plane *_plane,
u32 format, u64 modifier)
{
if (!plane->funcs->format_mod_supported(plane, format, modifier))
struct intel_plane *plane = to_intel_plane(_plane);
const struct drm_format_info *info;
if (!plane->base.funcs->format_mod_supported(&plane->base, format, modifier))
return false;
return intel_plane_can_async_flip(to_intel_plane(plane),
format, modifier);
info = drm_get_format_info(plane->base.dev, format, modifier);
return intel_plane_can_async_flip(plane, info, modifier);
}
unsigned int intel_adjusted_rate(const struct drm_rect *src,
@ -651,11 +654,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
ilk_must_disable_cxsr(new_crtc_state, old_plane_state, new_plane_state))
new_crtc_state->disable_cxsr = true;
if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) {
if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
new_crtc_state->do_async_flip = true;
new_crtc_state->async_flip_planes |= BIT(plane->id);
} else if (plane->need_async_flip_toggle_wa &&
new_crtc_state->uapi.async_flip) {
if (new_crtc_state->uapi.async_flip) {
/*
* On platforms with double buffered async flip bit we
* set the bit already one frame early during the sync
@ -663,6 +665,9 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr
* hardware will therefore be ready to perform a real
* async flip during the next commit, without having
* to wait yet another frame for the bit to latch.
*
* async_flip_planes bitmask is also used by selective
* fetch calculation to choose full frame update.
*/
new_crtc_state->async_flip_planes |= BIT(plane->id);
}
@ -1235,8 +1240,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
goto unpin_fb;
if (new_plane_state->uapi.fence) {
i915_gem_fence_wait_priority_display(new_plane_state->uapi.fence);
intel_parent_fence_priority_display(display, new_plane_state->uapi.fence);
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
}
@ -1330,33 +1334,33 @@ static unsigned int intel_4tile_get_offset(unsigned int width, unsigned int x, u
return offset;
}
static void intel_panic_flush(struct drm_plane *plane)
static void intel_panic_flush(struct drm_plane *_plane)
{
struct intel_plane_state *plane_state = to_intel_plane_state(plane->state);
struct intel_crtc_state *crtc_state = to_intel_crtc_state(plane->state->crtc->state);
struct intel_plane *iplane = to_intel_plane(plane);
struct intel_display *display = to_intel_display(iplane);
struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_display *display = to_intel_display(plane);
const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb);
intel_panic_finish(intel_fb->panic);
intel_parent_panic_finish(display, fb->panic);
if (crtc_state->enable_psr2_sel_fetch) {
/* Force a full update for psr2 */
intel_psr2_panic_force_full_update(display, crtc_state);
intel_psr2_panic_force_full_update(crtc_state);
}
/* Flush the cache and don't disable tiling if it's the fbdev framebuffer.*/
if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
struct iosys_map map;
intel_fbdev_get_map(display->fbdev.fbdev, &map);
drm_clflush_virt_range(map.vaddr, fb->pitches[0] * fb->height);
drm_clflush_virt_range(map.vaddr, fb->base.pitches[0] * fb->base.height);
return;
}
if (fb->modifier && iplane->disable_tiling)
iplane->disable_tiling(iplane);
if (fb->base.modifier != DRM_FORMAT_MOD_LINEAR && plane->disable_tiling)
plane->disable_tiling(plane);
}
static unsigned int (*intel_get_tiling_func(u64 fb_modifier))(unsigned int width,
@ -1394,45 +1398,43 @@ static int intel_get_scanout_buffer(struct drm_plane *plane,
{
struct intel_plane_state *plane_state;
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct intel_framebuffer *fb;
struct intel_display *display = to_intel_display(plane->dev);
if (!plane->state || !plane->state->fb || !plane->state->visible)
return -ENODEV;
plane_state = to_intel_plane_state(plane->state);
fb = plane_state->hw.fb;
intel_fb = to_intel_framebuffer(fb);
fb = to_intel_framebuffer(plane_state->hw.fb);
obj = intel_fb_bo(fb);
obj = intel_fb_bo(&fb->base);
if (!obj)
return -ENODEV;
if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
intel_fbdev_get_map(display->fbdev.fbdev, &sb->map[0]);
} else {
int ret;
/* Can't disable tiling if DPT is in use */
if (intel_fb_uses_dpt(fb)) {
if (fb->format->cpp[0] != 4)
if (intel_fb_uses_dpt(&fb->base)) {
if (fb->base.format->cpp[0] != 4)
return -EOPNOTSUPP;
intel_fb->panic_tiling = intel_get_tiling_func(fb->modifier);
if (!intel_fb->panic_tiling)
fb->panic_tiling = intel_get_tiling_func(fb->base.modifier);
if (!fb->panic_tiling)
return -EOPNOTSUPP;
}
sb->private = intel_fb;
ret = intel_panic_setup(intel_fb->panic, sb);
sb->private = fb;
ret = intel_parent_panic_setup(display, fb->panic, sb);
if (ret)
return ret;
}
sb->width = fb->width;
sb->height = fb->height;
sb->width = fb->base.width;
sb->height = fb->base.height;
/* Use the generic linear format, because tiling, RC, CCS, CC
* will be disabled in disable_tiling()
*/
sb->format = drm_format_info(fb->format->format);
sb->pitch[0] = fb->pitches[0];
sb->format = drm_format_info(fb->base.format->format);
sb->pitch[0] = fb->base.pitches[0];
return 0;
}
@ -1464,7 +1466,7 @@ void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_sta
old_plane_state->ggtt_vma == new_plane_state->ggtt_vma)
return;
drm_vblank_work_init(&old_plane_state->unpin_work, old_plane_state->uapi.crtc,
drm_vblank_work_init(&old_plane_state->unpin_work, old_plane_state->hw.crtc,
intel_cursor_unpin_work);
}

View file

@ -8,6 +8,7 @@
#include <linux/types.h>
struct drm_format_info;
struct drm_plane;
struct drm_property;
struct drm_rect;
@ -21,7 +22,8 @@ enum plane_id;
struct intel_plane *
intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id);
bool intel_plane_can_async_flip(struct intel_plane *plane, u32 format,
bool intel_plane_can_async_flip(struct intel_plane *plane,
const struct drm_format_info *info,
u64 modifier);
unsigned int intel_adjusted_rate(const struct drm_rect *src,
const struct drm_rect *dst,

View file

@ -39,7 +39,7 @@ intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
if (!crtc_state->uapi.active)
if (!crtc_state->hw.active)
continue;
if (!plane_state->ggtt_vma)
@ -411,10 +411,12 @@ void intel_initial_plane_config(struct intel_display *display)
struct intel_crtc *crtc;
for_each_intel_crtc(display->drm, crtc) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
if (!crtc_state->hw.active)
continue;
/*

View file

@ -67,10 +67,10 @@ static const char *pps_name(struct intel_dp *intel_dp)
return "PPS <invalid>";
}
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
struct ref_tracker *intel_pps_lock(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
/*
* See vlv_pps_reset_all() why we need a power domain reference here.
@ -81,8 +81,7 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
return wakeref;
}
intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
intel_wakeref_t wakeref)
struct ref_tracker *intel_pps_unlock(struct intel_dp *intel_dp, struct ref_tracker *wakeref)
{
struct intel_display *display = to_intel_display(intel_dp);
@ -697,12 +696,10 @@ static void wait_panel_power_cycle(struct intel_dp *intel_dp)
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
wait_panel_power_cycle(intel_dp);
}
@ -811,14 +808,13 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
bool vdd;
if (!intel_dp_is_edp(intel_dp))
return;
vdd = false;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
vdd = intel_pps_vdd_on_unlocked(intel_dp);
INTEL_DISPLAY_STATE_WARN(display, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
@ -873,8 +869,6 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
@ -883,7 +877,7 @@ void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
intel_pps_vdd_off_sync_unlocked(intel_dp);
}
@ -892,9 +886,8 @@ static void edp_panel_vdd_work(struct work_struct *__work)
struct intel_pps *pps = container_of(to_delayed_work(__work),
struct intel_pps, panel_vdd_work);
struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
intel_wakeref_t wakeref;
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
if (!intel_dp->pps.want_panel_vdd)
intel_pps_vdd_off_sync_unlocked(intel_dp);
}
@ -952,12 +945,10 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
void intel_pps_vdd_off(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
intel_pps_vdd_off_unlocked(intel_dp, false);
}
@ -1026,12 +1017,10 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
void intel_pps_on(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
intel_pps_on_unlocked(intel_dp);
}
@ -1082,12 +1071,10 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
void intel_pps_off(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
intel_pps_off_unlocked(intel_dp);
}
@ -1095,7 +1082,6 @@ void intel_pps_off(struct intel_dp *intel_dp)
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
/*
* If we enable the backlight right away following a panel power
@ -1105,7 +1091,7 @@ void intel_pps_backlight_on(struct intel_dp *intel_dp)
*/
wait_backlight_on(intel_dp);
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
@ -1121,12 +1107,11 @@ void intel_pps_backlight_on(struct intel_dp *intel_dp)
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
u32 pp;
@ -1149,11 +1134,10 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
is_enabled = false;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@ -1251,9 +1235,7 @@ void vlv_pps_pipe_init(struct intel_dp *intel_dp)
/* Call on all DP, not just eDP */
void vlv_pps_pipe_reset(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
intel_dp->pps.vlv_active_pipe = vlv_active_pipe(intel_dp);
}
@ -1329,9 +1311,7 @@ void vlv_pps_port_disable(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_wakeref_t wakeref;
with_intel_pps_lock(intel_dp, wakeref)
with_intel_pps_lock(intel_dp)
intel_dp->pps.vlv_active_pipe = INVALID_PIPE;
}
@ -1362,10 +1342,9 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
bool have_power = false;
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
have_power = edp_have_panel_power(intel_dp) ||
edp_have_panel_vdd(intel_dp);
}
@ -1692,12 +1671,11 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
/*
* Reinit the power sequencer also on the resume path, in case
* BIOS did something nasty with it.
@ -1716,7 +1694,6 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp)
bool intel_pps_init(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
bool ret;
intel_dp->pps.initializing = true;
@ -1724,7 +1701,7 @@ bool intel_pps_init(struct intel_dp *intel_dp)
pps_init_timestamps(intel_dp);
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
ret = pps_initial_setup(intel_dp);
pps_init_delays(intel_dp);
@ -1760,9 +1737,7 @@ static void pps_init_late(struct intel_dp *intel_dp)
void intel_pps_init_late(struct intel_dp *intel_dp)
{
intel_wakeref_t wakeref;
with_intel_pps_lock(intel_dp, wakeref) {
with_intel_pps_lock(intel_dp) {
/* Reinit delays after per-panel info has been parsed from VBT */
pps_init_late(intel_dp);

View file

@ -8,20 +8,22 @@
#include <linux/types.h>
#include "intel_wakeref.h"
enum pipe;
struct intel_connector;
struct intel_crtc_state;
struct intel_display;
struct intel_dp;
struct intel_encoder;
struct ref_tracker;
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp);
intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref);
struct ref_tracker *intel_pps_lock(struct intel_dp *intel_dp);
struct ref_tracker *intel_pps_unlock(struct intel_dp *intel_dp, struct ref_tracker *wakeref);
#define with_intel_pps_lock(dp, wf) \
for ((wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
#define __with_intel_pps_lock(dp, wf) \
for (struct ref_tracker *(wf) = intel_pps_lock(dp); (wf); (wf) = intel_pps_unlock((dp), (wf)))
#define with_intel_pps_lock(dp) \
__with_intel_pps_lock((dp), __UNIQUE_ID(wakeref))
void intel_pps_backlight_on(struct intel_dp *intel_dp);
void intel_pps_backlight_off(struct intel_dp *intel_dp);

View file

@ -494,82 +494,37 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}
static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
{
u8 su_capability = 0;
if (intel_dp->psr.sink_panel_replay_su_support) {
if (drm_dp_dpcd_read_byte(&intel_dp->aux,
DP_PANEL_REPLAY_CAP_CAPABILITY,
&su_capability) < 0)
return 0;
} else {
su_capability = intel_dp->psr_dpcd[1];
}
return su_capability;
}
static unsigned int
intel_dp_get_su_x_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
DP_PANEL_REPLAY_CAP_X_GRANULARITY :
DP_PSR2_SU_X_GRANULARITY;
}
static unsigned int
intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
{
return intel_dp->psr.sink_panel_replay_su_support ?
DP_PANEL_REPLAY_CAP_Y_GRANULARITY :
DP_PSR2_SU_Y_GRANULARITY;
}
/*
* Note: Bits related to granularity are same in panel replay and psr
* registers. Rely on PSR definitions on these "common" bits.
*/
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
static void _psr_compute_su_granularity(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
ssize_t r;
u16 w;
__le16 w;
u8 y;
/*
* TODO: Do we need to take into account panel supporting both PSR and
* Panel replay?
*/
/*
* If sink don't have specific granularity requirements set legacy
* ones.
*/
if (!(intel_dp_get_su_capability(intel_dp) &
DP_PSR2_SU_GRANULARITY_REQUIRED)) {
if (!(connector->dp.psr_caps.dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
/* As PSR2 HW sends full lines, we do not care about x granularity */
w = 4;
w = cpu_to_le16(4);
y = 4;
goto exit;
}
r = drm_dp_dpcd_read(&intel_dp->aux,
intel_dp_get_su_x_granularity_offset(intel_dp),
&w, 2);
if (r != 2)
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, sizeof(w));
if (r != sizeof(w))
drm_dbg_kms(display->drm,
"Unable to read selective update x granularity\n");
/*
* Spec says that if the value read is 0 the default granularity should
* be used instead.
*/
if (r != 2 || w == 0)
w = 4;
if (r != sizeof(w) || w == 0)
w = cpu_to_le16(4);
r = drm_dp_dpcd_read(&intel_dp->aux,
intel_dp_get_su_y_granularity_offset(intel_dp),
&y, 1);
r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
if (r != 1) {
drm_dbg_kms(display->drm,
"Unable to read selective update y granularity\n");
@ -579,17 +534,17 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
y = 1;
exit:
intel_dp->psr.su_w_granularity = w;
intel_dp->psr.su_y_granularity = y;
connector->dp.psr_caps.su_w_granularity = le16_to_cpu(w);
connector->dp.psr_caps.su_y_granularity = y;
}
static enum intel_panel_replay_dsc_support
compute_pr_dsc_support(struct intel_dp *intel_dp)
compute_pr_dsc_support(struct intel_connector *connector)
{
u8 pr_dsc_mode;
u8 val;
val = intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
val = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);
switch (pr_dsc_mode) {
@ -621,7 +576,31 @@ static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_supp
};
}
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
static void _panel_replay_compute_su_granularity(struct intel_connector *connector)
{
u16 w;
u8 y;
if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED)) {
w = 4;
y = 4;
goto exit;
}
/*
* Spec says that if the value read is 0 the default granularity should
* be used instead.
*/
w = le16_to_cpu(*(__le16 *)&connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_X_GRANULARITY)]) ? : 4;
y = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_Y_GRANULARITY)] ? : 1;
exit:
connector->dp.panel_replay_caps.su_w_granularity = w;
connector->dp.panel_replay_caps.su_y_granularity = y;
}
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
int ret;
@ -631,11 +610,12 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
return;
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
&intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
&connector->dp.panel_replay_caps.dpcd,
sizeof(connector->dp.panel_replay_caps.dpcd));
if (ret < 0)
return;
if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_SUPPORT))
return;
@ -646,7 +626,7 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
return;
}
if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
@ -654,36 +634,40 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
}
}
connector->dp.panel_replay_caps.support = true;
intel_dp->psr.sink_panel_replay_support = true;
if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_SU_SUPPORT) {
connector->dp.panel_replay_caps.su_support = true;
intel_dp->psr.sink_panel_replay_dsc_support = compute_pr_dsc_support(intel_dp);
_panel_replay_compute_su_granularity(connector);
}
connector->dp.panel_replay_caps.dsc_support = compute_pr_dsc_support(connector);
drm_dbg_kms(display->drm,
"Panel replay %sis supported by panel (in DSC mode: %s)\n",
intel_dp->psr.sink_panel_replay_su_support ?
connector->dp.panel_replay_caps.su_support ?
"selective_update " : "",
panel_replay_dsc_support_str(intel_dp->psr.sink_panel_replay_dsc_support));
panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
}
static void _psr_init_dpcd(struct intel_dp *intel_dp)
static void _psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
int ret;
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
sizeof(intel_dp->psr_dpcd));
ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, connector->dp.psr_caps.dpcd,
sizeof(connector->dp.psr_caps.dpcd));
if (ret < 0)
return;
if (!intel_dp->psr_dpcd[0])
if (!connector->dp.psr_caps.dpcd[0])
return;
drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
connector->dp.psr_caps.dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
drm_dbg_kms(display->drm,
@ -697,13 +681,14 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
return;
}
connector->dp.psr_caps.support = true;
intel_dp->psr.sink_support = true;
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
connector->dp.psr_caps.sync_latency = intel_dp_get_sink_sync_latency(intel_dp);
if (DISPLAY_VER(display) >= 9 &&
intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
connector->dp.psr_caps.dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = connector->dp.psr_caps.dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
/*
@ -717,22 +702,21 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
* Y-coordinate requirement panels we would need to enable
* GTC first.
*/
intel_dp->psr.sink_psr2_support = y_req &&
connector->dp.psr_caps.su_support = y_req &&
intel_alpm_aux_wake_supported(intel_dp);
drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
connector->dp.psr_caps.su_support ? "" : "not ");
}
if (connector->dp.psr_caps.su_support)
_psr_compute_su_granularity(intel_dp, connector);
}
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
_psr_init_dpcd(intel_dp);
_psr_init_dpcd(intel_dp, connector);
_panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr.sink_psr2_support ||
intel_dp->psr.sink_panel_replay_su_support)
intel_dp_get_su_granularity(intel_dp);
_panel_replay_init_dpcd(intel_dp, connector);
}
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
@ -772,8 +756,9 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
aux_ctl);
}
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
static bool psr2_su_region_et_valid(struct intel_connector *connector, bool panel_replay)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_display *display = to_intel_display(intel_dp);
if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
@ -781,9 +766,9 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return false;
return panel_replay ?
intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
@ -924,7 +909,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
* off-by-one issue that HW has in some cases.
*/
idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
idle_frames = max(idle_frames, connector->dp.psr_caps.sync_latency + 1);
if (drm_WARN_ON(display->drm, idle_frames > 0xf))
idle_frames = 0xf;
@ -1019,10 +1004,11 @@ static int psr2_block_count(struct intel_dp *intel_dp)
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
u8 frames_before_su_entry;
frames_before_su_entry = max_t(u8,
intel_dp->psr.sink_sync_latency + 1,
connector->dp.psr_caps.sync_latency + 1,
2);
/* Entry setup frames must be at least 1 less than frames before SU entry */
@ -1304,25 +1290,32 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return crtc_state->enable_psr2_sel_fetch = true;
}
static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
static bool psr2_granularity_check(struct intel_crtc_state *crtc_state,
struct intel_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
u16 y_granularity = 0;
u16 sink_y_granularity = crtc_state->has_panel_replay ?
connector->dp.panel_replay_caps.su_y_granularity :
connector->dp.psr_caps.su_y_granularity;
u16 sink_w_granularity = crtc_state->has_panel_replay ?
connector->dp.panel_replay_caps.su_w_granularity :
connector->dp.psr_caps.su_w_granularity;
/* PSR2 HW only send full lines so we only need to validate the width */
if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
if (crtc_hdisplay % sink_w_granularity)
return false;
if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
if (crtc_vdisplay % sink_y_granularity)
return false;
/* HW tracking is only aligned to 4 lines */
if (!crtc_state->enable_psr2_sel_fetch)
return intel_dp->psr.su_y_granularity == 4;
return sink_y_granularity == 4;
/*
* adl_p and mtl platforms have 1 line granularity.
@ -1330,11 +1323,11 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* to match sink requirement if multiple of 4.
*/
if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = sink_y_granularity;
else if (sink_y_granularity <= 2)
y_granularity = 4;
else if ((intel_dp->psr.su_y_granularity % 4) == 0)
y_granularity = intel_dp->psr.su_y_granularity;
else if ((sink_y_granularity % 4) == 0)
y_granularity = sink_y_granularity;
if (y_granularity == 0 || crtc_vdisplay % y_granularity)
return false;
@ -1372,16 +1365,18 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
}
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state,
const struct drm_display_mode *adjusted_mode)
{
struct intel_display *display = to_intel_display(intel_dp);
int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
int psr_setup_time = drm_dp_psr_setup_time(connector->dp.psr_caps.dpcd);
int entry_setup_frames = 0;
if (psr_setup_time < 0) {
drm_dbg_kms(display->drm,
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
connector->dp.psr_caps.dpcd[1]);
return -ETIME;
}
@ -1522,14 +1517,16 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
}
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
if (!connector->dp.psr_caps.su_support || display->params.enable_psr == 1)
return false;
/* JSL and EHL only supports eDP 1.3 */
@ -1621,9 +1618,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return true;
}
static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
static bool intel_sel_update_config_valid(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_display *display = to_intel_display(intel_dp);
if (HAS_PSR2_SEL_FETCH(display) &&
@ -1640,7 +1639,8 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state))
if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state,
conn_state))
goto unsupported;
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
@ -1653,11 +1653,11 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
if (DISPLAY_VER(display) < 14)
goto unsupported;
if (!intel_dp->psr.sink_panel_replay_su_support)
if (!connector->dp.panel_replay_caps.su_support)
goto unsupported;
if (intel_dsc_enabled_on_link(crtc_state) &&
intel_dp->psr.sink_panel_replay_dsc_support !=
connector->dp.panel_replay_caps.dsc_support !=
INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
drm_dbg_kms(display->drm,
"Selective update with Panel Replay not enabled because it's not supported with DSC\n");
@ -1671,14 +1671,14 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
if (!psr2_granularity_check(crtc_state, connector)) {
drm_dbg_kms(display->drm,
"Selective update not enabled, SU granularity not compatible\n");
goto unsupported;
}
crtc_state->enable_psr2_su_region_et =
psr2_su_region_et_valid(intel_dp, crtc_state->has_panel_replay);
crtc_state->enable_psr2_su_region_et = psr2_su_region_et_valid(connector,
crtc_state->has_panel_replay);
return true;
@ -1688,7 +1688,8 @@ unsupported:
}
static bool _psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
@ -1703,7 +1704,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (crtc_state->vrr.enable)
return false;
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode);
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
@ -1717,19 +1718,33 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
return true;
}
static bool
_panel_replay_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
static inline bool compute_link_off_after_as_sdp_when_pr_active(struct intel_connector *connector)
{
return (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP);
}
static inline bool compute_disable_as_sdp_when_pr_active(struct intel_connector *connector)
{
return !(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR);
}
static bool _panel_replay_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_display *display = to_intel_display(intel_dp);
struct intel_hdcp *hdcp = &connector->hdcp;
if (!CAN_PANEL_REPLAY(intel_dp))
return false;
if (!connector->dp.panel_replay_caps.support)
return false;
if (!panel_replay_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
return false;
@ -1742,13 +1757,16 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
}
if (intel_dsc_enabled_on_link(crtc_state) &&
intel_dp->psr.sink_panel_replay_dsc_support ==
connector->dp.panel_replay_caps.dsc_support ==
INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
drm_dbg_kms(display->drm,
"Panel Replay not enabled because it's not supported with DSC\n");
return false;
}
crtc_state->link_off_after_as_sdp_when_pr_active = compute_link_off_after_as_sdp_when_pr_active(connector);
crtc_state->disable_as_sdp_when_pr_active = compute_disable_as_sdp_when_pr_active(connector);
if (!intel_dp_is_edp(intel_dp))
return true;
@ -1824,6 +1842,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
@ -1855,18 +1874,16 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
}
/* Only used for state verification. */
crtc_state->panel_replay_dsc_support = intel_dp->psr.sink_panel_replay_dsc_support;
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
crtc_state->panel_replay_dsc_support = connector->dp.panel_replay_caps.dsc_support;
crtc_state->has_panel_replay = _panel_replay_compute_config(crtc_state, conn_state);
crtc_state->has_psr = crtc_state->has_panel_replay ? true :
_psr_compute_config(intel_dp, crtc_state);
_psr_compute_config(intel_dp, crtc_state, conn_state);
if (!crtc_state->has_psr)
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
crtc_state->has_sel_update = intel_sel_update_config_valid(crtc_state, conn_state);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@ -2701,7 +2718,7 @@ intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
struct drm_rect inter;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
continue;
if (plane->id != PLANE_CURSOR)
@ -2734,7 +2751,7 @@ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state
if (plane_state->uapi.dst.y1 < 0 ||
plane_state->uapi.dst.x1 < 0 ||
plane_state->scaler_id >= 0 ||
plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
plane_state->hw.rotation != DRM_MODE_ROTATE_0)
return false;
return true;
@ -2749,7 +2766,8 @@ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state
*/
static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
{
if (crtc_state->scaler_state.scaler_id >= 0)
if (crtc_state->scaler_state.scaler_id >= 0 ||
crtc_state->async_flip_planes)
return false;
return true;
@ -2838,7 +2856,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
.x2 = INT_MAX };
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
continue;
if (!new_plane_state->uapi.visible &&
@ -2937,7 +2955,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct drm_rect *sel_fetch_area, inter;
struct intel_plane *linked = new_plane_state->planar_linked_plane;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
if (new_plane_state->hw.crtc != crtc_state->uapi.crtc ||
!new_plane_state->uapi.visible)
continue;
@ -2992,9 +3010,9 @@ skip_sel_fetch_set_loop:
return 0;
}
void intel_psr2_panic_force_full_update(struct intel_display *display,
struct intel_crtc_state *crtc_state)
void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = man_trk_ctl_enable_bit_get(display);
@ -4109,24 +4127,22 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
static void intel_psr_sink_capability(struct intel_dp *intel_dp,
static void intel_psr_sink_capability(struct intel_connector *connector,
struct seq_file *m)
{
struct intel_psr *psr = &intel_dp->psr;
seq_printf(m, "Sink support: PSR = %s",
str_yes_no(psr->sink_support));
str_yes_no(connector->dp.psr_caps.support));
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
if (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
if (connector->dp.psr_caps.support)
seq_printf(m, " [0x%02x]", connector->dp.psr_caps.dpcd[0]);
if (connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
seq_printf(m, " (Early Transport)");
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay = %s", str_yes_no(connector->dp.panel_replay_caps.support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
str_yes_no(connector->dp.panel_replay_caps.su_support));
seq_printf(m, ", Panel Replay DSC support = %s",
panel_replay_dsc_support_str(psr->sink_panel_replay_dsc_support));
if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
seq_printf(m, "\n");
@ -4164,7 +4180,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
seq_printf(m, " %s\n", psr->no_psr_reason);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp,
struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
@ -4173,9 +4190,9 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
bool enabled;
u32 val, psr2_ctl;
intel_psr_sink_capability(intel_dp, m);
intel_psr_sink_capability(connector, m);
if (!(psr->sink_support || psr->sink_panel_replay_support))
if (!(connector->dp.psr_caps.support || connector->dp.panel_replay_caps.support))
return 0;
wakeref = intel_display_rpm_get(display);
@ -4289,7 +4306,7 @@ static int i915_edp_psr_status_show(struct seq_file *m, void *data)
if (!intel_dp)
return -ENODEV;
return intel_psr_status(m, intel_dp);
return intel_psr_status(m, intel_dp, intel_dp->attached_connector);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
@ -4423,7 +4440,7 @@ static int i915_psr_status_show(struct seq_file *m, void *data)
struct intel_connector *connector = m->private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
return intel_psr_status(m, intel_dp);
return intel_psr_status(m, intel_dp, connector);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

View file

@ -28,7 +28,7 @@ struct intel_plane_state;
bool intel_encoder_can_psr(struct intel_encoder *encoder);
bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector);
void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@ -59,8 +59,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
void intel_psr2_panic_force_full_update(struct intel_display *display,
struct intel_crtc_state *crtc_state);
void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state);

View file

@ -462,7 +462,7 @@ vlv_sprite_get_hw_state(struct intel_plane *plane,
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
@ -893,7 +893,7 @@ ivb_sprite_get_hw_state(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
@ -1233,7 +1233,7 @@ g4x_sprite_get_hw_state(struct intel_plane *plane,
{
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
@ -1567,6 +1567,7 @@ static const struct drm_plane_funcs g4x_sprite_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = g4x_sprite_format_mod_supported,
.format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs snb_sprite_funcs = {
@ -1576,6 +1577,7 @@ static const struct drm_plane_funcs snb_sprite_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = snb_sprite_format_mod_supported,
.format_mod_supported_async = intel_plane_format_mod_supported_async,
};
static const struct drm_plane_funcs vlv_sprite_funcs = {
@ -1585,6 +1587,7 @@ static const struct drm_plane_funcs vlv_sprite_funcs = {
.atomic_duplicate_state = intel_plane_duplicate_state,
.atomic_destroy_state = intel_plane_destroy_state,
.format_mod_supported = vlv_sprite_format_mod_supported,
.format_mod_supported_async = intel_plane_format_mod_supported_async,
};
struct intel_plane *

View file

@ -51,7 +51,7 @@ struct intel_tc_port {
const struct intel_tc_phy_ops *phy_ops;
struct mutex lock; /* protects the TypeC port mode */
intel_wakeref_t lock_wakeref;
struct ref_tracker *lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
enum intel_display_power_domain lock_power_domain;
#endif
@ -182,7 +182,7 @@ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
intel_display_power_legacy_aux_domain(display, dig_port->aux_ch);
}
static intel_wakeref_t
static struct ref_tracker *
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
struct intel_display *display = to_intel_display(tc->dig_port);
@ -192,11 +192,11 @@ __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domai
return intel_display_power_get(display, *domain);
}
static intel_wakeref_t
static struct ref_tracker *
tc_cold_block(struct intel_tc_port *tc)
{
enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@ -207,7 +207,7 @@ tc_cold_block(struct intel_tc_port *tc)
static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
intel_wakeref_t wakeref)
struct ref_tracker *wakeref)
{
struct intel_display *display = to_intel_display(tc->dig_port);
@ -215,7 +215,7 @@ __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain doma
}
static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
tc_cold_unblock(struct intel_tc_port *tc, struct ref_tracker *wakeref)
{
struct intel_display __maybe_unused *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);
@ -269,10 +269,9 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
static u32 get_lane_mask(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t wakeref;
u32 lane_mask;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE)
lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
@ -296,7 +295,6 @@ get_pin_assignment(struct intel_tc_port *tc)
struct intel_display *display = to_intel_display(tc->dig_port);
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
enum intel_tc_pin_assignment pin_assignment;
intel_wakeref_t wakeref;
i915_reg_t reg;
u32 mask;
u32 val;
@ -312,7 +310,7 @@ get_pin_assignment(struct intel_tc_port *tc)
mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
}
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE)
val = intel_de_read(display, reg);
drm_WARN_ON(display->drm, val == 0xffffffff);
@ -527,12 +525,11 @@ static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
struct intel_display *display = to_intel_display(tc->dig_port);
struct intel_digital_port *dig_port = tc->dig_port;
u32 isr_bit = display->hotplug.pch_hpd[dig_port->base.hpd_pin];
intel_wakeref_t wakeref;
u32 fia_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref) {
with_intel_display_power(display, tc_phy_cold_off_domain(tc)) {
fia_isr = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
pch_isr = intel_de_read(display, SDEISR);
}
@ -628,7 +625,7 @@ static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
enum intel_display_power_domain domain;
intel_wakeref_t tc_cold_wref;
struct ref_tracker *tc_cold_wref;
tc_cold_wref = __tc_cold_block(tc, &domain);
@ -774,10 +771,9 @@ tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t wakeref;
u32 val;
with_intel_display_power(display, tc_phy_cold_off_domain(tc), wakeref)
with_intel_display_power(display, tc_phy_cold_off_domain(tc))
val = intel_de_read(display, PORT_TX_DFLEXDPSP(FIA1));
drm_WARN_ON(display->drm, val == 0xffffffff);
@ -819,12 +815,11 @@ static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
u32 cpu_isr_bits = display->hotplug.hpd[hpd_pin];
u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 cpu_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
cpu_isr = intel_de_read(display, GEN11_DE_HPD_ISR);
pch_isr = intel_de_read(display, SDEISR);
}
@ -897,7 +892,7 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
struct intel_display *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
struct ref_tracker *port_wakeref;
port_wakeref = intel_display_power_get(display, port_power_domain);
@ -916,7 +911,7 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
struct intel_display *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
struct ref_tracker *port_wakeref;
if (tc->mode == TC_PORT_TBT_ALT) {
tc->lock_wakeref = tc_cold_block(tc);
@ -968,7 +963,7 @@ static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
struct intel_display *display = to_intel_display(tc->dig_port);
enum intel_display_power_domain port_power_domain =
tc_port_power_domain(tc);
intel_wakeref_t port_wakeref;
struct ref_tracker *port_wakeref;
port_wakeref = intel_display_power_get(display, port_power_domain);
@ -1015,12 +1010,11 @@ static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
u32 pica_isr_bits = display->hotplug.hpd[hpd_pin];
u32 pch_isr_bit = display->hotplug.pch_hpd[hpd_pin];
intel_wakeref_t wakeref;
u32 pica_isr;
u32 pch_isr;
u32 mask = 0;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE) {
pica_isr = intel_de_read(display, PICAINTERRUPT_ISR);
pch_isr = intel_de_read(display, SDEISR);
}
@ -1175,7 +1169,7 @@ static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
intel_wakeref_t tc_cold_wref;
struct ref_tracker *tc_cold_wref;
enum intel_display_power_domain domain;
tc_cold_wref = __tc_cold_block(tc, &domain);

View file

@ -554,7 +554,8 @@ struct child_device_config {
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195+ */
u8 tbt:1; /* 209+ */
u8 flags2_reserved:2; /* 195+ */
u8 dedicated_external:1; /* 264+ */
u8 dyn_port_over_tc:1; /* 264+ */
u8 dp_port_trace_length:4; /* 209+ */
u8 dp_gpio_index; /* 195+ */
u16 dp_gpio_pin_num; /* 195+ */

View file

@ -999,7 +999,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
u32 dss_ctl1, dss_ctl2;
if (!intel_dsc_source_support(crtc_state))

View file

@ -9,12 +9,12 @@
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/intel/i915_drm.h>
#include <video/vga.h>
#include "soc/intel_gmch.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_vga.h"
#include "intel_vga_regs.h"
@ -95,6 +95,46 @@ void intel_vga_reset_io_mem(struct intel_display *display)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
static int intel_gmch_vga_set_state(struct intel_display *display, bool enable_decode)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
unsigned int reg = DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0), reg, &gmch_ctrl)) {
drm_err(display->drm, "failed to read control word\n");
return -EIO;
}
if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
return 0;
if (enable_decode)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
if (pci_bus_write_config_word(pdev->bus, PCI_DEVFN(0, 0), reg, gmch_ctrl)) {
drm_err(display->drm, "failed to write control word\n");
return -EIO;
}
return 0;
}
static unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
struct intel_display *display = to_intel_display(pdev);
intel_gmch_vga_set_state(display, enable_decode);
if (enable_decode)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
int intel_vga_register(struct intel_display *display)
{

View file

@ -9,7 +9,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
#include "pxp/intel_pxp.h"
#include "intel_bo.h"
#include "intel_color.h"
#include "intel_color_pipeline.h"
@ -22,7 +21,7 @@
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_panic.h"
#include "intel_parent.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
@ -597,7 +596,7 @@ static u32 tgl_plane_min_alignment(struct intel_plane *plane,
* Figure out what's going on here...
*/
if (display->platform.alderlake_p &&
intel_plane_can_async_flip(plane, fb->format->format, fb->modifier))
intel_plane_can_async_flip(plane, fb->format, fb->modifier))
return mult * 16 * 1024;
switch (fb->modifier) {
@ -941,7 +940,7 @@ skl_plane_get_hw_state(struct intel_plane *plane,
struct intel_display *display = to_intel_display(plane);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
@ -1603,7 +1602,7 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
}
/* FLAT CCS doesn't need to program AUX_DIST */
if (HAS_AUX_CCS(display))
if (HAS_AUX_DIST(display))
intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
skl_plane_aux_dist(plane_state, color_plane));
@ -2308,7 +2307,7 @@ static void check_protection(struct intel_plane_state *plane_state)
if (DISPLAY_VER(display) < 11)
return;
plane_state->decrypt = intel_pxp_key_check(obj, false) == 0;
plane_state->decrypt = intel_bo_key_check(obj) == 0;
plane_state->force_black = intel_bo_is_protected(obj) &&
!plane_state->decrypt;
}
@ -2462,7 +2461,7 @@ static struct intel_fbc *skl_plane_fbc(struct intel_display *display,
enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe);
if (skl_plane_has_fbc(display, fbc_id, plane_id))
return display->fbc[fbc_id];
return display->fbc.instances[fbc_id];
else
return NULL;
}
@ -2973,12 +2972,6 @@ skl_universal_plane_create(struct intel_display *display,
else
caps = skl_plane_caps(display, pipe, plane_id);
/* FIXME: xe has problems with AUX */
if (!IS_ENABLED(I915) && HAS_AUX_CCS(display))
caps &= ~(INTEL_PLANE_CAP_CCS_RC |
INTEL_PLANE_CAP_CCS_RC_CC |
INTEL_PLANE_CAP_CCS_MC);
modifiers = intel_fb_plane_get_modifiers(display, caps);
ret = drm_universal_plane_init(display->drm, &plane->base,

View file

@ -8,7 +8,6 @@
#include <drm/drm_blend.h>
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
@ -23,6 +22,7 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_flipq.h"
@ -718,7 +718,7 @@ static void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct intel_display *display = to_intel_display(crtc);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
@ -3125,7 +3125,7 @@ static bool skl_watermark_ipc_can_enable(struct intel_display *display)
if (display->platform.kabylake ||
display->platform.coffeelake ||
display->platform.cometlake) {
const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct dram_info *dram_info = intel_dram_info(display);
return dram_info->symmetric_memory;
}
@ -3169,7 +3169,7 @@ static void increase_wm_latency(struct intel_display *display, int inc)
static bool need_16gb_dimm_wa(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
const struct dram_info *dram_info = intel_dram_info(display);
return (display->platform.skylake || display->platform.kabylake ||
display->platform.coffeelake || display->platform.cometlake ||

View file

@ -936,7 +936,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_wakeref_t wakeref;
struct ref_tracker *wakeref;
enum port port;
bool active = false;

Some files were not shown because too many files have changed in this diff Show more