diff --git a/.clang-format b/.clang-format index f371a13b4d19..ecb5035a3d9d 100644 --- a/.clang-format +++ b/.clang-format @@ -167,7 +167,7 @@ ForEachMacros: - 'drm_connector_for_each_possible_encoder' - 'drm_exec_for_each_locked_object' - 'drm_exec_for_each_locked_object_reverse' - - 'drm_for_each_bridge_in_chain' + - 'drm_for_each_bridge_in_chain_scoped' - 'drm_for_each_connector_iter' - 'drm_for_each_crtc' - 'drm_for_each_crtc_reverse' diff --git a/.mailmap b/.mailmap index d2edd256b19d..081a14a3bd31 100644 --- a/.mailmap +++ b/.mailmap @@ -173,6 +173,7 @@ Carlos Bilbao Changbin Du Chao Yu Chao Yu +Chen-Yu Tsai Chester Lin Chris Chiu Chris Chiu @@ -227,6 +228,7 @@ Dmitry Safonov <0x7f454c46@gmail.com> Dmitry Safonov <0x7f454c46@gmail.com> Dmitry Safonov <0x7f454c46@gmail.com> Domen Puncer +Dong Aisheng Douglas Gilbert Drew Fustini diff --git a/Documentation/accel/qaic/qaic.rst b/Documentation/accel/qaic/qaic.rst index 018d6cc173d7..ef27e262cb91 100644 --- a/Documentation/accel/qaic/qaic.rst +++ b/Documentation/accel/qaic/qaic.rst @@ -36,7 +36,7 @@ polling mode and reenables the IRQ line. This mitigation in QAIC is very effective. The same lprnet usecase that generates 100k IRQs per second (per /proc/interrupts) is reduced to roughly 64 IRQs over 5 minutes while keeping the host system stable, and having the same -workload throughput performance (within run to run noise variation). +workload throughput performance (within run-to-run noise variation). Single MSI Mode --------------- @@ -49,7 +49,7 @@ useful to be able to fall back to a single MSI when needed. To support this fallback, we allow the case where only one MSI is able to be allocated, and share that one MSI between MHI and the DBCs. The device detects when only one MSI has been configured and directs the interrupts for the DBCs -to the interrupt normally used for MHI. Unfortunately this means that the +to the interrupt normally used for MHI. 
Unfortunately, this means that the interrupt handlers for every DBC and MHI wake up for every interrupt that arrives; however, the DBC threaded irq handlers only are started when work to be done is detected (MHI will always start its threaded handler). @@ -62,9 +62,9 @@ never disabled, allowing each new entry to the FIFO to trigger a new interrupt. Neural Network Control (NNC) Protocol ===================================== -The implementation of NNC is split between the KMD (QAIC) and UMD. In general +The implementation of NNC is split between the KMD (QAIC) and UMD. In general, QAIC understands how to encode/decode NNC wire protocol, and elements of the -protocol which require kernel space knowledge to process (for example, mapping +protocol which requires kernel space knowledge to process (for example, mapping host memory to device IOVAs). QAIC understands the structure of a message, and all of the transactions. QAIC does not understand commands (the payload of a passthrough transaction). 
diff --git a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml index 05442d437755..6211ab8bbb0e 100644 --- a/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml +++ b/Documentation/devicetree/bindings/display/bridge/fsl,imx8mp-hdmi-tx.yaml @@ -49,6 +49,10 @@ properties: $ref: /schemas/graph.yaml#/properties/port description: HDMI output port + port@2: + $ref: /schemas/graph.yaml#/properties/port + description: Parallel audio input port + required: - port@0 - port@1 @@ -98,5 +102,13 @@ examples: remote-endpoint = <&hdmi0_con>; }; }; + + port@2 { + reg = <2>; + + endpoint { + remote-endpoint = <&pai_to_hdmi_tx>; + }; + }; }; }; diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml index c167795c63f6..b95f10edd3a2 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml +++ b/Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml @@ -14,6 +14,9 @@ description: | R-Car Gen4 SoCs. The encoder can operate in either DSI or CSI-2 mode, with up to four data lanes. 
+allOf: + - $ref: /schemas/display/dsi-controller.yaml# + properties: compatible: enum: @@ -80,14 +83,14 @@ required: - resets - ports -additionalProperties: false +unevaluatedProperties: false examples: - | #include #include - dsi0: dsi-encoder@fed80000 { + dsi@fed80000 { compatible = "renesas,r8a779a0-dsi-csi2-tx"; reg = <0xfed80000 0x10000>; power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>; @@ -117,4 +120,51 @@ examples: }; }; }; + + - | + #include + #include + + dsi@fed80000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,r8a779g0-dsi-csi2-tx"; + reg = <0xfed80000 0x10000>; + clocks = <&cpg CPG_MOD 415>, + <&cpg CPG_CORE R8A779G0_CLK_DSIEXT>, + <&cpg CPG_CORE R8A779G0_CLK_DSIREF>; + clock-names = "fck", "dsi", "pll"; + power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>; + resets = <&cpg 415>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + }; + + port@1 { + reg = <1>; + + dsi0port1_out: endpoint { + remote-endpoint = <&panel_in>; + data-lanes = <1 2>; + }; + }; + }; + + panel@0 { + reg = <0>; + compatible = "raspberrypi,dsi-7inch", "ilitek,ili9881c"; + power-supply = <&vcc_lcd_reg>; + + port { + panel_in: endpoint { + remote-endpoint = <&dsi0port1_out>; + }; + }; + }; + }; ... 
diff --git a/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml b/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml new file mode 100644 index 000000000000..4f99682a308d --- /dev/null +++ b/Documentation/devicetree/bindings/display/imx/fsl,imx8mp-hdmi-pai.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/imx/fsl,imx8mp-hdmi-pai.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale i.MX8MP HDMI Parallel Audio Interface + +maintainers: + - Shengjiu Wang + +description: + The HDMI TX Parallel Audio Interface (HTX_PAI) is a bridge between the + Audio Subsystem to the HDMI TX Controller. + +properties: + compatible: + const: fsl,imx8mp-hdmi-pai + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: apb + + power-domains: + maxItems: 1 + + port: + $ref: /schemas/graph.yaml#/properties/port + description: Output to the HDMI TX controller. 
+ +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - power-domains + - port + +additionalProperties: false + +examples: + - | + #include + #include + + audio-bridge@32fc4800 { + compatible = "fsl,imx8mp-hdmi-pai"; + reg = <0x32fc4800 0x800>; + interrupt-parent = <&irqsteer_hdmi>; + interrupts = <14>; + clocks = <&clk IMX8MP_CLK_HDMI_APB>; + clock-names = "apb"; + power-domains = <&hdmi_blk_ctrl IMX8MP_HDMIBLK_PD_PAI>; + + port { + pai_to_hdmi_tx: endpoint { + remote-endpoint = <&hdmi_tx_from_pai>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,il79900a.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,il79900a.yaml new file mode 100644 index 000000000000..02f7fb1f16dc --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/ilitek,il79900a.yaml @@ -0,0 +1,68 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/ilitek,il79900a.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ilitek IL79900a based MIPI-DSI panels + +maintainers: + - Langyan Ye + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + - tianma,tl121bvms07-00 + - const: ilitek,il79900a + + reg: + maxItems: 1 + description: DSI virtual channel used by the panel + + enable-gpios: + maxItems: 1 + description: GPIO specifier for the enable pin + + avdd-supply: + description: Positive analog voltage supply (AVDD) + + avee-supply: + description: Negative analog voltage supply (AVEE) + + pp1800-supply: + description: 1.8V logic voltage supply + + backlight: true + +required: + - compatible + - reg + - enable-gpios + - avdd-supply + - avee-supply + - pp1800-supply + +additionalProperties: false + +examples: + - | + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "tianma,tl121bvms07-00", "ilitek,il79900a"; + reg = <0>; + enable-gpios = <&pio 25 0>; + avdd-supply = 
<&reg_avdd>; + avee-supply = <&reg_avee>; + pp1800-supply = <&reg_pp1800>; + backlight = <&backlight>; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml index 434cc6af9c95..34a612705e8c 100644 --- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml +++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9881c.yaml @@ -20,6 +20,7 @@ properties: - bananapi,lhr050h41 - bestar,bsd1218-a101kl68 - feixin,k101-im2byl02 + - raspberrypi,dsi-5inch - raspberrypi,dsi-7inch - startek,kd050hdfia020 - tdo,tl050hdv35 @@ -30,6 +31,7 @@ properties: maxItems: 1 backlight: true + port: true power-supply: true reset-gpios: true rotation: true diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml index 9b92a05791cc..ac2db8cf5eb7 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml @@ -56,8 +56,6 @@ properties: - panasonic,vvx10f034n00 # Samsung s6e3fa7 1080x2220 based AMS559NK06 AMOLED panel - samsung,s6e3fa7-ams559nk06 - # Samsung s6e3fc2x01 1080x2340 AMOLED panel - - samsung,s6e3fc2x01 # Samsung sofef00 1080x2280 AMOLED panel - samsung,sofef00 # Shangai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel @@ -80,7 +78,6 @@ allOf: properties: compatible: enum: - - samsung,s6e3fc2x01 - samsung,sofef00 then: properties: diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index 2017428d8828..35ba99b76119 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -184,6 +184,8 @@ properties: - innolux,n156bge-l21 # Innolux Corporation 7.0" WSVGA 
(1024x600) TFT LCD panel - innolux,zj070na-01p + # JuTouch Technology Co., Ltd. 10" JT101TM023 WXGA (1280 x 800) LVDS panel + - jutouch,jt101tm023 # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel - koe,tx14d24vm1bpa # Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml index ccb574caed28..f1723e910252 100644 --- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml +++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml @@ -33,6 +33,8 @@ properties: - samsung,atna45dc02 # Samsung 15.6" 3K (2880x1620 pixels) eDP AMOLED panel - samsung,atna56ac03 + # Samsung 16.0" 3K (2880x1800 pixels) eDP AMOLED panel + - samsung,atna60cl08 - const: samsung,atna33xc20 enable-gpios: true diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml new file mode 100644 index 000000000000..d48354fb52ea --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/samsung,s6e3fc2x01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Samsung S6E3FC2X01 AMOLED DDIC + +description: The S6E3FC2X01 is a display driver IC with connected panel. 
+ +maintainers: + - David Heidelberg + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + items: + - enum: + # Samsung 6.41 inch, 1080x2340 pixels, 19.5:9 ratio + - samsung,s6e3fc2x01-ams641rw + - const: samsung,s6e3fc2x01 + + reg: + maxItems: 1 + + reset-gpios: true + + port: true + + vddio-supply: + description: VDD regulator + + vci-supply: + description: VCI regulator + + poc-supply: + description: POC regulator + +required: + - compatible + - reset-gpios + - vddio-supply + - vci-supply + - poc-supply + +unevaluatedProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "samsung,s6e3fc2x01-ams641rw", "samsung,s6e3fc2x01"; + reg = <0>; + + vddio-supply = <&vreg_l14a_1p88>; + vci-supply = <&s2dos05_buck1>; + poc-supply = <&s2dos05_ldo1>; + + te-gpios = <&tlmm 10 GPIO_ACTIVE_HIGH>; + reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&sde_dsi_active &sde_te_active_sleep>; + pinctrl-1 = <&sde_dsi_suspend &sde_te_active_sleep>; + pinctrl-names = "default", "sleep"; + + port { + panel_in: endpoint { + remote-endpoint = <&mdss_dsi0_out>; + }; + }; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml b/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml new file mode 100644 index 000000000000..08a35ebbbb3c --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/sharp,lq079l1sx01.yaml @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/sharp,lq079l1sx01.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Sharp Microelectronics 7.9" WQXGA TFT LCD panel + +maintainers: + - Svyatoslav Ryhel + +description: > + This panel requires a dual-channel DSI host to operate and it supports + only left-right split mode, where each channel drives the left or right + half of the screen and only video mode. + + Each of the DSI channels controls a separate DSI peripheral. + The peripheral driven by the first link (DSI-LINK1), left one, is + considered the primary peripheral and controls the device. 
+ +allOf: + - $ref: panel-common-dual.yaml# + +properties: + compatible: + const: sharp,lq079l1sx01 + + reg: + maxItems: 1 + + avdd-supply: + description: regulator that supplies the analog voltage + + vddio-supply: + description: regulator that supplies the I/O voltage + + vsp-supply: + description: positive boost supply regulator + + vsn-supply: + description: negative boost supply regulator + + reset-gpios: + maxItems: 1 + + backlight: true + ports: true + +required: + - compatible + - reg + - avdd-supply + - vddio-supply + - ports + +additionalProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "sharp,lq079l1sx01"; + reg = <0>; + + reset-gpios = <&gpio 59 GPIO_ACTIVE_LOW>; + + avdd-supply = <&avdd_lcd>; + vddio-supply = <&vdd_lcd_io>; + vsp-supply = <&vsp_5v5_lcd>; + vsn-supply = <&vsn_5v5_lcd>; + + backlight = <&backlight>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + panel_in0: endpoint { + remote-endpoint = <&dsi0_out>; + }; + }; + + port@1 { + reg = <1>; + panel_in1: endpoint { + remote-endpoint = <&dsi1_out>; + }; + }; + }; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/display/panel/synaptics,td4300-panel.yaml b/Documentation/devicetree/bindings/display/panel/synaptics,td4300-panel.yaml new file mode 100644 index 000000000000..152d94367130 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/synaptics,td4300-panel.yaml @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/synaptics,td4300-panel.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synaptics TDDI Display Panel Controller + +maintainers: + - Kaustabh Chakraborty + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + enum: + - syna,td4101-panel + - syna,td4300-panel + + reg: + maxItems: 1 + + vio-supply: + description: core I/O voltage supply + + vsn-supply: + description: negative voltage supply for analog circuits + + vsp-supply: + description: positive voltage supply for analog circuits + + backlight-gpios: + maxItems: 1 + description: backlight enable GPIO + + reset-gpios: true + width-mm: true + height-mm: true + panel-timing: true + +required: + - compatible + - reg + - width-mm + - height-mm + - panel-timing + +additionalProperties: false + +examples: + - | + #include + + dsi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "syna,td4300-panel"; + reg = <0>; + + vio-supply = <&panel_vio_reg>; + vsn-supply = <&panel_vsn_reg>; + vsp-supply = <&panel_vsp_reg>; + + backlight-gpios = <&gpd3 5 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpd3 4 GPIO_ACTIVE_LOW>; + + width-mm = <68>; + height-mm = <121>; + + panel-timing { + clock-frequency = <144389520>; + + hactive = <1080>; + hsync-len = <4>; + hfront-porch = <120>; + hback-porch = <32>; + + vactive = <1920>; + vsync-len = <2>; + vfront-porch = <21>; + vback-porch = <4>; + }; + }; + }; + +... 
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml index c59df3c1a3f7..632b48bfabb9 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml @@ -17,6 +17,7 @@ properties: - rockchip,px30-mipi-dsi - rockchip,rk3128-mipi-dsi - rockchip,rk3288-mipi-dsi + - rockchip,rk3368-mipi-dsi - rockchip,rk3399-mipi-dsi - rockchip,rk3568-mipi-dsi - rockchip,rv1126-mipi-dsi @@ -73,6 +74,7 @@ allOf: enum: - rockchip,px30-mipi-dsi - rockchip,rk3128-mipi-dsi + - rockchip,rk3368-mipi-dsi - rockchip,rk3568-mipi-dsi - rockchip,rv1126-mipi-dsi diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml index a5b4e0021758..bee9faf1d3f8 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-valhall-csf.yaml @@ -18,6 +18,8 @@ properties: oneOf: - items: - enum: + - mediatek,mt8196-mali + - nxp,imx95-mali # G310 - rockchip,rk3588-mali - const: arm,mali-valhall-csf # Mali Valhall GPU model/revision is fully discoverable @@ -44,7 +46,9 @@ properties: minItems: 1 items: - const: core - - const: coregroup + - enum: + - coregroup + - stacks - const: stacks mali-supply: true @@ -91,7 +95,6 @@ required: - interrupts - interrupt-names - clocks - - mali-supply additionalProperties: false @@ -108,6 +111,29 @@ allOf: power-domains: maxItems: 1 power-domain-names: false + required: + - mali-supply + - if: + properties: + compatible: + contains: + const: mediatek,mt8196-mali + then: + properties: + mali-supply: false + sram-supply: false + operating-points-v2: false + power-domains: + maxItems: 1 + power-domain-names: false + clocks: + maxItems: 2 + clock-names: + items: + - const: core + - const: stacks + required: + - 
power-domains examples: - | @@ -143,5 +169,17 @@ examples: }; }; }; + - | + gpu@48000000 { + compatible = "mediatek,mt8196-mali", "arm,mali-valhall-csf"; + reg = <0x48000000 0x480000>; + clocks = <&gpufreq 0>, <&gpufreq 1>; + clock-names = "core", "stacks"; + interrupts = , + , + ; + interrupt-names = "job", "mmu", "gpu"; + power-domains = <&gpufreq>; + }; ... diff --git a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml index c87d7bece0ec..225a6e1b7fcd 100644 --- a/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml +++ b/Documentation/devicetree/bindings/gpu/img,powervr-rogue.yaml @@ -13,6 +13,16 @@ maintainers: properties: compatible: oneOf: + - items: + - enum: + - renesas,r8a7796-gpu + - renesas,r8a77961-gpu + - const: img,img-gx6250 + - const: img,img-rogue + - items: + - const: renesas,r8a77965-gpu + - const: img,img-ge7800 + - const: img,img-rogue - items: - enum: - ti,am62-gpu @@ -82,6 +92,33 @@ required: additionalProperties: false allOf: + - if: + properties: + compatible: + contains: + enum: + - ti,am62-gpu + - ti,j721s2-gpu + then: + properties: + clocks: + maxItems: 1 + + - if: + properties: + compatible: + contains: + enum: + - img,img-ge7800 + - img,img-gx6250 + - thead,th1520-gpu + then: + properties: + clocks: + minItems: 3 + clock-names: + minItems: 3 + - if: properties: compatible: @@ -90,14 +127,31 @@ allOf: then: properties: power-domains: - items: - - description: Power domain A + maxItems: 1 power-domain-names: maxItems: 1 required: - power-domains - power-domain-names + - if: + properties: + compatible: + contains: + enum: + - img,img-bxs-4-64 + - img,img-ge7800 + - img,img-gx6250 + then: + properties: + power-domains: + minItems: 2 + power-domain-names: + minItems: 2 + required: + - power-domains + - power-domain-names + - if: properties: compatible: @@ -105,10 +159,6 @@ allOf: const: thead,th1520-gpu then: properties: - clocks: - minItems: 3 - clock-names: 
- minItems: 3 power-domains: items: - description: The single, unified power domain for the GPU on the @@ -117,35 +167,6 @@ allOf: required: - power-domains - - if: - properties: - compatible: - contains: - const: img,img-bxs-4-64 - then: - properties: - power-domains: - items: - - description: Power domain A - - description: Power domain B - power-domain-names: - minItems: 2 - required: - - power-domains - - power-domain-names - - - if: - properties: - compatible: - contains: - enum: - - ti,am62-gpu - - ti,j721s2-gpu - then: - properties: - clocks: - maxItems: 1 - examples: - | #include diff --git a/Documentation/devicetree/bindings/i2c/apm,xgene-slimpro-i2c.yaml b/Documentation/devicetree/bindings/i2c/apm,xgene-slimpro-i2c.yaml new file mode 100644 index 000000000000..9460c64071f2 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/apm,xgene-slimpro-i2c.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/i2c/apm,xgene-slimpro-i2c.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: APM X-Gene SLIMpro Mailbox I2C + +maintainers: + - Khuong Dinh + +description: + An I2C controller accessed over the "SLIMpro" mailbox. + +allOf: + - $ref: /schemas/i2c/i2c-controller.yaml# + +properties: + compatible: + const: apm,xgene-slimpro-i2c + + mboxes: + maxItems: 1 + +required: + - compatible + - mboxes + +unevaluatedProperties: false + +examples: + - | + i2c { + compatible = "apm,xgene-slimpro-i2c"; + mboxes = <&mailbox 0>; + }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-xgene-slimpro.txt b/Documentation/devicetree/bindings/i2c/i2c-xgene-slimpro.txt deleted file mode 100644 index f6b2c20cfbf6..000000000000 --- a/Documentation/devicetree/bindings/i2c/i2c-xgene-slimpro.txt +++ /dev/null @@ -1,15 +0,0 @@ -APM X-Gene SLIMpro Mailbox I2C Driver - -An I2C controller accessed over the "SLIMpro" mailbox. 
- -Required properties : - - - compatible : should be "apm,xgene-slimpro-i2c" - - mboxes : use the label reference for the mailbox as the first parameter. - The second parameter is the channel number. - -Example : - i2cslimpro { - compatible = "apm,xgene-slimpro-i2c"; - mboxes = <&mailbox 0>; - }; diff --git a/Documentation/devicetree/bindings/npu/arm,ethos.yaml b/Documentation/devicetree/bindings/npu/arm,ethos.yaml new file mode 100644 index 000000000000..716c4997f976 --- /dev/null +++ b/Documentation/devicetree/bindings/npu/arm,ethos.yaml @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/npu/arm,ethos.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm Ethos U65/U85 + +maintainers: + - Rob Herring + +description: > + The Arm Ethos-U NPUs are designed for IoT inference applications. The NPUs + can accelerate 8-bit and 16-bit integer quantized networks: + + Transformer networks (U85 only) + Convolutional Neural Networks (CNN) + Recurrent Neural Networks (RNN) + + Further documentation is available here: + + U65 TRM: https://developer.arm.com/documentation/102023/ + U85 TRM: https://developer.arm.com/documentation/102685/ + +properties: + compatible: + oneOf: + - items: + - enum: + - fsl,imx93-npu + - const: arm,ethos-u65 + - items: + - {} + - const: arm,ethos-u85 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: core + - const: apb + + power-domains: + maxItems: 1 + + sram: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + - clocks + +additionalProperties: false + +examples: + - | + #include + #include + #include + + npu@4a900000 { + compatible = "fsl,imx93-npu", "arm,ethos-u65"; + reg = <0x4a900000 0x1000>; + interrupts = ; + power-domains = <&mlmix>; + clocks = <&clk IMX93_CLK_ML>, <&clk IMX93_CLK_ML_APB>; + clock-names = "core", "apb"; + sram = <&sram>; + }; +... 
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml b/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml index 92aa47ec72c7..88eb20bb008f 100644 --- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml +++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.yaml @@ -79,6 +79,7 @@ properties: - fsl,imx-audio-nau8822 - fsl,imx-audio-sgtl5000 - fsl,imx-audio-si476x + - fsl,imx-audio-tlv320 - fsl,imx-audio-tlv320aic31xx - fsl,imx-audio-tlv320aic32x4 - fsl,imx-audio-wm8524 diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml index 8ac91625dce5..b49a920af704 100644 --- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml @@ -33,6 +33,7 @@ properties: - qcom,apq8096-sndcard - qcom,glymur-sndcard - qcom,qcm6490-idp-sndcard + - qcom,qcs615-sndcard - qcom,qcs6490-rb3gen2-sndcard - qcom,qcs8275-sndcard - qcom,qcs9075-sndcard diff --git a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml index bd00afa47d62..7f84f506013c 100644 --- a/Documentation/devicetree/bindings/sound/ti,tas2781.yaml +++ b/Documentation/devicetree/bindings/sound/ti,tas2781.yaml @@ -24,10 +24,10 @@ description: | Instruments Smart Amp speaker protection algorithm. The integrated speaker voltage and current sense provides for real time monitoring of loudspeaker behavior. - The TAS5825/TAS5827 is a stereo, digital input Class-D audio - amplifier optimized for efficiently driving high peak power into - small loudspeakers. An integrated on-chip DSP supports Texas - Instruments Smart Amp speaker protection algorithm. + The TAS5802/TAS5815/TAS5825/TAS5827/TAS5828 is a stereo, digital input + Class-D audio amplifier optimized for efficiently driving high peak + power into small loudspeakers. 
An integrated on-chip DSP supports + Texas Instruments Smart Amp speaker protection algorithm. Specifications about the audio amplifier can be found at: https://www.ti.com/lit/gpn/tas2120 @@ -35,8 +35,10 @@ description: | https://www.ti.com/lit/gpn/tas2563 https://www.ti.com/lit/gpn/tas2572 https://www.ti.com/lit/gpn/tas2781 + https://www.ti.com/lit/gpn/tas5815 https://www.ti.com/lit/gpn/tas5825m https://www.ti.com/lit/gpn/tas5827 + https://www.ti.com/lit/gpn/tas5828m properties: compatible: @@ -65,11 +67,21 @@ properties: Protection and Audio Processing, 16/20/24/32bit stereo I2S or multichannel TDM. + ti,tas5802: 22-W, Inductor-Less, Digital Input, Closed-Loop Class-D + Audio Amplifier with 96-Khz Extended Processing and Low Idle Power + Dissipation. + + ti,tas5815: 30-W, Digital Input, Stereo, Closed-loop Class-D Audio + Amplifier with 96 kHz Enhanced Processing + ti,tas5825: 38-W Stereo, Inductor-Less, Digital Input, Closed-Loop 4.5V to 26.4V Class-D Audio Amplifier with 192-kHz Extended Audio Processing. 
- ti,tas5827: 47-W Stereo, Digital Input, High Efficiency Closed-Loop Class-D - Amplifier with Class-H Algorithm + ti,tas5827: 47-W Stereo, Digital Input, High Efficiency Closed-Loop + Class-D Amplifier with Class-H Algorithm + + ti,tas5828: 50-W Stereo, Digital Input, High Efficiency Closed-Loop + Class-D Amplifier with Hybrid-Pro Algorithm oneOf: - items: - enum: @@ -80,8 +92,11 @@ properties: - ti,tas2563 - ti,tas2570 - ti,tas2572 + - ti,tas5802 + - ti,tas5815 - ti,tas5825 - ti,tas5827 + - ti,tas5828 - const: ti,tas2781 - enum: - ti,tas2781 @@ -177,12 +192,28 @@ allOf: minimum: 0x38 maximum: 0x3f + - if: + properties: + compatible: + contains: + enum: + - ti,tas5802 + - ti,tas5815 + then: + properties: + reg: + maxItems: 4 + items: + minimum: 0x54 + maximum: 0x57 + - if: properties: compatible: contains: enum: - ti,tas5827 + - ti,tas5828 then: properties: reg: diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index f1d1882009ba..3451c9ac0add 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -835,6 +835,8 @@ patternProperties: description: JOZ BV "^jty,.*": description: JTY + "^jutouch,.*": + description: JuTouch Technology Co., Ltd. "^kam,.*": description: Kamstrup A/S "^karo,.*": diff --git a/Documentation/filesystems/ext4/directory.rst b/Documentation/filesystems/ext4/directory.rst index 6eece8e31df8..9b003a4d453f 100644 --- a/Documentation/filesystems/ext4/directory.rst +++ b/Documentation/filesystems/ext4/directory.rst @@ -183,10 +183,10 @@ in the place where the name normally goes. The structure is - det_checksum - Directory leaf block checksum. -The leaf directory block checksum is calculated against the FS UUID, the -directory's inode number, the directory's inode generation number, and -the entire directory entry block up to (but not including) the fake -directory entry. 
+The leaf directory block checksum is calculated against the FS UUID (or +the checksum seed, if that feature is enabled for the fs), the directory's +inode number, the directory's inode generation number, and the entire +directory entry block up to (but not including) the fake directory entry. Hash Tree Directories ~~~~~~~~~~~~~~~~~~~~~ @@ -196,12 +196,12 @@ new feature was added to ext3 to provide a faster (but peculiar) balanced tree keyed off a hash of the directory entry name. If the EXT4_INDEX_FL (0x1000) flag is set in the inode, this directory uses a hashed btree (htree) to organize and find directory entries. For -backwards read-only compatibility with ext2, this tree is actually -hidden inside the directory file, masquerading as “empty” directory data -blocks! It was stated previously that the end of the linear directory -entry table was signified with an entry pointing to inode 0; this is -(ab)used to fool the old linear-scan algorithm into thinking that the -rest of the directory block is empty so that it moves on. +backwards read-only compatibility with ext2, interior tree nodes are actually +hidden inside the directory file, masquerading as “empty” directory entries +spanning the whole block. It was stated previously that directory entries +with the inode set to 0 are treated as unused entries; this is (ab)used to +fool the old linear-scan algorithm into skipping over those blocks containing +the interior tree node data. The root of the tree always lives in the first data block of the directory. By ext2 custom, the '.' and '..' entries must appear at the @@ -209,24 +209,24 @@ beginning of this first block, so they are put here as two ``struct ext4_dir_entry_2`` s and not stored in the tree. The rest of the root node contains metadata about the tree and finally a hash->block map to find nodes that are lower in the htree. 
If -``dx_root.info.indirect_levels`` is non-zero then the htree has two -levels; the data block pointed to by the root node's map is an interior -node, which is indexed by a minor hash. Interior nodes in this tree -contains a zeroed out ``struct ext4_dir_entry_2`` followed by a -minor_hash->block map to find leafe nodes. Leaf nodes contain a linear -array of all ``struct ext4_dir_entry_2``; all of these entries -(presumably) hash to the same value. If there is an overflow, the -entries simply overflow into the next leaf node, and the -least-significant bit of the hash (in the interior node map) that gets -us to this next leaf node is set. +``dx_root.info.indirect_levels`` is non-zero then the htree has that many +levels and the blocks pointed to by the root node's map are interior nodes. +These interior nodes have a zeroed out ``struct ext4_dir_entry_2`` followed by +a hash->block map to find nodes of the next level. Leaf nodes look like +classic linear directory blocks, but all of its entries have a hash value +equal or greater than the indicated hash of the parent node. -To traverse the directory as a htree, the code calculates the hash of -the desired file name and uses it to find the corresponding block -number. If the tree is flat, the block is a linear array of directory -entries that can be searched; otherwise, the minor hash of the file name -is computed and used against this second block to find the corresponding -third block number. That third block number will be a linear array of -directory entries. +The actual hash value for an entry name is only 31 bits, the least-significant +bit is set to 0. However, if there is a hash collision between directory +entries, the least-significant bit may get set to 1 on interior nodes in the +case where these two (or more) hash-colliding entries do not fit into one leaf +node and must be split across multiple nodes. 
+ +To look up a name in such a htree, the code calculates the hash of the desired +file name and uses it to find the leaf node with the range of hash values the +calculated hash falls into (in other words, a lookup works basically the same +as it would in a B-Tree keyed by the hash value), and possibly also scanning +the leaf nodes that follow (in tree order) in case of hash collisions. To traverse the directory as a linear array (such as the old code does), the code simply reads every data block in the directory. The blocks used @@ -319,7 +319,8 @@ of a data block: * - 0x24 - __le32 - block - - The block number (within the directory file) that goes with hash=0. + - The block number (within the directory file) that lead to the left-most + leaf node, i.e. the leaf containing entries with the lowest hash values. * - 0x28 - struct dx_entry - entries[0] @@ -442,7 +443,7 @@ The dx_tail structure is 8 bytes long and looks like this: * - 0x0 - u32 - dt_reserved - - Zero. + - Unused (but still part of the checksum curiously). * - 0x4 - __le32 - dt_checksum @@ -450,4 +451,4 @@ The dx_tail structure is 8 bytes long and looks like this: The checksum is calculated against the FS UUID, the htree index header (dx_root or dx_node), all of the htree indices (dx_entry) that are in -use, and the tail block (dx_tail). +use, and the tail block (dx_tail) with the dt_checksum initially set to 0. diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index 5139705089f2..781129f78b06 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -92,6 +92,18 @@ GEM Atomic Helper Reference .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c :export: +VBLANK Helper Reference +----------------------- + +.. kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c + :doc: overview + +.. kernel-doc:: include/drm/drm_vblank_helper.h + :internal: + +.. 
kernel-doc:: drivers/gpu/drm/drm_vblank_helper.c + :export: + Simple KMS Helper Reference =========================== diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index b5f58b4274b1..9013ced318cb 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -623,6 +623,43 @@ Contact: Thomas Zimmermann , Simona Vetter Level: Advanced +Implement a new DUMB_CREATE2 ioctl +---------------------------------- + +The current DUMB_CREATE ioctl is not well defined. Instead of a pixel and +framebuffer format, it only accepts a color mode of vague semantics. Assuming +a linear framebuffer, the color mode gives an idea of the supported pixel +format. But userspace effectively has to guess the correct values. It really +only works reliably with framebuffers in XRGB8888. Userspace has begun to +workaround these limitations by computing arbitrary format's buffer sizes and +calculating their sizes in terms of XRGB8888 pixels. + +One possible solution is a new ioctl DUMB_CREATE2. It should accept a DRM +format and a format modifier to resolve the color mode's ambiguity. As +framebuffers can be multi-planar, the new ioctl has to return the buffer size, +pitch and GEM handle for each individual color plane. + +In the first step, the new ioctl can be limited to the current features of +the existing DUMB_CREATE. Individual drivers can then be extended to support +multi-planar formats. Rockchip might require this and would be a good candidate. + +It might also be helpful to userspace to query information about the size of +a potential buffer, if allocated. Userspace would supply geometry and format; +the kernel would return minimal allocation sizes and scanline pitch. There is +interest to allocate that memory from another device and provide it to the +DRM driver (say via dma-buf). + +Another requested feature is the ability to allocate a buffer by size, without +format. 
Accelerators use this
+ +Start by creating one or more planes:: + + sudo mkdir /config/vkms/my-vkms/planes/plane0 + +Planes have 1 configurable attribute: + +- type: Plane type: 0 overlay, 1 primary, 2 cursor (same values as those + exposed by the "type" property of a plane) + +Continue by creating one or more CRTCs:: + + sudo mkdir /config/vkms/my-vkms/crtcs/crtc0 + +CRTCs have 1 configurable attribute: + +- writeback: Enable or disable writeback connector support by writing 1 or 0 + +Next, create one or more encoders:: + + sudo mkdir /config/vkms/my-vkms/encoders/encoder0 + +Last but not least, create one or more connectors:: + + sudo mkdir /config/vkms/my-vkms/connectors/connector0 + +Connectors have 1 configurable attribute: + +- status: Connection status: 1 connected, 2 disconnected, 3 unknown (same values + as those exposed by the "status" property of a connector) + +To finish the configuration, link the different pipeline items:: + + sudo ln -s /config/vkms/my-vkms/crtcs/crtc0 /config/vkms/my-vkms/planes/plane0/possible_crtcs + sudo ln -s /config/vkms/my-vkms/crtcs/crtc0 /config/vkms/my-vkms/encoders/encoder0/possible_crtcs + sudo ln -s /config/vkms/my-vkms/encoders/encoder0 /config/vkms/my-vkms/connectors/connector0/possible_encoders + +Since at least one primary plane is required, make sure to set the right type:: + + echo "1" | sudo tee /config/vkms/my-vkms/planes/plane0/type + +Once you are done configuring the VKMS instance, enable it:: + + echo "1" | sudo tee /config/vkms/my-vkms/enabled + +Finally, you can remove the VKMS instance disabling it:: + + echo "0" | sudo tee /config/vkms/my-vkms/enabled + +And removing the top level directory and its subdirectories:: + + sudo rm /config/vkms/my-vkms/planes/*/possible_crtcs/* + sudo rm /config/vkms/my-vkms/encoders/*/possible_crtcs/* + sudo rm /config/vkms/my-vkms/connectors/*/possible_encoders/* + sudo rmdir /config/vkms/my-vkms/planes/* + sudo rmdir /config/vkms/my-vkms/crtcs/* + sudo rmdir /config/vkms/my-vkms/encoders/* + 
sudo rmdir /config/vkms/my-vkms/connectors/* + sudo rmdir /config/vkms/my-vkms + Testing With IGT ================ @@ -68,26 +159,23 @@ To return to graphical mode, do:: sudo systemctl isolate graphical.target -Once you are in text only mode, you can run tests using the --device switch -or IGT_DEVICE variable to specify the device filter for the driver we want -to test. IGT_DEVICE can also be used with the run-test.sh script to run the +Once you are in text only mode, you can run tests using the IGT_FORCE_DRIVER +variable to specify the device filter for the driver we want to test. +IGT_FORCE_DRIVER can also be used with the run-tests.sh script to run the tests for a specific driver:: - sudo ./build/tests/ --device "sys:/sys/devices/platform/vkms" - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/ - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t + sudo IGT_FORCE_DRIVER="vkms" ./build/tests/ + sudo IGT_FORCE_DRIVER="vkms" ./scripts/run-tests.sh -t For example, to test the functionality of the writeback library, we can run the kms_writeback test:: - sudo ./build/tests/kms_writeback --device "sys:/sys/devices/platform/vkms" - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_writeback - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./scripts/run-tests.sh -t kms_writeback + sudo IGT_FORCE_DRIVER="vkms" ./build/tests/kms_writeback + sudo IGT_FORCE_DRIVER="vkms" ./scripts/run-tests.sh -t kms_writeback You can also run subtests if you do not want to run the entire test:: - sudo ./build/tests/kms_flip --run-subtest basic-plain-flip --device "sys:/sys/devices/platform/vkms" - sudo IGT_DEVICE="sys:/sys/devices/platform/vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip + sudo IGT_FORCE_DRIVER="vkms" ./build/tests/kms_flip --run-subtest basic-plain-flip Testing With KUnit ================== @@ -147,21 +235,14 @@ Runtime Configuration --------------------- We want to be able to reconfigure vkms instance 
without having to reload the -module. Use/Test-cases: +module through configfs. Use/Test-cases: - Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of compositors). -- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of - them first). - - Change output configuration: Plug/unplug screens, change EDID, allow changing the refresh rate. -The currently proposed solution is to expose vkms configuration through -configfs. All existing module options should be supported through configfs -too. - Writeback support ----------------- diff --git a/Documentation/gpu/xe/index.rst b/Documentation/gpu/xe/index.rst index 88b22fad880e..bc432c95d1a3 100644 --- a/Documentation/gpu/xe/index.rst +++ b/Documentation/gpu/xe/index.rst @@ -14,6 +14,7 @@ DG2, etc is provided to prototype the driver. xe_mm xe_map xe_migrate + xe_exec_queue xe_cs xe_pm xe_gt_freq diff --git a/Documentation/gpu/xe/xe_exec_queue.rst b/Documentation/gpu/xe/xe_exec_queue.rst new file mode 100644 index 000000000000..6076569e311c --- /dev/null +++ b/Documentation/gpu/xe/xe_exec_queue.rst @@ -0,0 +1,20 @@ +.. SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +=============== +Execution Queue +=============== + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.c + :doc: Execution Queue + +Internal API +============ + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue_types.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/xe/xe_exec_queue.c + :internal: diff --git a/Documentation/networking/can.rst b/Documentation/networking/can.rst index f93049f03a37..536ff411da1d 100644 --- a/Documentation/networking/can.rst +++ b/Documentation/networking/can.rst @@ -1398,10 +1398,9 @@ second bit timing has to be specified in order to enable the CAN FD bitrate. Additionally CAN FD capable CAN controllers support up to 64 bytes of payload. 
The representation of this length in can_frame.len and canfd_frame.len for userspace applications and inside the Linux network -layer is a plain value from 0 .. 64 instead of the CAN 'data length code'. -The data length code was a 1:1 mapping to the payload length in the Classical -CAN frames anyway. The payload length to the bus-relevant DLC mapping is -only performed inside the CAN drivers, preferably with the helper +layer is a plain value from 0 .. 64 instead of the Classical CAN length +which ranges from 0 to 8. The payload length to the bus-relevant DLC mapping +is only performed inside the CAN drivers, preferably with the helper functions can_fd_dlc2len() and can_fd_len2dlc(). The CAN netdevice driver capabilities can be distinguished by the network @@ -1465,6 +1464,70 @@ Example when 'fd-non-iso on' is added on this switchable CAN FD adapter:: can state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0 +Transmitter Delay Compensation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At high bit rates, the propagation delay from the TX pin to the RX pin of +the transceiver might become greater than the actual bit time causing +measurement errors: the RX pin would still be measuring the previous bit. + +The Transmitter Delay Compensation (thereafter, TDC) resolves this problem +by introducing a Secondary Sample Point (SSP) equal to the distance, in +minimum time quantum, from the start of the bit time on the TX pin to the +actual measurement on the RX pin. The SSP is calculated as the sum of two +configurable values: the TDC Value (TDCV) and the TDC offset (TDCO). + +TDC, if supported by the device, can be configured together with CAN-FD +using the ip tool's "tdc-mode" argument as follow: + +**omitted** + When no "tdc-mode" option is provided, the kernel will automatically + decide whether TDC should be turned on, in which case it will + calculate a default TDCO and use the TDCV as measured by the + device. This is the recommended method to use TDC. 
+ +**"tdc-mode off"** + TDC is explicitly disabled. + +**"tdc-mode auto"** + The user must provide the "tdco" argument. The TDCV will be + automatically calculated by the device. This option is only + available if the device supports the TDC-AUTO CAN controller mode. + +**"tdc-mode manual"** + The user must provide both the "tdco" and "tdcv" arguments. This + option is only available if the device supports the TDC-MANUAL CAN + controller mode. + +Note that some devices may offer an additional parameter: "tdcf" (TDC Filter +window). If supported by your device, this can be added as an optional +argument to either "tdc-mode auto" or "tdc-mode manual". + +Example configuring a 500 kbit/s arbitration bitrate, a 5 Mbit/s data +bitrate, a TDCO of 15 minimum time quantum and a TDCV automatically measured +by the device:: + + $ ip link set can0 up type can bitrate 500000 \ + fd on dbitrate 4000000 \ + tdc-mode auto tdco 15 + $ ip -details link show can0 + 5: can0: mtu 72 qdisc pfifo_fast state UP \ + mode DEFAULT group default qlen 10 + link/can promiscuity 0 allmulti 0 minmtu 72 maxmtu 72 + can state ERROR-ACTIVE restart-ms 0 + bitrate 500000 sample-point 0.875 + tq 12 prop-seg 69 phase-seg1 70 phase-seg2 20 sjw 10 brp 1 + ES582.1/ES584.1: tseg1 2..256 tseg2 2..128 sjw 1..128 brp 1..512 \ + brp_inc 1 + dbitrate 4000000 dsample-point 0.750 + dtq 12 dprop-seg 7 dphase-seg1 7 dphase-seg2 5 dsjw 2 dbrp 1 + tdco 15 tdcf 0 + ES582.1/ES584.1: dtseg1 2..32 dtseg2 1..16 dsjw 1..8 dbrp 1..32 \ + dbrp_inc 1 + tdco 0..127 tdcf 0..127 + clock 80000000 + + Supported CAN Hardware ---------------------- diff --git a/Documentation/networking/seg6-sysctl.rst b/Documentation/networking/seg6-sysctl.rst index 07c20e470baf..1b6af4779be1 100644 --- a/Documentation/networking/seg6-sysctl.rst +++ b/Documentation/networking/seg6-sysctl.rst @@ -25,6 +25,9 @@ seg6_require_hmac - INTEGER Default is 0. 
+/proc/sys/net/ipv6/seg6_* variables: +==================================== + seg6_flowlabel - INTEGER Controls the behaviour of computing the flowlabel of outer IPv6 header in case of SR T.encaps diff --git a/Documentation/rust/coding-guidelines.rst b/Documentation/rust/coding-guidelines.rst index 6ff9e754755d..3198be3a6d63 100644 --- a/Documentation/rust/coding-guidelines.rst +++ b/Documentation/rust/coding-guidelines.rst @@ -38,6 +38,81 @@ Like ``clang-format`` for the rest of the kernel, ``rustfmt`` works on individual files, and does not require a kernel configuration. Sometimes it may even work with broken code. +Imports +~~~~~~~ + +``rustfmt``, by default, formats imports in a way that is prone to conflicts +while merging and rebasing, since in some cases it condenses several items into +the same line. For instance: + +.. code-block:: rust + + // Do not use this style. + use crate::{ + example1, + example2::{example3, example4, example5}, + example6, example7, + example8::example9, + }; + +Instead, the kernel uses a vertical layout that looks like this: + +.. code-block:: rust + + use crate::{ + example1, + example2::{ + example3, + example4, + example5, // + }, + example6, + example7, + example8::example9, // + }; + +That is, each item goes into its own line, and braces are used as soon as there +is more than one item in a list. + +The trailing empty comment allows to preserve this formatting. Not only that, +``rustfmt`` will actually reformat imports vertically when the empty comment is +added. That is, it is possible to easily reformat the original example into the +expected style by running ``rustfmt`` on an input like: + +.. code-block:: rust + + // Do not use this style. 
+ use crate::{ + example1, + example2::{example3, example4, example5, // + }, + example6, example7, + example8::example9, // + }; + +The trailing empty comment works for nested imports, as shown above, as well as +for single item imports -- this can be useful to minimize diffs within patch +series: + +.. code-block:: rust + + use crate::{ + example1, // + }; + +The trailing empty comment works in any of the lines within the braces, but it +is preferred to keep it in the last item, since it is reminiscent of the +trailing comma in other formatters. Sometimes it may be simpler to avoid moving +the comment several times within a patch series due to changes in the list. + +There may be cases where exceptions may need to be made, i.e. none of this is +a hard rule. There is also code that is not migrated to this style yet, but +please do not introduce code in other styles. + +Eventually, the goal is to get ``rustfmt`` to support this formatting style (or +a similar one) automatically in a stable release without requiring the trailing +empty comment. Thus, at some point, the goal is to remove those comments. + Comments -------- diff --git a/Documentation/userspace-api/dma-buf-heaps.rst b/Documentation/userspace-api/dma-buf-heaps.rst index 1dfe5e7acd5a..05445c83b79a 100644 --- a/Documentation/userspace-api/dma-buf-heaps.rst +++ b/Documentation/userspace-api/dma-buf-heaps.rst @@ -16,13 +16,52 @@ following heaps: - The ``system`` heap allocates virtually contiguous, cacheable, buffers. - - The ``cma`` heap allocates physically contiguous, cacheable, - buffers. Only present if a CMA region is present. Such a region is - usually created either through the kernel commandline through the - ``cma`` parameter, a memory region Device-Tree node with the - ``linux,cma-default`` property set, or through the ``CMA_SIZE_MBYTES`` or - ``CMA_SIZE_PERCENTAGE`` Kconfig options. The heap's name in devtmpfs is - ``default_cma_region``. 
For backwards compatibility, when the - ``DMABUF_HEAPS_CMA_LEGACY`` Kconfig option is set, a duplicate node is - created following legacy naming conventions; the legacy name might be - ``reserved``, ``linux,cma``, or ``default-pool``. + - The ``default_cma_region`` heap allocates physically contiguous, + cacheable, buffers. Only present if a CMA region is present. Such a + region is usually created either through the kernel commandline + through the ``cma`` parameter, a memory region Device-Tree node with + the ``linux,cma-default`` property set, or through the + ``CMA_SIZE_MBYTES`` or ``CMA_SIZE_PERCENTAGE`` Kconfig options. Prior + to Linux 6.17, its name wasn't stable and could be called + ``reserved``, ``linux,cma``, or ``default-pool``, depending on the + platform. + + - A heap will be created for each reusable region in the device tree + with the ``shared-dma-pool`` compatible, using the full device tree + node name as its name. The buffer semantics are identical to + ``default-cma-region``. + +Naming Convention +================= + +``dma-buf`` heaps name should meet a number of constraints: + +- The name must be stable, and must not change from one version to the other. + Userspace identifies heaps by their name, so if the names ever change, we + would be likely to introduce regressions. + +- The name must describe the memory region the heap will allocate from, and + must uniquely identify it in a given platform. Since userspace applications + use the heap name as the discriminant, it must be able to tell which heap it + wants to use reliably if there's multiple heaps. + +- The name must not mention implementation details, such as the allocator. The + heap driver will change over time, and implementation details when it was + introduced might not be relevant in the future. + +- The name should describe properties of the buffers that would be allocated. + Doing so will make heap identification easier for userspace. 
Such properties + are: + + - ``contiguous`` for physically contiguous buffers; + + - ``protected`` for encrypted buffers not accessible the OS; + +- The name may describe intended usage. Doing so will make heap identification + easier for userspace applications and users. + +For example, assuming a platform with a reserved memory region located +at the RAM address 0x42000000, intended to allocate video framebuffers, +physically contiguous, and backed by the CMA kernel allocator, good +names would be ``memory@42000000-contiguous`` or ``video@42000000``, but +``cma-video`` wouldn't. diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 6ae24c5ca559..57061fa29e6a 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -1229,6 +1229,9 @@ It is not possible to read back a pending external abort (injected via KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always delivered directly to the virtual CPU). +Calling this ioctl on a vCPU that hasn't been initialized will return +-ENOEXEC. + :: struct kvm_vcpu_events { @@ -1309,6 +1312,8 @@ exceptions by manipulating individual registers using the KVM_SET_ONE_REG API. See KVM_GET_VCPU_EVENTS for the data structure. +Calling this ioctl on a vCPU that hasn't been initialized will return +-ENOEXEC. 4.33 KVM_GET_DEBUGREGS ---------------------- @@ -6432,9 +6437,18 @@ most one mapping per page, i.e. binding multiple memory regions to a single guest_memfd range is not allowed (any number of memory regions can be bound to a single guest_memfd file, but the bound ranges must not overlap). -When the capability KVM_CAP_GUEST_MEMFD_MMAP is supported, the 'flags' field -supports GUEST_MEMFD_FLAG_MMAP. Setting this flag on guest_memfd creation -enables mmap() and faulting of guest_memfd memory to host userspace. +The capability KVM_CAP_GUEST_MEMFD_FLAGS enumerates the `flags` that can be +specified via KVM_CREATE_GUEST_MEMFD. 
Currently defined flags: + + ============================ ================================================ + GUEST_MEMFD_FLAG_MMAP Enable using mmap() on the guest_memfd file + descriptor. + GUEST_MEMFD_FLAG_INIT_SHARED Make all memory in the file shared during + KVM_CREATE_GUEST_MEMFD (memory files created + without INIT_SHARED will be marked private). + Shared memory can be faulted into host userspace + page tables. Private memory cannot. + ============================ ================================================ When the KVM MMU performs a PFN lookup to service a guest fault and the backing guest_memfd has the GUEST_MEMFD_FLAG_MMAP set, then the fault will always be diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.rst b/Documentation/virt/kvm/devices/arm-vgic-v3.rst index ff02102f7141..5395ee66fc32 100644 --- a/Documentation/virt/kvm/devices/arm-vgic-v3.rst +++ b/Documentation/virt/kvm/devices/arm-vgic-v3.rst @@ -13,7 +13,8 @@ will act as the VM interrupt controller, requiring emulated user-space devices to inject interrupts to the VGIC instead of directly to CPUs. It is not possible to create both a GICv3 and GICv2 on the same VM. -Creating a guest GICv3 device requires a host GICv3 as well. +Creating a guest GICv3 device requires a host GICv3 host, or a GICv5 host with +support for FEAT_GCIE_LEGACY. 
Groups: diff --git a/MAINTAINERS b/MAINTAINERS index 46126ce2f968..5d59ff76cfc6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2017,6 +2017,15 @@ F: arch/arm64/include/asm/arch_timer.h F: drivers/clocksource/arm_arch_timer.c F: drivers/clocksource/arm_arch_timer_mmio.c +ARM ETHOS-U NPU DRIVER +M: Rob Herring (Arm) +M: Tomeu Vizoso +L: dri-devel@lists.freedesktop.org +S: Supported +T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: drivers/accel/ethosu/ +F: include/uapi/drm/ethosu_accel.h + ARM GENERIC INTERRUPT CONTROLLER DRIVERS M: Marc Zyngier L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -2092,7 +2101,8 @@ F: drivers/gpu/drm/arm/display/komeda/ ARM MALI PANFROST DRM DRIVER M: Boris Brezillon M: Rob Herring -R: Steven Price +M: Steven Price +M: Adrián Larumbe L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -2298,7 +2308,7 @@ S: Maintained F: drivers/clk/sunxi/ ARM/Allwinner sunXi SoC support -M: Chen-Yu Tsai +M: Chen-Yu Tsai M: Jernej Skrabec M: Samuel Holland L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -4804,6 +4814,7 @@ F: drivers/net/ethernet/broadcom/b44.* BROADCOM B53/SF2 ETHERNET SWITCH DRIVER M: Florian Fainelli +M: Jonas Gorski L: netdev@vger.kernel.org L: openwrt-devel@lists.openwrt.org (subscribers-only) S: Supported @@ -7308,6 +7319,7 @@ F: Documentation/userspace-api/dma-buf-alloc-exchange.rst F: drivers/dma-buf/ F: include/linux/*fence.h F: include/linux/dma-buf.h +F: include/linux/dma-buf/ F: include/linux/dma-resv.h K: \bdma_(?:buf|fence|resv)\b @@ -7636,8 +7648,7 @@ F: drivers/accel/ F: include/drm/drm_accel.h DRM DRIVER FOR ALLWINNER DE2 AND DE3 ENGINE -M: Maxime Ripard -M: Chen-Yu Tsai +M: Chen-Yu Tsai R: Jernej Skrabec L: dri-devel@lists.freedesktop.org S: Supported @@ -7746,7 +7757,8 @@ F: Documentation/devicetree/bindings/display/panel/panel-edp.yaml F: drivers/gpu/drm/panel/panel-edp.c DRM DRIVER FOR GENERIC USB 
DISPLAY -S: Orphan +M: Ruben Wauters +S: Maintained W: https://github.com/notro/gud/wiki T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/gpu/drm/gud/ @@ -7887,7 +7899,7 @@ DRM DRIVER for Qualcomm display hardware M: Rob Clark M: Dmitry Baryshkov R: Abhinav Kumar -R: Jessica Zhang +R: Jessica Zhang R: Sean Paul R: Marijn Suijten L: linux-arm-msm@vger.kernel.org @@ -8056,6 +8068,12 @@ S: Maintained F: Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml F: drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +DRM DRIVER FOR SAMSUNG S6E3FC2X01 DDIC +M: David Heidelberg +S: Maintained +F: Documentation/devicetree/bindings/display/panel/samsung,s6e3fc2x01.yaml +F: drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c + DRM DRIVER FOR SAMSUNG S6E3HA8 PANELS M: Dzmitry Sankouski S: Maintained @@ -8250,8 +8268,7 @@ F: drivers/gpu/nova-core/ F: rust/kernel/drm/ DRM DRIVERS FOR ALLWINNER A10 -M: Maxime Ripard -M: Chen-Yu Tsai +M: Chen-Yu Tsai L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -8578,6 +8595,7 @@ S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git F: drivers/gpu/drm/scheduler/ F: include/drm/gpu_scheduler.h +F: include/drm/spsc_queue.h DRM GPUVM M: Danilo Krummrich @@ -8600,7 +8618,7 @@ F: drivers/gpu/drm/clients/drm_log.c DRM PANEL DRIVERS M: Neil Armstrong -R: Jessica Zhang +R: Jessica Zhang L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git @@ -18013,6 +18031,16 @@ X: net/rfkill/ X: net/wireless/ X: tools/testing/selftests/net/can/ +NETWORKING [IOAM] +M: Justin Iurman +S: Maintained +F: Documentation/networking/ioam6* +F: include/linux/ioam6* +F: include/net/ioam6* +F: include/uapi/linux/ioam6* +F: net/ipv6/ioam6* +F: tools/testing/selftests/net/ioam6* + NETWORKING [IPSEC] M: Steffen Klassert M: Herbert Xu @@ -27695,7 +27723,7 @@ F: drivers/acpi/pmic/intel_pmic_xpower.c N: axp288 X-POWERS MULTIFUNCTION 
PMIC DEVICE DRIVERS -M: Chen-Yu Tsai +M: Chen-Yu Tsai L: linux-kernel@vger.kernel.org S: Maintained N: axp[128] diff --git a/Makefile b/Makefile index 17cfa11ca716..d14824792227 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 18 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/Kconfig b/arch/Kconfig index ebe08b9186ad..74ff01133532 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -965,6 +965,7 @@ config HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC def_bool y depends on HAVE_CFI_ICALL_NORMALIZE_INTEGERS depends on RUSTC_VERSION >= 107900 + depends on ARM64 || X86_64 # With GCOV/KASAN we need this fix: https://github.com/rust-lang/rust/pull/129373 depends on (RUSTC_LLVM_VERSION >= 190103 && RUSTC_VERSION >= 108200) || \ (!GCOV_KERNEL && !KASAN_GENERIC && !KASAN_SW_TAGS) diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index a7cd526dd7ca..f930396d9dae 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -88,7 +88,7 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_DW=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index afa6a348f444..6b779dee5ea0 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -86,7 +86,7 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_DW=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 2bfa6371953c..a89b50d5369d 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -88,7 +88,7 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_DW=y # CONFIG_IOMMU_SUPPORT is 
not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 1558e8e87767..1b8b2a098cda 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -77,7 +77,7 @@ CONFIG_DMADEVICES=y CONFIG_DW_AXI_DMAC=y CONFIG_IIO=y CONFIG_TI_ADC108S102=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_NFS_FS=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 03d9ac20baa9..b7120523e09a 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -74,7 +74,7 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_STORAGE=y CONFIG_USB_SERIAL=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index c09488992f13..4077abd5980c 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -81,7 +81,7 @@ CONFIG_MMC_DW=y CONFIG_UIO=y CONFIG_UIO_PDRV_GENIRQ=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/arm/boot/dts/st/stih410.dtsi b/arch/arm/boot/dts/st/stih410.dtsi index d56343f44fda..07da9b48ccac 100644 --- a/arch/arm/boot/dts/st/stih410.dtsi +++ b/arch/arm/boot/dts/st/stih410.dtsi @@ -34,6 +34,41 @@ status = "disabled"; }; + display-subsystem { + compatible = "st,sti-display-subsystem"; + ports = <&compositor>, <&hqvdp>, <&tvout>, <&sti_hdmi>; + + assigned-clocks = <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>, + <&clk_s_c0_pll1 0>, + <&clk_s_c0_flexgen CLK_COMPO_DVP>, + <&clk_s_c0_flexgen CLK_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, + <&clk_s_d2_flexgen CLK_PIX_GDP1>, + <&clk_s_d2_flexgen CLK_PIX_GDP2>, 
+ <&clk_s_d2_flexgen CLK_PIX_GDP3>, + <&clk_s_d2_flexgen CLK_PIX_GDP4>; + + assigned-clock-parents = <0>, + <0>, + <0>, + <&clk_s_c0_pll1 0>, + <&clk_s_c0_pll1 0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 0>; + + assigned-clock-rates = <297000000>, + <297000000>, + <0>, + <400000000>, + <400000000>; + }; + soc { ohci0: usb@9a03c00 { compatible = "st,st-ohci-300x"; @@ -99,153 +134,176 @@ status = "disabled"; }; - sti-display-subsystem@0 { - compatible = "st,sti-display-subsystem"; - #address-cells = <1>; - #size-cells = <1>; + compositor: display-controller@9d11000 { + compatible = "st,stih407-compositor"; + reg = <0x9d11000 0x1000>; - reg = <0 0>; - assigned-clocks = <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>, - <&clk_s_c0_pll1 0>, - <&clk_s_c0_flexgen CLK_COMPO_DVP>, - <&clk_s_c0_flexgen CLK_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, - <&clk_s_d2_flexgen CLK_PIX_GDP1>, - <&clk_s_d2_flexgen CLK_PIX_GDP2>, - <&clk_s_d2_flexgen CLK_PIX_GDP3>, - <&clk_s_d2_flexgen CLK_PIX_GDP4>; + clock-names = "compo_main", + "compo_aux", + "pix_main", + "pix_aux", + "pix_gdp1", + "pix_gdp2", + "pix_gdp3", + "pix_gdp4", + "main_parent", + "aux_parent"; - assigned-clock-parents = <0>, - <0>, - <0>, - <&clk_s_c0_pll1 0>, - <&clk_s_c0_pll1 0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>, - <&clk_s_d2_quadfs 0>, + clocks = <&clk_s_c0_flexgen CLK_COMPO_DVP>, + <&clk_s_c0_flexgen CLK_COMPO_DVP>, + <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, + <&clk_s_d2_flexgen CLK_PIX_GDP1>, + <&clk_s_d2_flexgen CLK_PIX_GDP2>, + <&clk_s_d2_flexgen CLK_PIX_GDP3>, + <&clk_s_d2_flexgen CLK_PIX_GDP4>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; + + reset-names = "compo-main", "compo-aux"; + resets = <&softreset STIH407_COMPO_SOFTRESET>, + <&softreset STIH407_COMPO_SOFTRESET>; + st,vtg = <&vtg_main>, <&vtg_aux>; + + 
ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + compo_main_out: endpoint { + remote-endpoint = <&tvout_in0>; + }; + }; + + port@1 { + reg = <1>; + compo_aux_out: endpoint { + remote-endpoint = <&tvout_in1>; + }; + }; + }; + }; + + tvout: encoder@8d08000 { + compatible = "st,stih407-tvout"; + reg = <0x8d08000 0x1000>; + reg-names = "tvout-reg"; + reset-names = "tvout"; + resets = <&softreset STIH407_HDTVOUT_SOFTRESET>; + assigned-clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, + <&clk_s_d2_flexgen CLK_TMDS_HDMI>, + <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, + <&clk_s_d0_flexgen CLK_PCM_0>, + <&clk_s_d2_flexgen CLK_PIX_HDDAC>, + <&clk_s_d2_flexgen CLK_HDDAC>; + + assigned-clock-parents = <&clk_s_d2_quadfs 0>, + <&clk_tmdsout_hdmi>, <&clk_s_d2_quadfs 0>, + <&clk_s_d0_quadfs 0>, <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 0>; - assigned-clock-rates = <297000000>, - <297000000>, - <0>, - <400000000>, - <400000000>; - - ranges; - - sti-compositor@9d11000 { - compatible = "st,stih407-compositor"; - reg = <0x9d11000 0x1000>; - - clock-names = "compo_main", - "compo_aux", - "pix_main", - "pix_aux", - "pix_gdp1", - "pix_gdp2", - "pix_gdp3", - "pix_gdp4", - "main_parent", - "aux_parent"; - - clocks = <&clk_s_c0_flexgen CLK_COMPO_DVP>, - <&clk_s_c0_flexgen CLK_COMPO_DVP>, - <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_AUX_DISP>, - <&clk_s_d2_flexgen CLK_PIX_GDP1>, - <&clk_s_d2_flexgen CLK_PIX_GDP2>, - <&clk_s_d2_flexgen CLK_PIX_GDP3>, - <&clk_s_d2_flexgen CLK_PIX_GDP4>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - - reset-names = "compo-main", "compo-aux"; - resets = <&softreset STIH407_COMPO_SOFTRESET>, - <&softreset STIH407_COMPO_SOFTRESET>; - st,vtg = <&vtg_main>, <&vtg_aux>; - }; - - sti-tvout@8d08000 { - compatible = "st,stih407-tvout"; - reg = <0x8d08000 0x1000>; - reg-names = "tvout-reg"; - reset-names = "tvout"; - resets = <&softreset STIH407_HDTVOUT_SOFTRESET>; + ports { #address-cells = <1>; - #size-cells = <1>; - 
assigned-clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, - <&clk_s_d2_flexgen CLK_TMDS_HDMI>, - <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, - <&clk_s_d0_flexgen CLK_PCM_0>, - <&clk_s_d2_flexgen CLK_PIX_HDDAC>, - <&clk_s_d2_flexgen CLK_HDDAC>; + #size-cells = <0>; - assigned-clock-parents = <&clk_s_d2_quadfs 0>, - <&clk_tmdsout_hdmi>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d0_quadfs 0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 0>; + port@0 { + reg = <0>; + tvout_in0: endpoint { + remote-endpoint = <&compo_main_out>; + }; + }; + + port@1 { + reg = <1>; + tvout_in1: endpoint { + remote-endpoint = <&compo_aux_out>; + }; + }; + + port@2 { + reg = <2>; + tvout_out0: endpoint { + remote-endpoint = <&hdmi_in>; + }; + }; + + port@3 { + reg = <3>; + tvout_out1: endpoint { + remote-endpoint = <&hda_in>; + }; + }; }; + }; - sti_hdmi: sti-hdmi@8d04000 { - compatible = "st,stih407-hdmi"; - reg = <0x8d04000 0x1000>; - reg-names = "hdmi-reg"; - #sound-dai-cells = <0>; - interrupts = ; - interrupt-names = "irq"; - clock-names = "pix", - "tmds", - "phy", - "audio", - "main_parent", - "aux_parent"; + sti_hdmi: hdmi@8d04000 { + compatible = "st,stih407-hdmi"; + reg = <0x8d04000 0x1000>; + reg-names = "hdmi-reg"; + #sound-dai-cells = <0>; + interrupts = ; + interrupt-names = "irq"; + clock-names = "pix", + "tmds", + "phy", + "audio", + "main_parent", + "aux_parent"; - clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, - <&clk_s_d2_flexgen CLK_TMDS_HDMI>, - <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, - <&clk_s_d0_flexgen CLK_PCM_0>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; + clocks = <&clk_s_d2_flexgen CLK_PIX_HDMI>, + <&clk_s_d2_flexgen CLK_TMDS_HDMI>, + <&clk_s_d2_flexgen CLK_REF_HDMIPHY>, + <&clk_s_d0_flexgen CLK_PCM_0>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; - hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; - reset-names = "hdmi"; - resets = <&softreset STIH407_HDMI_TX_PHY_SOFTRESET>; - ddc = <&hdmiddc>; + hdmi,hpd-gpio = <&pio5 3 GPIO_ACTIVE_LOW>; + reset-names = "hdmi"; + resets = <&softreset 
STIH407_HDMI_TX_PHY_SOFTRESET>; + ddc = <&hdmiddc>; + + port { + hdmi_in: endpoint { + remote-endpoint = <&tvout_out0>; + }; }; + }; - sti-hda@8d02000 { - compatible = "st,stih407-hda"; - status = "disabled"; - reg = <0x8d02000 0x400>, <0x92b0120 0x4>; - reg-names = "hda-reg", "video-dacs-ctrl"; - clock-names = "pix", - "hddac", - "main_parent", - "aux_parent"; - clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, - <&clk_s_d2_flexgen CLK_HDDAC>, - <&clk_s_d2_quadfs 0>, - <&clk_s_d2_quadfs 1>; - }; + analog@8d02000 { + compatible = "st,stih407-hda"; + status = "disabled"; + reg = <0x8d02000 0x400>, <0x92b0120 0x4>; + reg-names = "hda-reg", "video-dacs-ctrl"; + clock-names = "pix", + "hddac", + "main_parent", + "aux_parent"; + clocks = <&clk_s_d2_flexgen CLK_PIX_HDDAC>, + <&clk_s_d2_flexgen CLK_HDDAC>, + <&clk_s_d2_quadfs 0>, + <&clk_s_d2_quadfs 1>; - sti-hqvdp@9c00000 { - compatible = "st,stih407-hqvdp"; - reg = <0x9C00000 0x100000>; - clock-names = "hqvdp", "pix_main"; - clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, - <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>; - reset-names = "hqvdp"; - resets = <&softreset STIH407_HDQVDP_SOFTRESET>; - st,vtg = <&vtg_main>; + port { + hda_in: endpoint { + remote-endpoint = <&tvout_out1>; + }; }; }; + hqvdp: plane@9c00000 { + compatible = "st,stih407-hqvdp"; + reg = <0x9C00000 0x100000>; + clock-names = "hqvdp", "pix_main"; + clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, + <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>; + reset-names = "hqvdp"; + resets = <&softreset STIH407_HDQVDP_SOFTRESET>; + st,vtg = <&vtg_main>; + }; + bdisp0:bdisp@9f10000 { compatible = "st,stih407-bdisp"; reg = <0x9f10000 0x1000>; diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig index 516689dc6cf1..242a61208a0f 100644 --- a/arch/arm/configs/axm55xx_defconfig +++ b/arch/arm/configs/axm55xx_defconfig @@ -194,8 +194,7 @@ CONFIG_MAILBOX=y CONFIG_PL320_MBOX=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# 
CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=y diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig index 27dc3bf6b124..4a8ac09843d7 100644 --- a/arch/arm/configs/bcm2835_defconfig +++ b/arch/arm/configs/bcm2835_defconfig @@ -154,8 +154,8 @@ CONFIG_PWM_BCM2835=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_FANOTIFY=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index e2ddaca0f89d..673408a10888 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -228,7 +228,7 @@ CONFIG_PWM=y CONFIG_PWM_TIECAP=m CONFIG_PWM_TIEHRPWM=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_XFS_FS=m CONFIG_AUTOFS_FS=m diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig index d76eb12d29a7..bb6c4748bfc8 100644 --- a/arch/arm/configs/dove_defconfig +++ b/arch/arm/configs/dove_defconfig @@ -95,8 +95,8 @@ CONFIG_RTC_DRV_MV=y CONFIG_DMADEVICES=y CONFIG_MV_XOR=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig index 2248afaf35b5..7f3756d8b086 100644 --- a/arch/arm/configs/ep93xx_defconfig +++ b/arch/arm/configs/ep93xx_defconfig @@ -103,8 +103,8 @@ CONFIG_RTC_DRV_EP93XX=y CONFIG_DMADEVICES=y CONFIG_EP93XX_DMA=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index 
9a57763a8d38..0d55056c6f82 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -436,9 +436,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_AUTOFS_FS=y diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig index 3cb995b9616a..81199dddcde7 100644 --- a/arch/arm/configs/ixp4xx_defconfig +++ b/arch/arm/configs/ixp4xx_defconfig @@ -158,8 +158,8 @@ CONFIG_IXP4XX_NPE=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_OVERLAY_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig index 842a989baa27..f67e9cda73e2 100644 --- a/arch/arm/configs/mmp2_defconfig +++ b/arch/arm/configs/mmp2_defconfig @@ -53,7 +53,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_MAX8925=y # CONFIG_RESET_CONTROLLER is not set CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_MSDOS_FS=y diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index fa06d98e43fc..e2d9f3610063 100644 --- a/arch/arm/configs/moxart_defconfig +++ b/arch/arm/configs/moxart_defconfig @@ -113,7 +113,7 @@ CONFIG_RTC_DRV_MOXART=y CONFIG_DMADEVICES=y CONFIG_MOXART_DMA=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index b523bc246c09..59b020e66a0b 100644 --- a/arch/arm/configs/multi_v5_defconfig +++ b/arch/arm/configs/multi_v5_defconfig @@ -268,7 +268,7 @@ CONFIG_PWM_ATMEL=m 
CONFIG_PWM_ATMEL_HLCDC_PWM=m CONFIG_PWM_ATMEL_TCB=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_UDF_FS=m diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig index 3343f72de7ea..55f4ab67a306 100644 --- a/arch/arm/configs/mv78xx0_defconfig +++ b/arch/arm/configs/mv78xx0_defconfig @@ -91,8 +91,8 @@ CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_RS5C372=y CONFIG_RTC_DRV_M41T80=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set CONFIG_EXT4_FS=m CONFIG_ISO9660_FS=m CONFIG_JOLIET=y diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index 23dbb80fcc2e..d1742a7cae6a 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig @@ -168,7 +168,7 @@ CONFIG_MV_XOR=y CONFIG_STAGING=y CONFIG_FB_XGI=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_UDF_FS=m diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index ea28ed8991b4..696b4fbc2412 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig @@ -116,7 +116,7 @@ CONFIG_IIO_ST_ACCEL_3AXIS=y CONFIG_PWM=y CONFIG_PWM_STMPE=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig index 661e5d6894bd..24c54bf1e243 100644 --- a/arch/arm/configs/omap1_defconfig +++ b/arch/arm/configs/omap1_defconfig @@ -184,7 +184,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_OMAP=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_AUTOFS_FS=y CONFIG_ISO9660_FS=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 1d5f75241739..4e53c331cd84 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ 
b/arch/arm/configs/omap2plus_defconfig @@ -679,7 +679,7 @@ CONFIG_TWL4030_USB=m CONFIG_COUNTER=m CONFIG_TI_EQEP=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_SECURITY=y CONFIG_FANOTIFY=y CONFIG_QUOTA=y diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig index 62b9c6102789..c28426250ec3 100644 --- a/arch/arm/configs/orion5x_defconfig +++ b/arch/arm/configs/orion5x_defconfig @@ -115,8 +115,8 @@ CONFIG_RTC_DRV_M48T86=y CONFIG_DMADEVICES=y CONFIG_MV_XOR=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set CONFIG_EXT4_FS=m CONFIG_ISO9660_FS=m CONFIG_JOLIET=y diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 70489f3555d0..3ea189f1f42f 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -579,9 +579,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_AUTOFS_FS=m CONFIG_FUSE_FS=m diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig index fa681a7a49c2..29a1dea500f0 100644 --- a/arch/arm/configs/qcom_defconfig +++ b/arch/arm/configs/qcom_defconfig @@ -291,7 +291,7 @@ CONFIG_INTERCONNECT_QCOM_MSM8974=m CONFIG_INTERCONNECT_QCOM_SDX55=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_FUSE_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y diff --git a/arch/arm/configs/rpc_defconfig b/arch/arm/configs/rpc_defconfig index 24f1fa868230..46df453e224e 100644 --- a/arch/arm/configs/rpc_defconfig +++ b/arch/arm/configs/rpc_defconfig @@ -77,7 +77,7 @@ CONFIG_SOUND_VIDC=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_PCF8583=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_AUTOFS_FS=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y diff --git 
a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig index 967b1cb22136..7bf28a83946a 100644 --- a/arch/arm/configs/s3c6400_defconfig +++ b/arch/arm/configs/s3c6400_defconfig @@ -52,9 +52,9 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_S3C=y CONFIG_PWM=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_CRAMFS=y diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig index e14720a9a5ac..e2ad9a05566f 100644 --- a/arch/arm/configs/sama7_defconfig +++ b/arch/arm/configs/sama7_defconfig @@ -201,7 +201,7 @@ CONFIG_MCHP_EIC=y CONFIG_RESET_CONTROLLER=y CONFIG_NVMEM_MICROCHIP_OTPC=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_AUTOFS_FS=m CONFIG_VFAT_FS=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 294906c8f16e..f2e42846b116 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -136,7 +136,7 @@ CONFIG_FPGA_REGION=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_AUTOFS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig index a8f992fdb30d..8b19af1ea67c 100644 --- a/arch/arm/configs/spear13xx_defconfig +++ b/arch/arm/configs/spear13xx_defconfig @@ -84,8 +84,8 @@ CONFIG_DMATEST=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_AUTOFS_FS=m CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=m diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig index 8dc5a388759c..b4e4b96a98af 100644 --- a/arch/arm/configs/spear3xx_defconfig +++ b/arch/arm/configs/spear3xx_defconfig @@ -67,8 +67,8 @@ CONFIG_DMATEST=m 
CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_AUTOFS_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig index 4e9e1a6ff381..7083b1bd8573 100644 --- a/arch/arm/configs/spear6xx_defconfig +++ b/arch/arm/configs/spear6xx_defconfig @@ -53,8 +53,8 @@ CONFIG_DMATEST=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_AUTOFS_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig index ac2a0f998c73..395df2f9dc8e 100644 --- a/arch/arm/configs/spitz_defconfig +++ b/arch/arm/configs/spitz_defconfig @@ -193,8 +193,8 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig index dcd9c316072e..82190b155b14 100644 --- a/arch/arm/configs/stm32_defconfig +++ b/arch/arm/configs/stm32_defconfig @@ -69,7 +69,7 @@ CONFIG_STM32_MDMA=y CONFIG_IIO=y CONFIG_STM32_ADC_CORE=y CONFIG_STM32_ADC=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y # CONFIG_FILE_LOCKING is not set # CONFIG_DNOTIFY is not set # CONFIG_INOTIFY_USER is not set diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index ba863b445417..ab477ca13f89 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -319,9 +319,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y 
+CONFIG_EXT4_FS_SECURITY=y # CONFIG_DNOTIFY is not set CONFIG_VFAT_FS=y CONFIG_TMPFS=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index 9c8dc6dd5fe3..e88533b78327 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -175,7 +175,7 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig index cdb6065e04fd..b9454f6954f8 100644 --- a/arch/arm/configs/vexpress_defconfig +++ b/arch/arm/configs/vexpress_defconfig @@ -120,7 +120,7 @@ CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y CONFIG_JFFS2_FS=y diff --git a/arch/arm64/boot/dts/freescale/imx95.dtsi b/arch/arm64/boot/dts/freescale/imx95.dtsi index 1292677cbe4e..f94776a0e47b 100644 --- a/arch/arm64/boot/dts/freescale/imx95.dtsi +++ b/arch/arm64/boot/dts/freescale/imx95.dtsi @@ -250,6 +250,28 @@ clock-output-names = "dummy"; }; + gpu_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp-500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-hz-real = /bits/ 64 <500000000>; + opp-microvolt = <920000>; + }; + + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-hz-real = /bits/ 64 <800000000>; + opp-microvolt = <920000>; + }; + + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-hz-real = /bits/ 64 <1000000000>; + opp-microvolt = <920000>; + }; + }; + clk_ext1: clock-ext1 { compatible = "fixed-clock"; #clock-cells = <0>; @@ -2138,6 +2160,21 @@ }; }; + gpu: gpu@4d900000 { + compatible = "nxp,imx95-mali", "arm,mali-valhall-csf"; + reg = <0 0x4d900000 0 0x480000>; + clocks = <&scmi_clk IMX95_CLK_GPU>, <&scmi_clk IMX95_CLK_GPUAPB>; + clock-names = "core", "coregroup"; + interrupts = , + , + ; + 
interrupt-names = "job", "mmu", "gpu"; + operating-points-v2 = <&gpu_opp_table>; + power-domains = <&scmi_devpd IMX95_PD_GPU>; + #cooling-cells = <2>; + dynamic-power-coefficient = <1013>; + }; + ddr-pmu@4e090dc0 { compatible = "fsl,imx95-ddr-pmu", "fsl,imx93-ddr-pmu"; reg = <0x0 0x4e090dc0 0x0 0x200>; diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b37da3ee8529..99a7c0235e6d 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -24,22 +24,48 @@ * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it * can reset into an UNKNOWN state and might not read as 1 until it has * been initialized explicitly. - * - * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but - * don't advertise it (they predate this relaxation). - * * Initalize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H * indicating whether the CPU is running in E2H mode. */ mrs_s x1, SYS_ID_AA64MMFR4_EL1 sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH cmp x1, #0 - b.ge .LnVHE_\@ + b.lt .LnE2H0_\@ + /* + * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised + * as such via ID_AA64MMFR4_EL1.E2H0: + * + * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to + * have HCR_EL2.E2H implemented as RAO/WI. + * + * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest + * reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV + * guests on these hosts can write to HCR_EL2.E2H without + * trapping to the hypervisor, but these writes have no + * functional effect. + * + * Handle both cases by checking for an essential VHE property + * (system register remapping) to decide whether we're + * effectively VHE-only or not. 
+ */ + msr_hcr_el2 x0 // Setup HCR_EL2 as nVHE + isb + mov x1, #1 // Write something to FAR_EL1 + msr far_el1, x1 + isb + mov x1, #2 // Try to overwrite it via FAR_EL2 + msr far_el2, x1 + isb + mrs x1, far_el1 // If we see the latest write in FAR_EL1, + cmp x1, #2 // we can safely assume we are VHE only. + b.ne .LnVHE_\@ // Otherwise, we know that nVHE works. + +.LnE2H0_\@: orr x0, x0, #HCR_E2H -.LnVHE_\@: msr_hcr_el2 x0 isb +.LnVHE_\@: .endm .macro __init_el2_sctlr diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b763293281c8..64302c438355 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -816,6 +816,11 @@ struct kvm_vcpu_arch { u64 hcrx_el2; u64 mdcr_el2; + struct { + u64 r; + u64 w; + } fgt[__NR_FGT_GROUP_IDS__]; + /* Exception Information */ struct kvm_vcpu_fault_info fault; @@ -1600,6 +1605,51 @@ static inline bool kvm_arch_has_irq_bypass(void) void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt); void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1); void check_feature_map(void); +void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu); +static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg) +{ + switch (reg) { + case HFGRTR_EL2: + case HFGWTR_EL2: + return HFGRTR_GROUP; + case HFGITR_EL2: + return HFGITR_GROUP; + case HDFGRTR_EL2: + case HDFGWTR_EL2: + return HDFGRTR_GROUP; + case HAFGRTR_EL2: + return HAFGRTR_GROUP; + case HFGRTR2_EL2: + case HFGWTR2_EL2: + return HFGRTR2_GROUP; + case HFGITR2_EL2: + return HFGITR2_GROUP; + case HDFGRTR2_EL2: + case HDFGWTR2_EL2: + return HDFGRTR2_GROUP; + default: + BUILD_BUG_ON(1); + } +} + +#define vcpu_fgt(vcpu, reg) \ + ({ \ + enum fgt_group_id id = __fgt_reg_to_group_id(reg); \ + u64 *p; \ + switch (reg) { \ + case HFGWTR_EL2: \ + case HDFGWTR_EL2: \ + case HFGWTR2_EL2: \ + case HDFGWTR2_EL2: \ + p = &(vcpu)->arch.fgt[id].w; \ + break; \ + default: \ + p = &(vcpu)->arch.fgt[id].r; \ + 
break; \ + } \ + \ + p; \ + }) #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 6455db1b54fd..c231d2a3e515 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -1220,10 +1220,19 @@ __val; \ }) +/* + * The "Z" constraint combined with the "%x0" template should be enough + * to force XZR generation if (v) is a constant 0 value but LLVM does not + * yet understand that modifier/constraint combo so a conditional is required + * to nudge the compiler into using XZR as a source for a 0 constant value. + */ #define write_sysreg_s(v, r) do { \ u64 __val = (u64)(v); \ u32 __maybe_unused __check_r = (u32)(r); \ - asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \ + if (__builtin_constant_p(__val) && __val == 0) \ + asm volatile(__msr_s(r, "xzr")); \ + else \ + asm volatile(__msr_s(r, "%x0") : : "r" (__val)); \ } while (0) /* diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c index f546a914f041..a9c81715ce59 100644 --- a/arch/arm64/kernel/entry-common.c +++ b/arch/arm64/kernel/entry-common.c @@ -697,6 +697,8 @@ static void noinstr el0_breakpt(struct pt_regs *regs, unsigned long esr) static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr) { + bool step_done; + if (!is_ttbr0_addr(regs->pc)) arm64_apply_bp_hardening(); @@ -707,10 +709,10 @@ static void noinstr el0_softstp(struct pt_regs *regs, unsigned long esr) * If we are stepping a suspended breakpoint there's nothing more to do: * the single-step is complete. 
*/ - if (!try_step_suspended_breakpoints(regs)) { - local_daif_restore(DAIF_PROCCTX); + step_done = try_step_suspended_breakpoints(regs); + local_daif_restore(DAIF_PROCCTX); + if (!step_done) do_el0_softstep(esr, regs); - } arm64_exit_to_user_mode(regs); } diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index dbd74e4885e2..3f675875abea 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -66,7 +66,7 @@ static int nr_timers(struct kvm_vcpu *vcpu) u32 timer_get_ctl(struct arch_timer_context *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt); switch(arch_timer_ctx_index(ctxt)) { case TIMER_VTIMER: @@ -85,7 +85,7 @@ u32 timer_get_ctl(struct arch_timer_context *ctxt) u64 timer_get_cval(struct arch_timer_context *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt); switch(arch_timer_ctx_index(ctxt)) { case TIMER_VTIMER: @@ -104,7 +104,7 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt); switch(arch_timer_ctx_index(ctxt)) { case TIMER_VTIMER: @@ -126,7 +126,7 @@ static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctxt); switch(arch_timer_ctx_index(ctxt)) { case TIMER_VTIMER: @@ -146,16 +146,6 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) } } -static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) -{ - if (!ctxt->offset.vm_offset) { - WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); - return; - } - - WRITE_ONCE(*ctxt->offset.vm_offset, offset); -} - u64 kvm_phys_timer_read(void) { return timecounter->cc->read(timecounter->cc); @@ -343,7 
+333,7 @@ static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt) u64 ns; ctx = container_of(hrt, struct arch_timer_context, hrtimer); - vcpu = ctx->vcpu; + vcpu = timer_context_to_vcpu(ctx); trace_kvm_timer_hrtimer_expire(ctx); @@ -436,8 +426,9 @@ static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level) * * But hey, it's fast, right? */ - if (is_hyp_ctxt(ctx->vcpu) && - (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) { + struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx); + if (is_hyp_ctxt(vcpu) && + (ctx == vcpu_vtimer(vcpu) || ctx == vcpu_ptimer(vcpu))) { unsigned long val = timer_get_ctl(ctx); __assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level); timer_set_ctl(ctx, val); @@ -470,7 +461,7 @@ static void timer_emulate(struct arch_timer_context *ctx) trace_kvm_timer_emulate(ctx, should_fire); if (should_fire != ctx->irq.level) - kvm_timer_update_irq(ctx->vcpu, should_fire, ctx); + kvm_timer_update_irq(timer_context_to_vcpu(ctx), should_fire, ctx); kvm_timer_update_status(ctx, should_fire); @@ -498,7 +489,7 @@ static void set_cntpoff(u64 cntpoff) static void timer_save_state(struct arch_timer_context *ctx) { - struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); + struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx)); enum kvm_arch_timers index = arch_timer_ctx_index(ctx); unsigned long flags; @@ -609,7 +600,7 @@ static void kvm_timer_unblocking(struct kvm_vcpu *vcpu) static void timer_restore_state(struct arch_timer_context *ctx) { - struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); + struct arch_timer_cpu *timer = vcpu_timer(timer_context_to_vcpu(ctx)); enum kvm_arch_timers index = arch_timer_ctx_index(ctx); unsigned long flags; @@ -668,7 +659,7 @@ static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, boo static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx) { - struct kvm_vcpu *vcpu = ctx->vcpu; + struct kvm_vcpu *vcpu = timer_context_to_vcpu(ctx); 
bool phys_active = false; /* @@ -677,7 +668,7 @@ static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx) * this point and the register restoration, we'll take the * interrupt anyway. */ - kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx); + kvm_timer_update_irq(vcpu, kvm_timer_should_fire(ctx), ctx); if (irqchip_in_kernel(vcpu->kvm)) phys_active = kvm_vgic_map_is_active(vcpu, timer_irq(ctx)); @@ -1063,7 +1054,7 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) struct arch_timer_context *ctxt = vcpu_get_timer(vcpu, timerid); struct kvm *kvm = vcpu->kvm; - ctxt->vcpu = vcpu; + ctxt->timer_id = timerid; if (timerid == TIMER_VTIMER) ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset; @@ -1121,49 +1112,6 @@ void kvm_timer_cpu_down(void) disable_percpu_irq(host_ptimer_irq); } -int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) -{ - struct arch_timer_context *timer; - - switch (regid) { - case KVM_REG_ARM_TIMER_CTL: - timer = vcpu_vtimer(vcpu); - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); - break; - case KVM_REG_ARM_TIMER_CNT: - if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, - &vcpu->kvm->arch.flags)) { - timer = vcpu_vtimer(vcpu); - timer_set_offset(timer, kvm_phys_timer_read() - value); - } - break; - case KVM_REG_ARM_TIMER_CVAL: - timer = vcpu_vtimer(vcpu); - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); - break; - case KVM_REG_ARM_PTIMER_CTL: - timer = vcpu_ptimer(vcpu); - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); - break; - case KVM_REG_ARM_PTIMER_CNT: - if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, - &vcpu->kvm->arch.flags)) { - timer = vcpu_ptimer(vcpu); - timer_set_offset(timer, kvm_phys_timer_read() - value); - } - break; - case KVM_REG_ARM_PTIMER_CVAL: - timer = vcpu_ptimer(vcpu); - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); - break; - - default: - return -1; - } - - return 0; -} - static u64 read_timer_ctl(struct arch_timer_context *timer) 
{ /* @@ -1180,31 +1128,6 @@ static u64 read_timer_ctl(struct arch_timer_context *timer) return ctl; } -u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) -{ - switch (regid) { - case KVM_REG_ARM_TIMER_CTL: - return kvm_arm_timer_read(vcpu, - vcpu_vtimer(vcpu), TIMER_REG_CTL); - case KVM_REG_ARM_TIMER_CNT: - return kvm_arm_timer_read(vcpu, - vcpu_vtimer(vcpu), TIMER_REG_CNT); - case KVM_REG_ARM_TIMER_CVAL: - return kvm_arm_timer_read(vcpu, - vcpu_vtimer(vcpu), TIMER_REG_CVAL); - case KVM_REG_ARM_PTIMER_CTL: - return kvm_arm_timer_read(vcpu, - vcpu_ptimer(vcpu), TIMER_REG_CTL); - case KVM_REG_ARM_PTIMER_CNT: - return kvm_arm_timer_read(vcpu, - vcpu_ptimer(vcpu), TIMER_REG_CNT); - case KVM_REG_ARM_PTIMER_CVAL: - return kvm_arm_timer_read(vcpu, - vcpu_ptimer(vcpu), TIMER_REG_CVAL); - } - return (u64)-1; -} - static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, struct arch_timer_context *timer, enum kvm_arch_timer_regs treg) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index f21d1b7f20f8..870953b4a8a7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -642,6 +642,7 @@ nommu: vcpu->arch.hcr_el2 |= HCR_TWI; vcpu_set_pauth_traps(vcpu); + kvm_vcpu_load_fgt(vcpu); if (is_protected_kvm_enabled()) { kvm_call_hyp_nvhe(__pkvm_vcpu_load, @@ -1794,6 +1795,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_GET_VCPU_EVENTS: { struct kvm_vcpu_events events; + if (!kvm_vcpu_initialized(vcpu)) + return -ENOEXEC; + if (kvm_arm_vcpu_get_events(vcpu, &events)) return -EINVAL; @@ -1805,6 +1809,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_VCPU_EVENTS: { struct kvm_vcpu_events events; + if (!kvm_vcpu_initialized(vcpu)) + return -ENOEXEC; + if (copy_from_user(&events, argp, sizeof(events))) return -EFAULT; diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c index 20bb9af125b1..be26d5aa668c 100644 --- a/arch/arm64/kvm/at.c +++ b/arch/arm64/kvm/at.c @@ -91,7 +91,6 @@ static enum trans_regime compute_translation_regime(struct kvm_vcpu 
*vcpu, u32 o case OP_AT_S1E2W: case OP_AT_S1E2A: return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2; - break; default: return (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10; @@ -1602,13 +1601,17 @@ int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level) .fn = match_s1_desc, .priv = &dm, }, - .regime = TR_EL10, .as_el0 = false, .pan = false, }; struct s1_walk_result wr = {}; int ret; + if (is_hyp_ctxt(vcpu)) + wi.regime = vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2; + else + wi.regime = TR_EL10; + ret = setup_s1_walk(vcpu, &wi, &wr, va); if (ret) return ret; diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c index fbd8944a3dea..24bb3f36e9d5 100644 --- a/arch/arm64/kvm/config.c +++ b/arch/arm64/kvm/config.c @@ -5,6 +5,8 @@ */ #include +#include +#include #include /* @@ -1428,3 +1430,91 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r break; } } + +static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg) +{ + switch (reg) { + case HFGRTR_EL2: + return &hfgrtr_masks; + case HFGWTR_EL2: + return &hfgwtr_masks; + case HFGITR_EL2: + return &hfgitr_masks; + case HDFGRTR_EL2: + return &hdfgrtr_masks; + case HDFGWTR_EL2: + return &hdfgwtr_masks; + case HAFGRTR_EL2: + return &hafgrtr_masks; + case HFGRTR2_EL2: + return &hfgrtr2_masks; + case HFGWTR2_EL2: + return &hfgwtr2_masks; + case HFGITR2_EL2: + return &hfgitr2_masks; + case HDFGRTR2_EL2: + return &hdfgrtr2_masks; + case HDFGWTR2_EL2: + return &hdfgwtr2_masks; + default: + BUILD_BUG_ON(1); + } +} + +static __always_inline void __compute_fgt(struct kvm_vcpu *vcpu, enum vcpu_sysreg reg) +{ + u64 fgu = vcpu->kvm->arch.fgu[__fgt_reg_to_group_id(reg)]; + struct fgt_masks *m = __fgt_reg_to_masks(reg); + u64 clear = 0, set = 0, val = m->nmask; + + set |= fgu & m->mask; + clear |= fgu & m->nmask; + + if (is_nested_ctxt(vcpu)) { + u64 nested = __vcpu_sys_reg(vcpu, reg); + set |= nested & m->mask; + 
clear |= ~nested & m->nmask; + } + + val |= set; + val &= ~clear; + *vcpu_fgt(vcpu, reg) = val; +} + +static void __compute_hfgwtr(struct kvm_vcpu *vcpu) +{ + __compute_fgt(vcpu, HFGWTR_EL2); + + if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38)) + *vcpu_fgt(vcpu, HFGWTR_EL2) |= HFGWTR_EL2_TCR_EL1; +} + +static void __compute_hdfgwtr(struct kvm_vcpu *vcpu) +{ + __compute_fgt(vcpu, HDFGWTR_EL2); + + if (is_hyp_ctxt(vcpu)) + *vcpu_fgt(vcpu, HDFGWTR_EL2) |= HDFGWTR_EL2_MDSCR_EL1; +} + +void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu) +{ + if (!cpus_have_final_cap(ARM64_HAS_FGT)) + return; + + __compute_fgt(vcpu, HFGRTR_EL2); + __compute_hfgwtr(vcpu); + __compute_fgt(vcpu, HFGITR_EL2); + __compute_fgt(vcpu, HDFGRTR_EL2); + __compute_hdfgwtr(vcpu); + __compute_fgt(vcpu, HAFGRTR_EL2); + + if (!cpus_have_final_cap(ARM64_HAS_FGT2)) + return; + + __compute_fgt(vcpu, HFGRTR2_EL2); + __compute_fgt(vcpu, HFGWTR2_EL2); + __compute_fgt(vcpu, HFGITR2_EL2); + __compute_fgt(vcpu, HDFGRTR2_EL2); + __compute_fgt(vcpu, HDFGWTR2_EL2); +} diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c index 3515a273eaa2..3ad6b7c6e4ba 100644 --- a/arch/arm64/kvm/debug.c +++ b/arch/arm64/kvm/debug.c @@ -15,6 +15,12 @@ #include #include +static int cpu_has_spe(u64 dfr0) +{ + return cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && + !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P); +} + /** * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value * @@ -77,13 +83,12 @@ void kvm_init_host_debug_data(void) *host_data_ptr(debug_brps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr0); *host_data_ptr(debug_wrps) = SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr0); + if (cpu_has_spe(dfr0)) + host_data_set_flag(HAS_SPE); + if (has_vhe()) return; - if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_PMSVer_SHIFT) && - !(read_sysreg_s(SYS_PMBIDR_EL1) & PMBIDR_EL1_P)) - host_data_set_flag(HAS_SPE); - /* Check if we have BRBE implemented and available at the host */ if 
(cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT)) host_data_set_flag(HAS_BRBE); @@ -102,7 +107,7 @@ void kvm_init_host_debug_data(void) void kvm_debug_init_vhe(void) { /* Clear PMSCR_EL1.E{0,1}SPE which reset to UNKNOWN values. */ - if (SYS_FIELD_GET(ID_AA64DFR0_EL1, PMSVer, read_sysreg(id_aa64dfr0_el1))) + if (host_data_test_flag(HAS_SPE)) write_sysreg_el1(0, SYS_PMSCR); } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 16ba5e9ac86c..1c87699fd886 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -591,64 +591,6 @@ static unsigned long num_core_regs(const struct kvm_vcpu *vcpu) return copy_core_reg_indices(vcpu, NULL); } -static const u64 timer_reg_list[] = { - KVM_REG_ARM_TIMER_CTL, - KVM_REG_ARM_TIMER_CNT, - KVM_REG_ARM_TIMER_CVAL, - KVM_REG_ARM_PTIMER_CTL, - KVM_REG_ARM_PTIMER_CNT, - KVM_REG_ARM_PTIMER_CVAL, -}; - -#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list) - -static bool is_timer_reg(u64 index) -{ - switch (index) { - case KVM_REG_ARM_TIMER_CTL: - case KVM_REG_ARM_TIMER_CNT: - case KVM_REG_ARM_TIMER_CVAL: - case KVM_REG_ARM_PTIMER_CTL: - case KVM_REG_ARM_PTIMER_CNT: - case KVM_REG_ARM_PTIMER_CVAL: - return true; - } - return false; -} - -static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - for (int i = 0; i < NUM_TIMER_REGS; i++) { - if (put_user(timer_reg_list[i], uindices)) - return -EFAULT; - uindices++; - } - - return 0; -} - -static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - void __user *uaddr = (void __user *)(long)reg->addr; - u64 val; - int ret; - - ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); - if (ret != 0) - return -EFAULT; - - return kvm_arm_timer_set_reg(vcpu, reg->id, val); -} - -static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - void __user *uaddr = (void __user *)(long)reg->addr; - u64 val; - - val = kvm_arm_timer_get_reg(vcpu, reg->id); - return copy_to_user(uaddr, &val, 
KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; -} - static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu) { const unsigned int slices = vcpu_sve_slices(vcpu); @@ -724,7 +666,6 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) res += num_sve_regs(vcpu); res += kvm_arm_num_sys_reg_descs(vcpu); res += kvm_arm_get_fw_num_regs(vcpu); - res += NUM_TIMER_REGS; return res; } @@ -755,11 +696,6 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) return ret; uindices += kvm_arm_get_fw_num_regs(vcpu); - ret = copy_timer_indices(vcpu, uindices); - if (ret < 0) - return ret; - uindices += NUM_TIMER_REGS; - return kvm_arm_copy_sys_reg_indices(vcpu, uindices); } @@ -777,9 +713,6 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg); } - if (is_timer_reg(reg->id)) - return get_timer_reg(vcpu, reg); - return kvm_arm_sys_reg_get_reg(vcpu, reg); } @@ -797,9 +730,6 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg); } - if (is_timer_reg(reg->id)) - return set_timer_reg(vcpu, reg); - return kvm_arm_sys_reg_set_reg(vcpu, reg); } diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index bca8c80e11da..cc7d5d1709cb 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -147,7 +147,12 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu) if (esr & ESR_ELx_WFx_ISS_RV) { u64 val, now; - now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT); + now = kvm_phys_timer_read(); + if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) + now -= timer_get_offset(vcpu_hvtimer(vcpu)); + else + now -= timer_get_offset(vcpu_vtimer(vcpu)); + val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu)); if (now >= val) diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index b6682202edf3..c5d5e5b86eaf 100644 --- 
a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -195,123 +195,6 @@ static inline void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) __deactivate_cptr_traps_nvhe(vcpu); } -#define reg_to_fgt_masks(reg) \ - ({ \ - struct fgt_masks *m; \ - switch(reg) { \ - case HFGRTR_EL2: \ - m = &hfgrtr_masks; \ - break; \ - case HFGWTR_EL2: \ - m = &hfgwtr_masks; \ - break; \ - case HFGITR_EL2: \ - m = &hfgitr_masks; \ - break; \ - case HDFGRTR_EL2: \ - m = &hdfgrtr_masks; \ - break; \ - case HDFGWTR_EL2: \ - m = &hdfgwtr_masks; \ - break; \ - case HAFGRTR_EL2: \ - m = &hafgrtr_masks; \ - break; \ - case HFGRTR2_EL2: \ - m = &hfgrtr2_masks; \ - break; \ - case HFGWTR2_EL2: \ - m = &hfgwtr2_masks; \ - break; \ - case HFGITR2_EL2: \ - m = &hfgitr2_masks; \ - break; \ - case HDFGRTR2_EL2: \ - m = &hdfgrtr2_masks; \ - break; \ - case HDFGWTR2_EL2: \ - m = &hdfgwtr2_masks; \ - break; \ - default: \ - BUILD_BUG_ON(1); \ - } \ - \ - m; \ - }) - -#define compute_clr_set(vcpu, reg, clr, set) \ - do { \ - u64 hfg = __vcpu_sys_reg(vcpu, reg); \ - struct fgt_masks *m = reg_to_fgt_masks(reg); \ - set |= hfg & m->mask; \ - clr |= ~hfg & m->nmask; \ - } while(0) - -#define reg_to_fgt_group_id(reg) \ - ({ \ - enum fgt_group_id id; \ - switch(reg) { \ - case HFGRTR_EL2: \ - case HFGWTR_EL2: \ - id = HFGRTR_GROUP; \ - break; \ - case HFGITR_EL2: \ - id = HFGITR_GROUP; \ - break; \ - case HDFGRTR_EL2: \ - case HDFGWTR_EL2: \ - id = HDFGRTR_GROUP; \ - break; \ - case HAFGRTR_EL2: \ - id = HAFGRTR_GROUP; \ - break; \ - case HFGRTR2_EL2: \ - case HFGWTR2_EL2: \ - id = HFGRTR2_GROUP; \ - break; \ - case HFGITR2_EL2: \ - id = HFGITR2_GROUP; \ - break; \ - case HDFGRTR2_EL2: \ - case HDFGWTR2_EL2: \ - id = HDFGRTR2_GROUP; \ - break; \ - default: \ - BUILD_BUG_ON(1); \ - } \ - \ - id; \ - }) - -#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \ - do { \ - u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \ - struct fgt_masks *m = reg_to_fgt_masks(reg); \ 
- set |= hfg & m->mask; \ - clr |= hfg & m->nmask; \ - } while(0) - -#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \ - do { \ - struct fgt_masks *m = reg_to_fgt_masks(reg); \ - u64 c = clr, s = set; \ - u64 val; \ - \ - ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \ - if (is_nested_ctxt(vcpu)) \ - compute_clr_set(vcpu, reg, c, s); \ - \ - compute_undef_clr_set(vcpu, kvm, reg, c, s); \ - \ - val = m->nmask; \ - val |= s; \ - val &= ~c; \ - write_sysreg_s(val, SYS_ ## reg); \ - } while(0) - -#define update_fgt_traps(hctxt, vcpu, kvm, reg) \ - update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0) - static inline bool cpu_has_amu(void) { u64 pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1); @@ -320,33 +203,36 @@ static inline bool cpu_has_amu(void) ID_AA64PFR0_EL1_AMU_SHIFT); } +#define __activate_fgt(hctxt, vcpu, reg) \ + do { \ + ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \ + write_sysreg_s(*vcpu_fgt(vcpu, reg), SYS_ ## reg); \ + } while (0) + static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt); - struct kvm *kvm = kern_hyp_va(vcpu->kvm); if (!cpus_have_final_cap(ARM64_HAS_FGT)) return; - update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2); - update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0, - cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ? 
- HFGWTR_EL2_TCR_EL1_MASK : 0); - update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2); - update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2); - update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2); + __activate_fgt(hctxt, vcpu, HFGRTR_EL2); + __activate_fgt(hctxt, vcpu, HFGWTR_EL2); + __activate_fgt(hctxt, vcpu, HFGITR_EL2); + __activate_fgt(hctxt, vcpu, HDFGRTR_EL2); + __activate_fgt(hctxt, vcpu, HDFGWTR_EL2); if (cpu_has_amu()) - update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2); + __activate_fgt(hctxt, vcpu, HAFGRTR_EL2); if (!cpus_have_final_cap(ARM64_HAS_FGT2)) return; - update_fgt_traps(hctxt, vcpu, kvm, HFGRTR2_EL2); - update_fgt_traps(hctxt, vcpu, kvm, HFGWTR2_EL2); - update_fgt_traps(hctxt, vcpu, kvm, HFGITR2_EL2); - update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR2_EL2); - update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR2_EL2); + __activate_fgt(hctxt, vcpu, HFGRTR2_EL2); + __activate_fgt(hctxt, vcpu, HFGWTR2_EL2); + __activate_fgt(hctxt, vcpu, HFGITR2_EL2); + __activate_fgt(hctxt, vcpu, HDFGRTR2_EL2); + __activate_fgt(hctxt, vcpu, HDFGWTR2_EL2); } #define __deactivate_fgt(htcxt, vcpu, reg) \ diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 05774aed09cb..43bde061b65d 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -172,6 +172,7 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu) /* Trust the host for non-protected vcpu features. 
*/ vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2; + memcpy(vcpu->arch.fgt, host_vcpu->arch.fgt, sizeof(vcpu->arch.fgt)); return 0; } diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c index 7a045cad6bdf..f04cda40545b 100644 --- a/arch/arm64/kvm/nested.c +++ b/arch/arm64/kvm/nested.c @@ -1859,13 +1859,16 @@ void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu) { u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2); + if (is_nested_ctxt(vcpu)) + vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE); /* * In yet another example where FEAT_NV2 is fscking broken, accesses * to MDSCR_EL1 are redirected to the VNCR despite having an effect * at EL2. Use a big hammer to apply sanity. + * + * Unless of course we have FEAT_FGT, in which case we can precisely + * trap MDSCR_EL1. */ - if (is_hyp_ctxt(vcpu)) + else if (!cpus_have_final_cap(ARM64_HAS_FGT)) vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA; - else - vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 91053aa832d0..e67eb39ddc11 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -203,7 +203,6 @@ static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg, MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL ); MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL ); MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL ); - MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL ); MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL ); MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL ); case CNTHCTL_EL2: @@ -1595,14 +1594,47 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, return true; } -static bool access_hv_timer(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) +static int arch_timer_set_user(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + u64 val) { - if (!vcpu_el2_e2h_is_set(vcpu)) - return undef_access(vcpu, p, r); + switch (reg_to_encoding(rd)) { + case SYS_CNTV_CTL_EL0: + case 
SYS_CNTP_CTL_EL0: + case SYS_CNTHV_CTL_EL2: + case SYS_CNTHP_CTL_EL2: + val &= ~ARCH_TIMER_CTRL_IT_STAT; + break; + case SYS_CNTVCT_EL0: + if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) + timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val); + return 0; + case SYS_CNTPCT_EL0: + if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) + timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val); + return 0; + } - return access_arch_timer(vcpu, p, r); + __vcpu_assign_sys_reg(vcpu, rd->reg, val); + return 0; +} + +static int arch_timer_get_user(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd, + u64 *val) +{ + switch (reg_to_encoding(rd)) { + case SYS_CNTVCT_EL0: + *val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu)); + break; + case SYS_CNTPCT_EL0: + *val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu)); + break; + default: + *val = __vcpu_sys_reg(vcpu, rd->reg); + } + + return 0; } static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, @@ -2507,15 +2539,20 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu, "trap of EL2 register redirected to EL1"); } -#define EL2_REG_FILTERED(name, acc, rst, v, filter) { \ +#define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \ SYS_DESC(SYS_##name), \ .access = acc, \ .reset = rst, \ .reg = name, \ + .get_user = gu, \ + .set_user = su, \ .visibility = filter, \ .val = v, \ } +#define EL2_REG_FILTERED(name, acc, rst, v, filter) \ + SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter) + #define EL2_REG(name, acc, rst, v) \ EL2_REG_FILTERED(name, acc, rst, v, el2_visibility) @@ -2526,6 +2563,10 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu, EL2_REG_VNCR_FILT(name, hidden_visibility) #define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v) +#define TIMER_REG(name, vis) \ + SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \ + arch_timer_get_user, arch_timer_set_user, vis) 
+ /* * Since reset() callback and field val are not used for idregs, they will be * used for specific purposes for idregs. @@ -2705,18 +2746,17 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu, if (guest_hyp_sve_traps_enabled(vcpu)) { kvm_inject_nested_sve_trap(vcpu); - return true; + return false; } if (!p->is_write) { - p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2); + p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2); return true; } vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1; vq = min(vq, vcpu_sve_max_vq(vcpu)); - vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2); - + __vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1); return true; } @@ -2833,6 +2873,16 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu, return __el2_visibility(vcpu, rd, s1pie_visibility); } +static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + if (vcpu_has_nv(vcpu) && + !vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0)) + return 0; + + return REG_HIDDEN; +} + static bool access_mdcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -3482,17 +3532,19 @@ static const struct sys_reg_desc sys_reg_descs[] = { AMU_AMEVTYPER1_EL0(14), AMU_AMEVTYPER1_EL0(15), - { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer }, - { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer }, + { SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer, + .get_user = arch_timer_get_user, .set_user = arch_timer_set_user }, + { SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer, + .get_user = arch_timer_get_user, .set_user = arch_timer_set_user }, { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer }, { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, - { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, - { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, + TIMER_REG(CNTP_CTL_EL0, NULL), + TIMER_REG(CNTP_CVAL_EL0, NULL), { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer }, - { 
SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer }, - { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer }, + TIMER_REG(CNTV_CTL_EL0, NULL), + TIMER_REG(CNTV_CVAL_EL0, NULL), /* PMEVCNTRn_EL0 */ PMU_PMEVCNTR_EL0(0), @@ -3690,12 +3742,12 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0), EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer }, - EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0), - EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0), + TIMER_REG(CNTHP_CTL_EL2, el2_visibility), + TIMER_REG(CNTHP_CVAL_EL2, el2_visibility), - { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer }, - EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0), - EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0), + { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility }, + TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility), + TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility), { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 }, @@ -5233,15 +5285,28 @@ static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) } } +static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg) +{ + switch(reg->id) { + case KVM_REG_ARM_TIMER_CVAL: + return TO_ARM64_SYS_REG(CNTV_CVAL_EL0); + case KVM_REG_ARM_TIMER_CNT: + return TO_ARM64_SYS_REG(CNTVCT_EL0); + default: + return reg->id; + } +} + int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, const struct sys_reg_desc table[], unsigned int num) { u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; const struct sys_reg_desc *r; + u64 id = kvm_one_reg_to_id(reg); u64 val; int ret; - r = id_to_sys_reg_desc(vcpu, reg->id, table, num); + r = id_to_sys_reg_desc(vcpu, id, table, num); if (!r || sysreg_hidden(vcpu, r)) return -ENOENT; @@ -5274,13 +5339,14 @@ int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, { u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr; 
const struct sys_reg_desc *r; + u64 id = kvm_one_reg_to_id(reg); u64 val; int ret; if (get_user(val, uaddr)) return -EFAULT; - r = id_to_sys_reg_desc(vcpu, reg->id, table, num); + r = id_to_sys_reg_desc(vcpu, id, table, num); if (!r || sysreg_hidden(vcpu, r)) return -ENOENT; @@ -5340,10 +5406,23 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg) static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) { + u64 idx; + if (!*uind) return true; - if (put_user(sys_reg_to_index(reg), *uind)) + switch (reg_to_encoding(reg)) { + case SYS_CNTV_CVAL_EL0: + idx = KVM_REG_ARM_TIMER_CVAL; + break; + case SYS_CNTVCT_EL0: + idx = KVM_REG_ARM_TIMER_CNT; + break; + default: + idx = sys_reg_to_index(reg); + } + + if (put_user(idx, *uind)) return false; (*uind)++; diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index 317abc490368..b3f904472fac 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h @@ -257,4 +257,10 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu); (val); \ }) +#define TO_ARM64_SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r), \ + sys_reg_Op1(SYS_ ## r), \ + sys_reg_CRn(SYS_ ## r), \ + sys_reg_CRm(SYS_ ## r), \ + sys_reg_Op2(SYS_ ## r)) + #endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */ diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index f1c153106c56..6fbb4b099855 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -297,8 +297,11 @@ void vcpu_set_ich_hcr(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3; + if (!vgic_is_v3(vcpu->kvm)) + return; + /* Hide GICv3 sysreg if necessary */ - if (!kvm_has_gicv3(vcpu->kvm)) { + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) { vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TC); return; diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig index c6108f000288..22d7f8ac58a3 100644 --- 
a/arch/hexagon/configs/comet_defconfig +++ b/arch/hexagon/configs/comet_defconfig @@ -46,10 +46,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig index 7787a4dd7c3c..f3268fed02fc 100644 --- a/arch/m68k/configs/stmark2_defconfig +++ b/arch/m68k/configs/stmark2_defconfig @@ -72,9 +72,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y # CONFIG_FILE_LOCKING is not set # CONFIG_DNOTIFY is not set # CONFIG_INOTIFY_USER is not set diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig index 176314f3c9aa..fbbdcb394ca2 100644 --- a/arch/microblaze/configs/mmu_defconfig +++ b/arch/microblaze/configs/mmu_defconfig @@ -73,7 +73,7 @@ CONFIG_FB_XILINX=y CONFIG_UIO=y CONFIG_UIO_PDRV_GENIRQ=y CONFIG_UIO_DMEM_GENIRQ=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_TMPFS=y CONFIG_CRAMFS=y diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index 97d2cd997285..349e9e0b4f54 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -144,9 +144,9 @@ CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=m -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=m +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_FS=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y diff --git a/arch/mips/configs/cobalt_defconfig 
b/arch/mips/configs/cobalt_defconfig index b0b551efac7c..6ee9ee391fdc 100644 --- a/arch/mips/configs/cobalt_defconfig +++ b/arch/mips/configs/cobalt_defconfig @@ -59,9 +59,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig index 85a4472cb058..52a63dd7aac7 100644 --- a/arch/mips/configs/decstation_64_defconfig +++ b/arch/mips/configs/decstation_64_defconfig @@ -133,9 +133,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_PROC_KCORE=y diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig index a3b2c8da2dde..59fb7ee5eeb0 100644 --- a/arch/mips/configs/decstation_defconfig +++ b/arch/mips/configs/decstation_defconfig @@ -129,9 +129,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_PROC_KCORE=y diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig index a476717b8a6a..8be1cb433e95 100644 --- a/arch/mips/configs/decstation_r4k_defconfig +++ b/arch/mips/configs/decstation_r4k_defconfig @@ -129,9 +129,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y 
+CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_PROC_KCORE=y diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig index cdedbb8a8f53..b6fe3c962464 100644 --- a/arch/mips/configs/fuloong2e_defconfig +++ b/arch/mips/configs/fuloong2e_defconfig @@ -173,7 +173,7 @@ CONFIG_USB_ISIGHTFW=m CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y CONFIG_AUTOFS_FS=y diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig index 2decf8b98d31..e123848f94ab 100644 --- a/arch/mips/configs/ip22_defconfig +++ b/arch/mips/configs/ip22_defconfig @@ -232,9 +232,9 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_INTF_DEV_UIE_EMUL=y CONFIG_RTC_DRV_DS1286=y CONFIG_EXT2_FS=m -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_QUOTA=y diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 5d079941fd20..1c10242b148b 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -272,9 +272,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig index 6db21e498faa..755cbf20f5a5 100644 --- a/arch/mips/configs/ip28_defconfig +++ b/arch/mips/configs/ip28_defconfig @@ -49,9 +49,9 @@ CONFIG_WATCHDOG=y CONFIG_INDYDOG=y # CONFIG_VGA_CONSOLE is not set CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y 
+CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_PROC_KCORE=y # CONFIG_PROC_PAGE_MONITOR is not set diff --git a/arch/mips/configs/ip30_defconfig b/arch/mips/configs/ip30_defconfig index a4524e785469..718f3060d9fa 100644 --- a/arch/mips/configs/ip30_defconfig +++ b/arch/mips/configs/ip30_defconfig @@ -143,9 +143,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig index d8ac11427f69..7568838eb08b 100644 --- a/arch/mips/configs/ip32_defconfig +++ b/arch/mips/configs/ip32_defconfig @@ -89,9 +89,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=m diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig index 65adb538030d..a790c2610fd3 100644 --- a/arch/mips/configs/jazz_defconfig +++ b/arch/mips/configs/jazz_defconfig @@ -69,7 +69,7 @@ CONFIG_FB_G364=y CONFIG_FRAMEBUFFER_CONSOLE=y # CONFIG_HWMON is not set CONFIG_EXT2_FS=m -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_AUTOFS_FS=m diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 5038a27d035f..8d3f20ed19b5 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -226,9 +226,9 @@ CONFIG_MMC=m CONFIG_LEDS_CLASS=y CONFIG_STAGING=y CONFIG_EXT2_FS=m -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_JFS_FS=m 
CONFIG_JFS_POSIX_ACL=y CONFIG_XFS_FS=m diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig index 0cc665d3ea34..aec1fd1902eb 100644 --- a/arch/mips/configs/loongson2k_defconfig +++ b/arch/mips/configs/loongson2k_defconfig @@ -298,9 +298,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig index 240efff37d98..575aaf242361 100644 --- a/arch/mips/configs/loongson3_defconfig +++ b/arch/mips/configs/loongson3_defconfig @@ -348,9 +348,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=y CONFIG_XFS_POSIX_ACL=y CONFIG_QUOTA=y diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig index 9fcbac829920..81704ec67f09 100644 --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@ -313,7 +313,7 @@ CONFIG_RTC_DRV_CMOS=y CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig index 19102386a81c..82a97f58bce1 100644 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@ -319,7 +319,7 @@ CONFIG_RTC_DRV_CMOS=y CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig 
b/arch/mips/configs/malta_qemu_32r6_defconfig index 1b98f6945c2d..accb471a1d93 100644 --- a/arch/mips/configs/malta_qemu_32r6_defconfig +++ b/arch/mips/configs/malta_qemu_32r6_defconfig @@ -148,7 +148,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig index 7b8905cb3400..6bda67c5f68f 100644 --- a/arch/mips/configs/maltaaprp_defconfig +++ b/arch/mips/configs/maltaaprp_defconfig @@ -149,7 +149,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig index 8249f6a51895..e4082537f80f 100644 --- a/arch/mips/configs/maltasmvp_defconfig +++ b/arch/mips/configs/maltasmvp_defconfig @@ -148,9 +148,9 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig index 21cb37668763..58f5af45fa98 100644 --- a/arch/mips/configs/maltasmvp_eva_defconfig +++ b/arch/mips/configs/maltasmvp_eva_defconfig @@ -152,7 +152,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig index 3df9cd669683..9bfef7de0d1c 100644 --- a/arch/mips/configs/maltaup_defconfig +++ b/arch/mips/configs/maltaup_defconfig 
@@ -148,7 +148,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_XFS_FS=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig index 1dd07c9d1812..0f9ef20744f9 100644 --- a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@ -319,7 +319,7 @@ CONFIG_RTC_DRV_CMOS=y CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 2707ab134639..c58d1a61d528 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -595,9 +595,9 @@ CONFIG_EXT2_FS=m CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=m -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=m +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig index 39a2419e1f3e..b507dc4dddd4 100644 --- a/arch/mips/configs/rm200_defconfig +++ b/arch/mips/configs/rm200_defconfig @@ -307,7 +307,7 @@ CONFIG_USB_SISUSBVGA=m CONFIG_USB_LD=m CONFIG_USB_TEST=m CONFIG_EXT2_FS=m -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_XFS_FS=m CONFIG_XFS_QUOTA=y CONFIG_AUTOFS_FS=m diff --git a/arch/openrisc/configs/or1klitex_defconfig b/arch/openrisc/configs/or1klitex_defconfig index 3e849d25838a..fb1eb9a68bd6 100644 --- a/arch/openrisc/configs/or1klitex_defconfig +++ b/arch/openrisc/configs/or1klitex_defconfig @@ -38,7 +38,7 @@ CONFIG_MMC_LITEX=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_LITEX_SOC_CONTROLLER=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_EXFAT_FS=y diff --git 
a/arch/openrisc/configs/virt_defconfig b/arch/openrisc/configs/virt_defconfig index a93a3e1e4f87..0b9979b35ca8 100644 --- a/arch/openrisc/configs/virt_defconfig +++ b/arch/openrisc/configs/virt_defconfig @@ -94,8 +94,8 @@ CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y # CONFIG_DNOTIFY is not set CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig index 94928d114d4c..52031bde9f17 100644 --- a/arch/parisc/configs/generic-32bit_defconfig +++ b/arch/parisc/configs/generic-32bit_defconfig @@ -232,8 +232,8 @@ CONFIG_AUXDISPLAY=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V2=y diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig index d8cd7f858b2a..1aec04c09d0b 100644 --- a/arch/parisc/configs/generic-64bit_defconfig +++ b/arch/parisc/configs/generic-64bit_defconfig @@ -251,8 +251,8 @@ CONFIG_STAGING=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_BTRFS_FS=m CONFIG_QUOTA=y diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 5782e743fd27..4ebc333dd786 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1747,6 +1747,9 @@ void __init fadump_setup_param_area(void) { phys_addr_t range_start, range_end; + if (!fw_dump.fadump_enabled) + return; + if (!fw_dump.param_area_supported || fw_dump.dump_active) return; diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 1302b5ac5672..89a1b8c21ab4 100644 --- 
a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -916,8 +916,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, * it fires once. */ if (single_escalation) { - struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[prio]); xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); vcpu->arch.xive_esc_raddr = xd->eoi_page; @@ -1612,7 +1611,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, /* Grab info about irq */ state->pt_number = hw_irq; - state->pt_data = irq_data_get_irq_handler_data(host_data); + state->pt_data = irq_data_get_irq_chip_data(host_data); /* * Configure the IRQ to match the existing configuration of @@ -1787,8 +1786,7 @@ void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) */ void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, int irq) { - struct irq_data *d = irq_get_irq_data(irq); - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_get_chip_data(irq); /* * This slightly odd sequence gives the right result @@ -2827,9 +2825,7 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu) i0, i1); } if (xc->esc_virq[i]) { - struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); - struct xive_irq_data *xd = - irq_data_get_irq_handler_data(d); + struct xive_irq_data *xd = irq_get_chip_data(xc->esc_virq[i]); u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); seq_printf(m, " ESC %d %c%c EOI @%llx", diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c index b65256a63e87..9c9650319f3b 100644 --- a/arch/powerpc/platforms/powernv/vas.c +++ b/arch/powerpc/platforms/powernv/vas.c @@ -121,7 +121,7 @@ static int init_vas_instance(struct platform_device *pdev) return -EINVAL; } - xd = irq_get_handler_data(vinst->virq); + xd = irq_get_chip_data(vinst->virq); if (!xd) { 
pr_err("Inst%d: Invalid virq %d\n", vinst->vas_id, vinst->virq); diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 825f9432e03d..a82aaa786e9e 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c @@ -443,8 +443,7 @@ static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev */ static void pseries_msi_ops_teardown(struct irq_domain *domain, msi_alloc_info_t *arg) { - struct msi_desc *desc = arg->desc; - struct pci_dev *pdev = msi_desc_to_pci_dev(desc); + struct pci_dev *pdev = to_pci_dev(domain->dev); rtas_disable_msi(pdev); } diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 625361a15424..8d0123b0ae84 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -1580,7 +1580,7 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) cpu, irq); #endif raw_spin_lock(&desc->lock); - xd = irq_desc_get_handler_data(desc); + xd = irq_desc_get_chip_data(desc); /* * Clear saved_p to indicate that it's no longer pending diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 0c6038dc5dfd..22cda9c452d2 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -29,7 +29,7 @@ config RISCV select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX - select ARCH_HAS_ELF_CORE_EFLAGS + select ARCH_HAS_ELF_CORE_EFLAGS if BINFMT_ELF && ELF_CORE select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index 7559d728c5ff..78b18e2fd771 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -3,14 +3,18 @@ #ifndef __ASM_KGDB_H_ #define __ASM_KGDB_H_ +#include + #ifdef __KERNEL__ #define GDB_SIZEOF_REG sizeof(unsigned long) -#define DBG_MAX_REG_NUM (36) -#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG) +#define 
DBG_MAX_REG_NUM 36 +#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG) #define CACHE_FLUSH_IS_SAFE 1 #define BUFMAX 2048 +static_assert(BUFMAX > NUMREGBYTES, + "As per KGDB documentation, BUFMAX must be larger than NUMREGBYTES"); #ifdef CONFIG_RISCV_ISA_C #define BREAK_INSTR_SIZE 2 #else @@ -97,6 +101,7 @@ extern unsigned long kgdb_compiled_break; #define DBG_REG_STATUS_OFF 33 #define DBG_REG_BADADDR_OFF 34 #define DBG_REG_CAUSE_OFF 35 +/* NOTE: increase DBG_MAX_REG_NUM if you add more values here. */ extern const char riscv_gdb_stub_feature[64]; diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c index a1e38ecfc8be..3f50d3dd76c6 100644 --- a/arch/riscv/kernel/cpu-hotplug.c +++ b/arch/riscv/kernel/cpu-hotplug.c @@ -54,6 +54,7 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) pr_notice("CPU%u: off\n", cpu); + clear_tasks_mm_cpumask(cpu); /* Verify from the firmware if the cpu is really stopped*/ if (cpu_ops->cpu_is_stopped) ret = cpu_ops->cpu_is_stopped(cpu); diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index d3d92a4becc7..9b9dec6893b8 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -455,7 +455,7 @@ SYM_DATA_START_LOCAL(excp_vect_table) RISCV_PTR do_trap_ecall_s RISCV_PTR do_trap_unknown RISCV_PTR do_trap_ecall_m - /* instruciton page fault */ + /* instruction page fault */ ALT_PAGE_FAULT(RISCV_PTR do_page_fault) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index c0738d6c6498..8723390c7cad 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -49,10 +49,15 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) post_kprobe_handler(p, kcb, regs); } -static bool __kprobes arch_check_kprobe(struct kprobe *p) +static bool __kprobes arch_check_kprobe(unsigned long addr) { - unsigned long tmp = (unsigned 
long)p->addr - p->offset; - unsigned long addr = (unsigned long)p->addr; + unsigned long tmp, offset; + + /* start iterating at the closest preceding symbol */ + if (!kallsyms_lookup_size_offset(addr, NULL, &offset)) + return false; + + tmp = addr - offset; while (tmp <= addr) { if (tmp == addr) @@ -71,7 +76,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) if ((unsigned long)insn & 0x1) return -EILSEQ; - if (!arch_check_kprobe(p)) + if (!arch_check_kprobe((unsigned long)p->addr)) return -EILSEQ; /* copy instruction */ diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 14235e58c539..b5bc5fc65cea 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -331,11 +331,14 @@ void __init setup_arch(char **cmdline_p) /* Parse the ACPI tables for possible boot-time configuration */ acpi_boot_table_init(); + if (acpi_disabled) { #if IS_ENABLED(CONFIG_BUILTIN_DTB) - unflatten_and_copy_device_tree(); + unflatten_and_copy_device_tree(); #else - unflatten_device_tree(); + unflatten_device_tree(); #endif + } + misc_mem_init(); init_resources(); diff --git a/arch/riscv/kernel/tests/kprobes/test-kprobes.h b/arch/riscv/kernel/tests/kprobes/test-kprobes.h index 3886ab491ecb..537f44aa9d3f 100644 --- a/arch/riscv/kernel/tests/kprobes/test-kprobes.h +++ b/arch/riscv/kernel/tests/kprobes/test-kprobes.h @@ -11,7 +11,7 @@ #define KPROBE_TEST_MAGIC_LOWER 0x0000babe #define KPROBE_TEST_MAGIC_UPPER 0xcafe0000 -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ /* array of addresses to install kprobes */ extern void *test_kprobes_addresses[]; @@ -19,6 +19,6 @@ extern void *test_kprobes_addresses[]; /* array of functions that return KPROBE_TEST_MAGIC */ extern long (*test_kprobes_functions[])(void); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* TEST_KPROBES_H */ diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig index b6f36c938f1d..48b2e97114f9 100644 --- a/arch/sh/configs/ap325rxa_defconfig +++ 
b/arch/sh/configs/ap325rxa_defconfig @@ -81,10 +81,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig index 9c2644443c4d..85db9ce42d1a 100644 --- a/arch/sh/configs/apsh4a3a_defconfig +++ b/arch/sh/configs/apsh4a3a_defconfig @@ -60,8 +60,7 @@ CONFIG_FONT_8x16=y CONFIG_LOGO=y # CONFIG_USB_SUPPORT is not set CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index 137573610ec4..e8b3b720578b 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig @@ -88,8 +88,7 @@ CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig index e76694aace25..fcca7cc5a75a 100644 --- a/arch/sh/configs/ecovec24_defconfig +++ b/arch/sh/configs/ecovec24_defconfig @@ -109,10 +109,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/edosk7760_defconfig b/arch/sh/configs/edosk7760_defconfig index f427a95bcd21..98f4611ba553 100644 --- 
a/arch/sh/configs/edosk7760_defconfig +++ b/arch/sh/configs/edosk7760_defconfig @@ -87,8 +87,7 @@ CONFIG_SND_SOC=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_XIP=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=y diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index da176f100e00..e5d102cbff89 100644 --- a/arch/sh/configs/espt_defconfig +++ b/arch/sh/configs/espt_defconfig @@ -59,8 +59,7 @@ CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_AUTOFS_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/landisk_defconfig b/arch/sh/configs/landisk_defconfig index 924bb3233b0b..22177aa8f961 100644 --- a/arch/sh/configs/landisk_defconfig +++ b/arch/sh/configs/landisk_defconfig @@ -93,8 +93,7 @@ CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m CONFIG_USB_SISUSBVGA=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_ISO9660_FS=m CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/sh/configs/lboxre2_defconfig b/arch/sh/configs/lboxre2_defconfig index 0307bb2be79f..ff992301622b 100644 --- a/arch/sh/configs/lboxre2_defconfig +++ b/arch/sh/configs/lboxre2_defconfig @@ -49,8 +49,7 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_RTC_CLASS=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/magicpanelr2_defconfig b/arch/sh/configs/magicpanelr2_defconfig index 93b9aa32dc7c..a29fb912a242 100644 --- a/arch/sh/configs/magicpanelr2_defconfig +++ b/arch/sh/configs/magicpanelr2_defconfig @@ -64,9 +64,8 @@ CONFIG_RTC_CLASS=y # CONFIG_RTC_HCTOSYS is not set CONFIG_RTC_DRV_SH=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not 
set -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set # CONFIG_DNOTIFY is not set CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/r7780mp_defconfig b/arch/sh/configs/r7780mp_defconfig index f28b8c4181c2..58b792dacfec 100644 --- a/arch/sh/configs/r7780mp_defconfig +++ b/arch/sh/configs/r7780mp_defconfig @@ -74,8 +74,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_RS5C372=y CONFIG_RTC_DRV_SH=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/sh/configs/r7785rp_defconfig b/arch/sh/configs/r7785rp_defconfig index 3a4239f20ff1..7edf18451158 100644 --- a/arch/sh/configs/r7785rp_defconfig +++ b/arch/sh/configs/r7785rp_defconfig @@ -69,8 +69,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_RS5C372=y CONFIG_RTC_DRV_SH=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_FUSE_FS=m CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig index e4ef259425c4..28a81efefb02 100644 --- a/arch/sh/configs/rsk7264_defconfig +++ b/arch/sh/configs/rsk7264_defconfig @@ -59,8 +59,7 @@ CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE_DEBUG=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig index e0d1560b2bfd..f8bfa46643ff 100644 --- a/arch/sh/configs/rsk7269_defconfig +++ b/arch/sh/configs/rsk7269_defconfig @@ -43,8 +43,7 @@ CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y CONFIG_USB_STORAGE_DEBUG=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y diff --git a/arch/sh/configs/sdk7780_defconfig b/arch/sh/configs/sdk7780_defconfig 
index 9870d16d9711..311817161afb 100644 --- a/arch/sh/configs/sdk7780_defconfig +++ b/arch/sh/configs/sdk7780_defconfig @@ -102,9 +102,8 @@ CONFIG_LEDS_CLASS=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_AUTOFS_FS=y CONFIG_ISO9660_FS=y CONFIG_MSDOS_FS=y diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index 07894f13441e..2433aa5f44a8 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig @@ -161,8 +161,7 @@ CONFIG_STAGING=y # CONFIG_STAGING_EXCLUDE_BUILD is not set CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_XFS_FS=y CONFIG_BTRFS_FS=y diff --git a/arch/sh/configs/se7343_defconfig b/arch/sh/configs/se7343_defconfig index 75db12fb9ad1..b0baa5771c26 100644 --- a/arch/sh/configs/se7343_defconfig +++ b/arch/sh/configs/se7343_defconfig @@ -84,8 +84,7 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_ISP116X_HCD=y CONFIG_UIO=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y diff --git a/arch/sh/configs/se7712_defconfig b/arch/sh/configs/se7712_defconfig index 8770a72e6a63..1078c286a610 100644 --- a/arch/sh/configs/se7712_defconfig +++ b/arch/sh/configs/se7712_defconfig @@ -83,8 +83,7 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y diff --git a/arch/sh/configs/se7721_defconfig b/arch/sh/configs/se7721_defconfig index b15c6406a0e8..edb9e0d2dce5 100644 --- a/arch/sh/configs/se7721_defconfig +++ b/arch/sh/configs/se7721_defconfig @@ -107,8 +107,7 @@ 
CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y # CONFIG_DNOTIFY is not set CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/sh/configs/se7722_defconfig b/arch/sh/configs/se7722_defconfig index 5327a2f70980..33daa0a17a32 100644 --- a/arch/sh/configs/se7722_defconfig +++ b/arch/sh/configs/se7722_defconfig @@ -44,8 +44,7 @@ CONFIG_HW_RANDOM=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig index 9501e69eb886..d572655f842d 100644 --- a/arch/sh/configs/se7724_defconfig +++ b/arch/sh/configs/se7724_defconfig @@ -110,10 +110,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig index 4d75c92cac10..3d194d81c92b 100644 --- a/arch/sh/configs/sh03_defconfig +++ b/arch/sh/configs/sh03_defconfig @@ -57,9 +57,8 @@ CONFIG_WATCHDOG=y CONFIG_SH_WDT=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_AUTOFS_FS=y CONFIG_ISO9660_FS=m CONFIG_JOLIET=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index cc6292b3235a..889daa5d2faa 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -95,7 +95,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_INTF_DEV_UIE_EMUL=y CONFIG_DMADEVICES=y 
CONFIG_TIMB_DMA=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig index 48a0f9beb116..25e9d22779b3 100644 --- a/arch/sh/configs/sh7757lcr_defconfig +++ b/arch/sh/configs/sh7757lcr_defconfig @@ -64,7 +64,7 @@ CONFIG_MMC=y CONFIG_MMC_SDHI=y CONFIG_MMC_SH_MMCIF=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_ISO9660_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index b77b3313157e..e7b72ff377a8 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -61,8 +61,7 @@ CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y CONFIG_MMC=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_AUTOFS_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig index 44f9b2317f09..17d2471d8e51 100644 --- a/arch/sh/configs/sh7785lcr_32bit_defconfig +++ b/arch/sh/configs/sh7785lcr_32bit_defconfig @@ -113,8 +113,7 @@ CONFIG_RTC_DRV_RS5C372=y CONFIG_DMADEVICES=y CONFIG_UIO=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig index aec74b0e7003..34c8fe755add 100644 --- a/arch/sh/configs/sh7785lcr_defconfig +++ b/arch/sh/configs/sh7785lcr_defconfig @@ -90,8 +90,7 @@ CONFIG_USB_TEST=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_RS5C372=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_NTFS_FS=y diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig index 9a0df5ea3866..52e7a42d66c7 100644 --- a/arch/sh/configs/shx3_defconfig +++ 
b/arch/sh/configs/shx3_defconfig @@ -84,8 +84,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=y CONFIG_UIO=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig index 8ef72b8dbcd3..2c474645ec36 100644 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@ -215,9 +215,8 @@ CONFIG_USB_SERIAL_PL2303=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=m CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set +CONFIG_EXT4_FS=y +# CONFIG_EXT4_FS_XATTR is not set CONFIG_XFS_FS=m CONFIG_FUSE_FS=m CONFIG_ISO9660_FS=m diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig index 103b81ec1ffb..b0c2ba478353 100644 --- a/arch/sh/configs/ul2_defconfig +++ b/arch/sh/configs/ul2_defconfig @@ -66,8 +66,7 @@ CONFIG_USB_R8A66597_HCD=y CONFIG_USB_STORAGE=y CONFIG_MMC=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index 00ef62133b04..e6d807f52253 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig @@ -114,8 +114,7 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_SH=y CONFIG_RTC_DRV_GENERIC=y CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_BTRFS_FS=y CONFIG_MSDOS_FS=y diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 7a7c4dec2925..127940aafc39 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -187,10 +187,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is 
not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 5398db4dedb4..ccaa51ce63f6 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1355,11 +1355,23 @@ static __init int print_s5_reset_status_mmio(void) return 0; value = ioread32(addr); - iounmap(addr); /* Value with "all bits set" is an error response and should be ignored. */ - if (value == U32_MAX) + if (value == U32_MAX) { + iounmap(addr); return 0; + } + + /* + * Clear all reason bits so they won't be retained if the next reset + * does not update the register. Besides, some bits are never cleared by + * hardware so it's software's responsibility to clear them. + * + * Writing the value back effectively clears all reason bits as they are + * write-1-to-clear. + */ + iowrite32(value, addr); + iounmap(addr); for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) { if (!(value & BIT(i))) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c8945610d455..2cd25a0d4637 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -242,7 +242,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d, u32 unused, u32 rmid, enum resctrl_event_id eventid, u64 *val, void *ignored) { + struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d); int cpu = cpumask_any(&d->hdr.cpu_mask); + struct arch_mbm_state *am; u64 msr_val; u32 prmid; int ret; @@ -251,12 +253,16 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d, prmid = logical_rmid_to_physical_rmid(cpu, rmid); ret = __rmid_read_phys(prmid, eventid, &msr_val); - if (ret) - return ret; - *val = get_corrected_val(r, d, rmid, eventid, msr_val); + if (!ret) { + *val = get_corrected_val(r, d, rmid, eventid, 
msr_val); + } else if (ret == -EINVAL) { + am = get_arch_mbm_state(hw_dom, rmid, eventid); + if (am) + am->prev_msr = 0; + } - return 0; + return ret; } static int __cntr_id_read(u32 cntr_id, u64 *val) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 40ac4cb44ed2..487ad19a236e 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -108,16 +108,18 @@ void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops) bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL; int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS; - perf_get_x86_pmu_capability(&kvm_host_pmu); - /* * Hybrid PMUs don't play nice with virtualization without careful * configuration by userspace, and KVM's APIs for reporting supported * vPMU features do not account for hybrid PMUs. Disable vPMU support * for hybrid PMUs until KVM gains a way to let userspace opt-in. */ - if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) + if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { enable_pmu = false; + memset(&kvm_host_pmu, 0, sizeof(kvm_host_pmu)); + } else { + perf_get_x86_pmu_capability(&kvm_host_pmu); + } if (enable_pmu) { /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 42ecd093bb4c..b4b5d2d09634 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13941,10 +13941,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) #ifdef CONFIG_KVM_GUEST_MEMFD /* - * KVM doesn't yet support mmap() on guest_memfd for VMs with private memory - * (the private vs. shared tracking needs to be moved into guest_memfd). + * KVM doesn't yet support initializing guest_memfd memory as shared for VMs + * with private memory (the private vs. shared tracking needs to be moved into + * guest_memfd). 
 */ -bool kvm_arch_supports_gmem_mmap(struct kvm *kvm) +bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm) { return !kvm_arch_has_private_mem(kvm); } diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index d2d54b8c4dbb..970981893c9b 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -446,7 +446,7 @@ static void cpa_flush(struct cpa_data *cpa, int cache) } start = fix_addr(__cpa_addr(cpa, 0)); - end = fix_addr(__cpa_addr(cpa, cpa->numpages)); + end = start + cpa->numpages * PAGE_SIZE; if (cpa->force_flush_all) end = TLB_FLUSH_ALL; diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 39f80111e6f1..5d221709353e 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -911,11 +911,31 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, * CR3 and cpu_tlbstate.loaded_mm are not all in sync. */ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); - barrier(); - /* Start receiving IPIs and then read tlb_gen (and LAM below) */ + /* + * Make sure this CPU is set in mm_cpumask() such that we'll + * receive invalidation IPIs. + * + * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic + * operation, or explicitly provide one. Such that: + * + * switch_mm_irqs_off() flush_tlb_mm_range() + * smp_store_release(loaded_mm, SWITCHING); atomic64_inc_return(tlb_gen) + * smp_mb(); // here // smp_mb() implied + * atomic64_read(tlb_gen); this_cpu_read(loaded_mm); + * + * we properly order against flush_tlb_mm_range(), where the + * loaded_mm load can happen in native_flush_tlb_multi() -> + * should_flush_tlb(). + * + * This way switch_mm() must see the new tlb_gen or + * flush_tlb_mm_range() must see the new loaded_mm, or both.
+ */ if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next))) cpumask_set_cpu(cpu, mm_cpumask(next)); + else + smp_mb(); + next_tlb_gen = atomic64_read(&next->context.tlb_gen); ns = choose_new_asid(next, next_tlb_gen); diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig index f2af1a32c9c7..dc942bbac69f 100644 --- a/arch/xtensa/configs/audio_kc705_defconfig +++ b/arch/xtensa/configs/audio_kc705_defconfig @@ -103,7 +103,7 @@ CONFIG_SND_SIMPLE_CARD=y # CONFIG_USB_SUPPORT is not set CONFIG_COMMON_CLK_CDCE706=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig index 88ed5284e21c..81a057f25f21 100644 --- a/arch/xtensa/configs/cadence_csp_defconfig +++ b/arch/xtensa/configs/cadence_csp_defconfig @@ -80,7 +80,7 @@ CONFIG_SOFT_WATCHDOG=y # CONFIG_VGA_CONSOLE is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig index 4427907becca..3ee7e1c56556 100644 --- a/arch/xtensa/configs/generic_kc705_defconfig +++ b/arch/xtensa/configs/generic_kc705_defconfig @@ -90,7 +90,7 @@ CONFIG_SOFT_WATCHDOG=y # CONFIG_VGA_CONSOLE is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig index 5828228522ba..c6e96f0aa700 100644 --- a/arch/xtensa/configs/nommu_kc705_defconfig +++ b/arch/xtensa/configs/nommu_kc705_defconfig @@ -91,7 +91,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=y # CONFIG_VGA_CONSOLE is not set # CONFIG_USB_SUPPORT is not set 
-CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig index 326966ca7831..373d42b9e510 100644 --- a/arch/xtensa/configs/smp_lx200_defconfig +++ b/arch/xtensa/configs/smp_lx200_defconfig @@ -94,7 +94,7 @@ CONFIG_SOFT_WATCHDOG=y # CONFIG_VGA_CONSOLE is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y diff --git a/arch/xtensa/configs/virt_defconfig b/arch/xtensa/configs/virt_defconfig index e37048985b47..72628d31e87a 100644 --- a/arch/xtensa/configs/virt_defconfig +++ b/arch/xtensa/configs/virt_defconfig @@ -76,7 +76,7 @@ CONFIG_LOGO=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_INPUT=y # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig index ee47438f9b51..5d6013ea70fc 100644 --- a/arch/xtensa/configs/xip_kc705_defconfig +++ b/arch/xtensa/configs/xip_kc705_defconfig @@ -82,7 +82,7 @@ CONFIG_SOFT_WATCHDOG=y # CONFIG_VGA_CONSOLE is not set # CONFIG_USB_SUPPORT is not set # CONFIG_IOMMU_SUPPORT is not set -CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_VFAT_FS=y CONFIG_PROC_KCORE=y diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index f93de34fe87d..3cffb68ba5d8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx) } /* * Similar to blkg_conf_open_bdev, but additionally freezes the queue, - * acquires q->elevator_lock, and ensures the correct locking order - * between q->elevator_lock and q->rq_qos_mutex. + * ensures the correct locking order between freeze queue and q->rq_qos_mutex. * * This function returns negative error on failure. 
On success it returns * memflags which must be saved and later passed to blkg_conf_exit_frozen @@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx) * At this point, we haven’t started protecting anything related to QoS, * so we release q->rq_qos_mutex here, which was first acquired in blkg_ * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing - * the queue and acquiring q->elevator_lock to maintain the correct - * locking order. + * the queue to maintain the correct locking order. */ mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex); memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue); - mutex_lock(&ctx->bdev->bd_queue->elevator_lock); mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex); return memflags; @@ -995,9 +992,8 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx) EXPORT_SYMBOL_GPL(blkg_conf_exit); /* - * Similar to blkg_conf_exit, but also unfreezes the queue and releases - * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen - * is used to open the bdev. + * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used + * when blkg_conf_open_bdev_frozen is used to open the bdev. */ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags) { @@ -1005,7 +1001,6 @@ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags) struct request_queue *q = ctx->bdev->bd_queue; blkg_conf_exit(ctx); - mutex_unlock(&q->elevator_lock); blk_mq_unfreeze_queue(q, memflags); } } diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index d06bb137a743..e0bed16485c3 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e, if (blk_mq_is_shared_tags(flags)) { /* Shared tags are stored at index 0 in @et->tags. 
*/ q->sched_shared_tags = et->tags[0]; - blk_mq_tag_update_sched_shared_tags(q); + blk_mq_tag_update_sched_shared_tags(q, et->nr_requests); } queue_for_each_hw_ctx(q, hctx, i) { diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index c7a4d4b9cc87..5b664dbdf655 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags); } -void blk_mq_tag_update_sched_shared_tags(struct request_queue *q) +void blk_mq_tag_update_sched_shared_tags(struct request_queue *q, + unsigned int nr) { sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags, - q->nr_requests - q->tag_set->reserved_tags); + nr - q->tag_set->reserved_tags); } /** diff --git a/block/blk-mq.c b/block/blk-mq.c index 09f579414161..d626d32f6e57 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q, * tags can't grow, see blk_mq_alloc_sched_tags(). 
*/ if (q->elevator) - blk_mq_tag_update_sched_shared_tags(q); + blk_mq_tag_update_sched_shared_tags(q, nr); else blk_mq_tag_resize_shared_tags(set, nr); } else if (!q->elevator) { diff --git a/block/blk-mq.h b/block/blk-mq.h index af42dc018808..c4fccdeb5441 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags); void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size); -void blk_mq_tag_update_sched_shared_tags(struct request_queue *q); +void blk_mq_tag_update_sched_shared_tags(struct request_queue *q, + unsigned int nr); void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn, diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig index bb01cebc42bf..bdf48ccafcf2 100644 --- a/drivers/accel/Kconfig +++ b/drivers/accel/Kconfig @@ -25,6 +25,7 @@ menuconfig DRM_ACCEL and debugfs). 
source "drivers/accel/amdxdna/Kconfig" +source "drivers/accel/ethosu/Kconfig" source "drivers/accel/habanalabs/Kconfig" source "drivers/accel/ivpu/Kconfig" source "drivers/accel/qaic/Kconfig" diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile index ffc3fa588666..1d3a7251b950 100644 --- a/drivers/accel/Makefile +++ b/drivers/accel/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/ +obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu/ obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/ obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/ obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/ diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile index 6797dac65efa..6344aaf523fa 100644 --- a/drivers/accel/amdxdna/Makefile +++ b/drivers/accel/amdxdna/Makefile @@ -14,6 +14,7 @@ amdxdna-y := \ amdxdna_mailbox.o \ amdxdna_mailbox_helper.o \ amdxdna_pci_drv.o \ + amdxdna_pm.o \ amdxdna_sysfs.o \ amdxdna_ubuf.o \ npu1_regs.o \ diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO index ad8ac6e315b6..0e4bbebeaedf 100644 --- a/drivers/accel/amdxdna/TODO +++ b/drivers/accel/amdxdna/TODO @@ -1,2 +1 @@ - Add debugfs support -- Add debug BO support diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c index e9f9b1fa5dc1..b78c47ed0d34 100644 --- a/drivers/accel/amdxdna/aie2_ctx.c +++ b/drivers/accel/amdxdna/aie2_ctx.c @@ -21,6 +21,7 @@ #include "amdxdna_gem.h" #include "amdxdna_mailbox.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_pm.h" static bool force_cmdlist; module_param(force_cmdlist, bool, 0600); @@ -88,7 +89,7 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw goto out; } - ret = aie2_config_cu(hwctx); + ret = aie2_config_cu(hwctx, NULL); if (ret) { XDNA_ERR(xdna, "Config cu failed, ret %d", ret); goto out; @@ -167,14 +168,11 @@ static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg) int aie2_hwctx_resume(struct amdxdna_client 
*client) { - struct amdxdna_dev *xdna = client->xdna; - /* * The resume path cannot guarantee that mailbox channel can be * regenerated. If this happen, when submit message to this * mailbox channel, error will return. */ - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb); } @@ -184,6 +182,8 @@ aie2_sched_notify(struct amdxdna_sched_job *job) struct dma_fence *fence = job->fence; trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq); + + amdxdna_pm_suspend_put(job->hwctx->client->xdna); job->hwctx->priv->completed++; dma_fence_signal(fence); @@ -204,10 +204,13 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size) cmd_abo = job->cmd_bo; - if (unlikely(!data)) + if (unlikely(job->job_timeout)) { + amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT); + ret = -EINVAL; goto out; + } - if (unlikely(size != sizeof(u32))) { + if (unlikely(!data) || unlikely(size != sizeof(u32))) { amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT); ret = -EINVAL; goto out; @@ -226,11 +229,10 @@ out: } static int -aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size) +aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size) { struct amdxdna_sched_job *job = handle; int ret = 0; - u32 status; if (unlikely(!data)) goto out; @@ -240,8 +242,7 @@ aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size) goto out; } - status = readl(data); - XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status); + job->drv_cmd->result = readl(data); out: aie2_sched_notify(job); @@ -260,6 +261,13 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size) int ret = 0; cmd_abo = job->cmd_bo; + + if (unlikely(job->job_timeout)) { + amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT); + ret = -EINVAL; + goto out; + } + if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) { amdxdna_cmd_set_state(cmd_abo, 
ERT_CMD_STATE_ABORT); ret = -EINVAL; @@ -314,8 +322,18 @@ aie2_sched_job_run(struct drm_sched_job *sched_job) kref_get(&job->refcnt); fence = dma_fence_get(job->fence); - if (unlikely(!cmd_abo)) { - ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler); + if (job->drv_cmd) { + switch (job->drv_cmd->opcode) { + case SYNC_DEBUG_BO: + ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler); + break; + case ATTACH_DEBUG_BO: + ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler); + break; + default: + ret = -EINVAL; + break; + } goto out; } @@ -362,6 +380,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job) xdna = hwctx->client->xdna; trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq); + job->job_timeout = true; mutex_lock(&xdna->dev_lock); aie2_hwctx_stop(xdna, hwctx, sched_job); @@ -531,13 +550,12 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = HWCTX_MAX_CMDS, .timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT), - .name = hwctx->name, + .name = "amdxdna_js", .dev = xdna->ddev.dev, }; struct drm_gpu_scheduler *sched; struct amdxdna_hwctx_priv *priv; struct amdxdna_gem_obj *heap; - struct amdxdna_dev_hdl *ndev; int i, ret; priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL); @@ -610,10 +628,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) goto free_entity; } + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto free_col_list; + ret = aie2_alloc_resource(hwctx); if (ret) { XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret); - goto free_col_list; + goto suspend_put; } ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id, @@ -628,10 +650,9 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret); goto release_resource; } + amdxdna_pm_suspend_put(xdna); hwctx->status = HWCTX_STAT_INIT; - ndev = xdna->dev_handle; - ndev->hwctx_num++; init_waitqueue_head(&priv->job_free_wq); XDNA_DBG(xdna, "hwctx %s init 
completed", hwctx->name); @@ -640,6 +661,8 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx) release_resource: aie2_release_resource(hwctx); +suspend_put: + amdxdna_pm_suspend_put(xdna); free_col_list: kfree(hwctx->col_list); free_entity: @@ -662,13 +685,10 @@ free_priv: void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx) { - struct amdxdna_dev_hdl *ndev; struct amdxdna_dev *xdna; int idx; xdna = hwctx->client->xdna; - ndev = xdna->dev_handle; - ndev->hwctx_num--; XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq); drm_sched_entity_destroy(&hwctx->priv->entity); @@ -697,6 +717,14 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx) kfree(hwctx->cus); } +static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size) +{ + struct amdxdna_hwctx *hwctx = handle; + + amdxdna_pm_suspend_put(hwctx->client->xdna); + return 0; +} + static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size) { struct amdxdna_hwctx_param_config_cu *config = buf; @@ -728,10 +756,14 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size if (!hwctx->cus) return -ENOMEM; - ret = aie2_config_cu(hwctx); + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto free_cus; + + ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler); if (ret) { XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret); - goto free_cus; + goto pm_suspend_put; } wmb(); /* To avoid locking in command submit when check status */ @@ -739,12 +771,82 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size return 0; +pm_suspend_put: + amdxdna_pm_suspend_put(xdna); free_cus: kfree(hwctx->cus); hwctx->cus = NULL; return ret; } +static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq) +{ + struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq); + + if (!out_fence) { + XDNA_ERR(hwctx->client->xdna, "Failed to get fence"); + return; + } + + dma_fence_wait_timeout(out_fence, false, 
MAX_SCHEDULE_TIMEOUT); + dma_fence_put(out_fence); +} + +static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl, + bool attach) +{ + struct amdxdna_client *client = hwctx->client; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_drv_cmd cmd = { 0 }; + struct amdxdna_gem_obj *abo; + u64 seq; + int ret; + + abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV); + if (!abo) { + XDNA_ERR(xdna, "Get bo %d failed", bo_hdl); + return -EINVAL; + } + + if (attach) { + if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) { + ret = -EBUSY; + goto put_obj; + } + cmd.opcode = ATTACH_DEBUG_BO; + } else { + if (abo->assigned_hwctx != hwctx->id) { + ret = -EINVAL; + goto put_obj; + } + cmd.opcode = DETACH_DEBUG_BO; + } + + ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE, + &bo_hdl, 1, hwctx->id, &seq); + if (ret) { + XDNA_ERR(xdna, "Submit command failed"); + goto put_obj; + } + + aie2_cmd_wait(hwctx, seq); + if (cmd.result) { + XDNA_ERR(xdna, "Response failure 0x%x", cmd.result); + goto put_obj; + } + + if (attach) + abo->assigned_hwctx = hwctx->id; + else + abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE; + + XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name); + +put_obj: + amdxdna_gem_put_obj(abo); + return ret; +} + int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size) { struct amdxdna_dev *xdna = hwctx->client->xdna; @@ -754,14 +856,40 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu case DRM_AMDXDNA_HWCTX_CONFIG_CU: return aie2_hwctx_cu_config(hwctx, buf, size); case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF: + return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true); case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF: - return -EOPNOTSUPP; + return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false); default: XDNA_DBG(xdna, "Not supported type %d", type); return -EOPNOTSUPP; } } +int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl) +{ + 
struct amdxdna_client *client = hwctx->client; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_drv_cmd cmd = { 0 }; + u64 seq; + int ret; + + cmd.opcode = SYNC_DEBUG_BO; + ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE, + &debug_bo_hdl, 1, hwctx->id, &seq); + if (ret) { + XDNA_ERR(xdna, "Submit command failed"); + return ret; + } + + aie2_cmd_wait(hwctx, seq); + if (cmd.result) { + XDNA_ERR(xdna, "Response failure 0x%x", cmd.result); + return -EINVAL; + } + + return 0; +} + static int aie2_populate_range(struct amdxdna_gem_obj *abo) { struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev); @@ -862,11 +990,15 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, goto free_chain; } + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto cleanup_job; + retry: ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx); if (ret) { XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret); - goto cleanup_job; + goto suspend_put; } for (i = 0; i < job->bo_cnt; i++) { @@ -874,7 +1006,7 @@ retry: if (ret) { XDNA_WARN(xdna, "Failed to reserve fences %d", ret); drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx); - goto cleanup_job; + goto suspend_put; } } @@ -889,12 +1021,12 @@ retry: msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); } else if (time_after(jiffies, timeout)) { ret = -ETIME; - goto cleanup_job; + goto suspend_put; } ret = aie2_populate_range(abo); if (ret) - goto cleanup_job; + goto suspend_put; goto retry; } } @@ -920,6 +1052,8 @@ retry: return 0; +suspend_put: + amdxdna_pm_suspend_put(xdna); cleanup_job: drm_sched_job_cleanup(&job->base); free_chain: diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c index 5ee905632a39..d452008ec4f4 100644 --- a/drivers/accel/amdxdna/aie2_error.c +++ b/drivers/accel/amdxdna/aie2_error.c @@ -13,6 +13,7 @@ #include "aie2_msg_priv.h" #include "aie2_pci.h" +#include "amdxdna_error.h" #include "amdxdna_mailbox.h" #include 
"amdxdna_pci_drv.h" @@ -46,6 +47,7 @@ enum aie_module_type { AIE_MEM_MOD = 0, AIE_CORE_MOD, AIE_PL_MOD, + AIE_UNKNOWN_MOD, }; enum aie_error_category { @@ -143,6 +145,31 @@ static const struct aie_event_category aie_ml_shim_tile_event_cat[] = { EVENT_CATEGORY(74U, AIE_ERROR_LOCK), }; +static const enum amdxdna_error_num aie_cat_err_num_map[] = { + [AIE_ERROR_SATURATION] = AMDXDNA_ERROR_NUM_AIE_SATURATION, + [AIE_ERROR_FP] = AMDXDNA_ERROR_NUM_AIE_FP, + [AIE_ERROR_STREAM] = AMDXDNA_ERROR_NUM_AIE_STREAM, + [AIE_ERROR_ACCESS] = AMDXDNA_ERROR_NUM_AIE_ACCESS, + [AIE_ERROR_BUS] = AMDXDNA_ERROR_NUM_AIE_BUS, + [AIE_ERROR_INSTRUCTION] = AMDXDNA_ERROR_NUM_AIE_INSTRUCTION, + [AIE_ERROR_ECC] = AMDXDNA_ERROR_NUM_AIE_ECC, + [AIE_ERROR_LOCK] = AMDXDNA_ERROR_NUM_AIE_LOCK, + [AIE_ERROR_DMA] = AMDXDNA_ERROR_NUM_AIE_DMA, + [AIE_ERROR_MEM_PARITY] = AMDXDNA_ERROR_NUM_AIE_MEM_PARITY, + [AIE_ERROR_UNKNOWN] = AMDXDNA_ERROR_NUM_UNKNOWN, +}; + +static_assert(ARRAY_SIZE(aie_cat_err_num_map) == AIE_ERROR_UNKNOWN + 1); + +static const enum amdxdna_error_module aie_err_mod_map[] = { + [AIE_MEM_MOD] = AMDXDNA_ERROR_MODULE_AIE_MEMORY, + [AIE_CORE_MOD] = AMDXDNA_ERROR_MODULE_AIE_CORE, + [AIE_PL_MOD] = AMDXDNA_ERROR_MODULE_AIE_PL, + [AIE_UNKNOWN_MOD] = AMDXDNA_ERROR_MODULE_UNKNOWN, +}; + +static_assert(ARRAY_SIZE(aie_err_mod_map) == AIE_UNKNOWN_MOD + 1); + static enum aie_error_category aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type) { @@ -176,12 +203,40 @@ aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type) if (event_id != lut[i].event_id) continue; + if (lut[i].category > AIE_ERROR_UNKNOWN) + return AIE_ERROR_UNKNOWN; + return lut[i].category; } return AIE_ERROR_UNKNOWN; } +static void aie2_update_last_async_error(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err) +{ + struct aie_error *errs = err_info; + enum amdxdna_error_module err_mod; + enum aie_error_category aie_err; + enum amdxdna_error_num err_num; + struct aie_error *last_err; + + 
last_err = &errs[num_err - 1]; + if (last_err->mod_type >= AIE_UNKNOWN_MOD) { + err_num = aie_cat_err_num_map[AIE_ERROR_UNKNOWN]; + err_mod = aie_err_mod_map[AIE_UNKNOWN_MOD]; + } else { + aie_err = aie_get_error_category(last_err->row, + last_err->event_id, + last_err->mod_type); + err_num = aie_cat_err_num_map[aie_err]; + err_mod = aie_err_mod_map[last_err->mod_type]; + } + + ndev->last_async_err.err_code = AMDXDNA_ERROR_ENCODE(err_num, err_mod); + ndev->last_async_err.ts_us = ktime_to_us(ktime_get_real()); + ndev->last_async_err.ex_err_code = AMDXDNA_EXTRA_ERR_ENCODE(last_err->row, last_err->col); +} + static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err) { struct aie_error *errs = err_info; @@ -264,29 +319,14 @@ static void aie2_error_worker(struct work_struct *err_work) } mutex_lock(&xdna->dev_lock); + aie2_update_last_async_error(e->ndev, info->payload, info->err_cnt); + /* Re-sent this event to firmware */ if (aie2_error_event_send(e)) XDNA_WARN(xdna, "Unable to register async event"); mutex_unlock(&xdna->dev_lock); } -int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev) -{ - struct amdxdna_dev *xdna = ndev->xdna; - struct async_event *e; - int i, ret; - - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); - for (i = 0; i < ndev->async_events->event_cnt; i++) { - e = &ndev->async_events->event[i]; - ret = aie2_error_event_send(e); - if (ret) - return ret; - } - - return 0; -} - void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev) { struct amdxdna_dev *xdna = ndev->xdna; @@ -341,6 +381,10 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) e->size = ASYNC_BUF_SIZE; e->resp.status = MAX_AIE2_STATUS_CODE; INIT_WORK(&e->work, aie2_error_worker); + + ret = aie2_error_event_send(e); + if (ret) + goto free_wq; } ndev->async_events = events; @@ -349,6 +393,8 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev) events->event_cnt, events->size); return 0; +free_wq: + 
destroy_workqueue(events->wq); free_buf: dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf, events->addr, DMA_FROM_DEVICE); @@ -356,3 +402,18 @@ free_events: kfree(events); return ret; } + +int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, struct amdxdna_drm_get_array *args) +{ + struct amdxdna_dev *xdna = ndev->xdna; + + drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + + args->num_element = 1; + args->element_size = sizeof(ndev->last_async_err); + if (copy_to_user(u64_to_user_ptr(args->buffer), + &ndev->last_async_err, args->element_size)) + return -EFAULT; + + return 0; +} diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c index 9caad083543d..69cdce9ff208 100644 --- a/drivers/accel/amdxdna/aie2_message.c +++ b/drivers/accel/amdxdna/aie2_message.c @@ -27,6 +27,8 @@ #define DECLARE_AIE2_MSG(name, op) \ DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE) +#define EXEC_MSG_OPS(xdna) ((xdna)->dev_handle->exec_msg_ops) + static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev, struct xdna_mailbox_msg *msg) { @@ -37,7 +39,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev, if (!ndev->mgmt_chann) return -ENODEV; - drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock)); ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg); if (ret == -ETIME) { xdna_mailbox_stop_channel(ndev->mgmt_chann); @@ -45,7 +47,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev, ndev->mgmt_chann = NULL; } - if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) { + if (!ret && *hdl->status != AIE2_STATUS_SUCCESS) { XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x", msg->opcode, *hdl->data); ret = -EINVAL; @@ -233,6 +235,7 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct ret = -EINVAL; goto out_destroy_context; } + ndev->hwctx_num++; XDNA_DBG(xdna, "%s mailbox 
channel irq: %d, msix_id: %d", hwctx->name, ret, resp.msix_id); @@ -267,6 +270,7 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc hwctx->fw_ctx_id); hwctx->priv->mbox_chann = NULL; hwctx->fw_ctx_id = -1; + ndev->hwctx_num--; return ret; } @@ -332,11 +336,6 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, goto fail; } - if (resp.status != AIE2_STATUS_SUCCESS) { - XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status); - ret = -EINVAL; - goto fail; - } XDNA_DBG(xdna, "Query NPU status completed"); if (size < resp.size) { @@ -358,6 +357,55 @@ fail: return ret; } +int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, + char __user *buf, u32 size, + struct amdxdna_drm_query_telemetry_header *header) +{ + DECLARE_AIE2_MSG(get_telemetry, MSG_OP_GET_TELEMETRY); + struct amdxdna_dev *xdna = ndev->xdna; + dma_addr_t dma_addr; + u8 *addr; + int ret; + + if (header->type >= MAX_TELEMETRY_TYPE) + return -EINVAL; + + addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr, + DMA_FROM_DEVICE, GFP_KERNEL); + if (!addr) + return -ENOMEM; + + req.buf_addr = dma_addr; + req.buf_size = size; + req.type = header->type; + + drm_clflush_virt_range(addr, size); /* device can access */ + ret = aie2_send_mgmt_msg_wait(ndev, &msg); + if (ret) { + XDNA_ERR(xdna, "Query telemetry failed, status %d", ret); + goto free_buf; + } + + if (size < resp.size) { + ret = -EINVAL; + XDNA_ERR(xdna, "Bad buffer size. Available: %u. 
Needs: %u", size, resp.size); + goto free_buf; + } + + if (copy_to_user(buf, addr, resp.size)) { + ret = -EFAULT; + XDNA_ERR(xdna, "Failed to copy telemetry to user space"); + goto free_buf; + } + + header->major = resp.major; + header->minor = resp.minor; + +free_buf: + dma_free_noncoherent(xdna->ddev.dev, size, addr, dma_addr, DMA_FROM_DEVICE); + return ret; +} + int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, void *handle, int (*cb)(void*, void __iomem *, size_t)) { @@ -377,15 +425,17 @@ int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT); } -int aie2_config_cu(struct amdxdna_hwctx *hwctx) +int aie2_config_cu(struct amdxdna_hwctx *hwctx, + int (*notify_cb)(void *, void __iomem *, size_t)) { struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_dev *xdna = hwctx->client->xdna; u32 shift = xdna->dev_info->dev_mem_buf_shift; - DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU); + struct config_cu_req req = { 0 }; + struct xdna_mailbox_msg msg; struct drm_gem_object *gobj; struct amdxdna_gem_obj *abo; - int ret, i; + int i; if (!chann) return -ENODEV; @@ -423,80 +473,324 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx) } req.num_cus = hwctx->cus->num_cus; - ret = xdna_send_msg_wait(xdna, chann, &msg); - if (ret == -ETIME) - aie2_destroy_context(xdna->dev_handle, hwctx); + msg.send_data = (u8 *)&req; + msg.send_size = sizeof(req); + msg.handle = hwctx; + msg.opcode = MSG_OP_CONFIG_CU; + msg.notify_cb = notify_cb; + return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT); +} - if (resp.status == AIE2_STATUS_SUCCESS) { - XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret); - return 0; +static int aie2_init_exec_cu_req(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op) +{ + struct execute_buffer_req *cu_req = req; + u32 cmd_len; + void *cmd; + + cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + 
if (cmd_len > sizeof(cu_req->payload)) + return -EINVAL; + + cu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (cu_req->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + memcpy(cu_req->payload, cmd, cmd_len); + + *size = sizeof(*cu_req); + *msg_op = MSG_OP_EXECUTE_BUFFER_CF; + return 0; +} + +static int aie2_init_exec_dpu_req(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op) +{ + struct exec_dpu_req *dpu_req = req; + struct amdxdna_cmd_start_npu *sn; + u32 cmd_len; + + sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (cmd_len - sizeof(*sn) > sizeof(dpu_req->payload)) + return -EINVAL; + + dpu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (dpu_req->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + dpu_req->inst_buf_addr = sn->buffer; + dpu_req->inst_size = sn->buffer_size; + dpu_req->inst_prop_cnt = sn->prop_count; + memcpy(dpu_req->payload, sn->prop_args, cmd_len - sizeof(*sn)); + + *size = sizeof(*dpu_req); + *msg_op = MSG_OP_EXEC_DPU; + return 0; +} + +static void aie2_init_exec_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt) +{ + struct cmd_chain_req *chain_req = req; + + chain_req->buf_addr = slot_addr; + chain_req->buf_size = size; + chain_req->count = cmd_cnt; +} + +static void aie2_init_npu_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt) +{ + struct cmd_chain_npu_req *npu_chain_req = req; + + npu_chain_req->flags = 0; + npu_chain_req->reserved = 0; + npu_chain_req->buf_addr = slot_addr; + npu_chain_req->buf_size = size; + npu_chain_req->count = cmd_cnt; +} + +static int +aie2_cmdlist_fill_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_execbuf_cf *cf_slot = slot; + u32 cmd_len; + void *cmd; + + cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (*size < sizeof(*cf_slot) + cmd_len) + return -EINVAL; + + cf_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (cf_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + cf_slot->arg_cnt = cmd_len / sizeof(u32); + 
memcpy(cf_slot->args, cmd, cmd_len); + /* Accurate slot size to hint firmware to do necessary copy */ + *size = sizeof(*cf_slot) + cmd_len; + return 0; +} + +static int +aie2_cmdlist_fill_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_dpu *dpu_slot = slot; + struct amdxdna_cmd_start_npu *sn; + u32 cmd_len; + u32 arg_sz; + + sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + arg_sz = cmd_len - sizeof(*sn); + if (cmd_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE) + return -EINVAL; + + if (*size < sizeof(*dpu_slot) + arg_sz) + return -EINVAL; + + dpu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (dpu_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + dpu_slot->inst_buf_addr = sn->buffer; + dpu_slot->inst_size = sn->buffer_size; + dpu_slot->inst_prop_cnt = sn->prop_count; + dpu_slot->arg_cnt = arg_sz / sizeof(u32); + memcpy(dpu_slot->args, sn->prop_args, arg_sz); + + /* Accurate slot size to hint firmware to do necessary copy */ + *size = sizeof(*dpu_slot) + arg_sz; + return 0; +} + +static u32 aie2_get_chain_msg_op(u32 cmd_op) +{ + switch (cmd_op) { + case ERT_START_CU: + return MSG_OP_CHAIN_EXEC_BUFFER_CF; + case ERT_START_NPU: + return MSG_OP_CHAIN_EXEC_DPU; + default: + break; + } + + return MSG_OP_MAX_OPCODE; +} + +static struct aie2_exec_msg_ops legacy_exec_message_ops = { + .init_cu_req = aie2_init_exec_cu_req, + .init_dpu_req = aie2_init_exec_dpu_req, + .init_chain_req = aie2_init_exec_chain_req, + .fill_cf_slot = aie2_cmdlist_fill_cf, + .fill_dpu_slot = aie2_cmdlist_fill_dpu, + .get_chain_msg_op = aie2_get_chain_msg_op, +}; + +static int +aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_npu *npu_slot = slot; + u32 cmd_len; + void *cmd; + + cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + if (*size < sizeof(*npu_slot) + cmd_len) + return -EINVAL; + + npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (npu_slot->cu_idx == INVALID_CU_IDX) + 
return -EINVAL; + + memset(npu_slot, 0, sizeof(*npu_slot)); + npu_slot->type = EXEC_NPU_TYPE_NON_ELF; + npu_slot->arg_cnt = cmd_len / sizeof(u32); + memcpy(npu_slot->args, cmd, cmd_len); + + *size = sizeof(*npu_slot) + cmd_len; + return 0; +} + +static int +aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size) +{ + struct cmd_chain_slot_npu *npu_slot = slot; + struct amdxdna_cmd_start_npu *sn; + u32 cmd_len; + u32 arg_sz; + + sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); + arg_sz = cmd_len - sizeof(*sn); + if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE) + return -EINVAL; + + if (*size < sizeof(*npu_slot) + arg_sz) + return -EINVAL; + + npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); + if (npu_slot->cu_idx == INVALID_CU_IDX) + return -EINVAL; + + memset(npu_slot, 0, sizeof(*npu_slot)); + npu_slot->type = EXEC_NPU_TYPE_PARTIAL_ELF; + npu_slot->inst_buf_addr = sn->buffer; + npu_slot->inst_size = sn->buffer_size; + npu_slot->inst_prop_cnt = sn->prop_count; + npu_slot->arg_cnt = arg_sz / sizeof(u32); + memcpy(npu_slot->args, sn->prop_args, arg_sz); + + *size = sizeof(*npu_slot) + arg_sz; + return 0; +} + +static u32 aie2_get_npu_chain_msg_op(u32 cmd_op) +{ + return MSG_OP_CHAIN_EXEC_NPU; +} + +static struct aie2_exec_msg_ops npu_exec_message_ops = { + .init_cu_req = aie2_init_exec_cu_req, + .init_dpu_req = aie2_init_exec_dpu_req, + .init_chain_req = aie2_init_npu_chain_req, + .fill_cf_slot = aie2_cmdlist_fill_npu_cf, + .fill_dpu_slot = aie2_cmdlist_fill_npu_dpu, + .get_chain_msg_op = aie2_get_npu_chain_msg_op, +}; + +static int aie2_init_exec_req(void *req, struct amdxdna_gem_obj *cmd_abo, + size_t *size, u32 *msg_op) +{ + struct amdxdna_dev *xdna = cmd_abo->client->xdna; + int ret; + u32 op; + + + op = amdxdna_cmd_get_op(cmd_abo); + switch (op) { + case ERT_START_CU: + ret = EXEC_MSG_OPS(xdna)->init_cu_req(cmd_abo, req, size, msg_op); + if (ret) { + XDNA_DBG(xdna, "Init CU req failed ret %d", ret); + return ret; + } + break; 
+ case ERT_START_NPU: + ret = EXEC_MSG_OPS(xdna)->init_dpu_req(cmd_abo, req, size, msg_op); + if (ret) { + XDNA_DBG(xdna, "Init DPU req failed ret %d", ret); + return ret; + } + + break; + default: + XDNA_ERR(xdna, "Unsupported op %d", op); + ret = -EOPNOTSUPP; + break; } - XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d", - msg.opcode, resp.status, ret); return ret; } +static int +aie2_cmdlist_fill_slot(void *slot, struct amdxdna_gem_obj *cmd_abo, + size_t *size, u32 *cmd_op) +{ + struct amdxdna_dev *xdna = cmd_abo->client->xdna; + int ret; + u32 op; + + op = amdxdna_cmd_get_op(cmd_abo); + if (*cmd_op == ERT_INVALID_CMD) + *cmd_op = op; + else if (op != *cmd_op) + return -EINVAL; + + switch (op) { + case ERT_START_CU: + ret = EXEC_MSG_OPS(xdna)->fill_cf_slot(cmd_abo, slot, size); + break; + case ERT_START_NPU: + ret = EXEC_MSG_OPS(xdna)->fill_dpu_slot(cmd_abo, slot, size); + break; + default: + XDNA_INFO(xdna, "Unsupported op %d", op); + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +void aie2_msg_init(struct amdxdna_dev_hdl *ndev) +{ + if (AIE2_FEATURE_ON(ndev, AIE2_NPU_COMMAND)) + ndev->exec_msg_ops = &npu_exec_message_ops; + else + ndev->exec_msg_ops = &legacy_exec_message_ops; +} + +static inline struct amdxdna_gem_obj * +aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job) +{ + int idx = get_job_idx(job->seq); + + return job->hwctx->priv->cmd_buf[idx]; +} + int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)) { struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_dev *xdna = hwctx->client->xdna; struct amdxdna_gem_obj *cmd_abo = job->cmd_bo; - union { - struct execute_buffer_req ebuf; - struct exec_dpu_req dpu; - } req; struct xdna_mailbox_msg msg; - u32 payload_len; - void *payload; - int cu_idx; + union exec_req req; int ret; - u32 op; if (!chann) return -ENODEV; - payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len); - if (!payload) { - 
XDNA_ERR(xdna, "Invalid command, cannot get payload"); - return -EINVAL; - } + ret = aie2_init_exec_req(&req, cmd_abo, &msg.send_size, &msg.opcode); + if (ret) + return ret; - cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo); - if (cu_idx < 0) { - XDNA_DBG(xdna, "Invalid cu idx"); - return -EINVAL; - } - - op = amdxdna_cmd_get_op(cmd_abo); - switch (op) { - case ERT_START_CU: - if (unlikely(payload_len > sizeof(req.ebuf.payload))) - XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len); - req.ebuf.cu_idx = cu_idx; - memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload)); - msg.send_size = sizeof(req.ebuf); - msg.opcode = MSG_OP_EXECUTE_BUFFER_CF; - break; - case ERT_START_NPU: { - struct amdxdna_cmd_start_npu *sn = payload; - - if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload))) - XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len); - req.dpu.inst_buf_addr = sn->buffer; - req.dpu.inst_size = sn->buffer_size; - req.dpu.inst_prop_cnt = sn->prop_count; - req.dpu.cu_idx = cu_idx; - memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload)); - msg.send_size = sizeof(req.dpu); - msg.opcode = MSG_OP_EXEC_DPU; - break; - } - default: - XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op); - return -EINVAL; - } msg.handle = job; msg.notify_cb = notify_cb; msg.send_data = (u8 *)&req; @@ -512,135 +806,6 @@ int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, return 0; } -static int -aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset, - struct amdxdna_gem_obj *abo, u32 *size) -{ - struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset; - int cu_idx = amdxdna_cmd_get_cu_idx(abo); - u32 payload_len; - void *payload; - - if (cu_idx < 0) - return -EINVAL; - - payload = amdxdna_cmd_get_payload(abo, &payload_len); - if (!payload) - return -EINVAL; - - if (!slot_has_space(*buf, offset, payload_len)) - return -ENOSPC; - - buf->cu_idx = cu_idx; - buf->arg_cnt = payload_len / sizeof(u32); - memcpy(buf->args, payload, 
payload_len); - /* Accurate buf size to hint firmware to do necessary copy */ - *size = sizeof(*buf) + payload_len; - return 0; -} - -static int -aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset, - struct amdxdna_gem_obj *abo, u32 *size) -{ - struct cmd_chain_slot_dpu *buf = cmd_buf + offset; - int cu_idx = amdxdna_cmd_get_cu_idx(abo); - struct amdxdna_cmd_start_npu *sn; - u32 payload_len; - void *payload; - u32 arg_sz; - - if (cu_idx < 0) - return -EINVAL; - - payload = amdxdna_cmd_get_payload(abo, &payload_len); - if (!payload) - return -EINVAL; - sn = payload; - arg_sz = payload_len - sizeof(*sn); - if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE) - return -EINVAL; - - if (!slot_has_space(*buf, offset, arg_sz)) - return -ENOSPC; - - buf->inst_buf_addr = sn->buffer; - buf->inst_size = sn->buffer_size; - buf->inst_prop_cnt = sn->prop_count; - buf->cu_idx = cu_idx; - buf->arg_cnt = arg_sz / sizeof(u32); - memcpy(buf->args, sn->prop_args, arg_sz); - - /* Accurate buf size to hint firmware to do necessary copy */ - *size = sizeof(*buf) + arg_sz; - return 0; -} - -static int -aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset, - struct amdxdna_gem_obj *abo, u32 *size) -{ - u32 this_op = amdxdna_cmd_get_op(abo); - void *cmd_buf = cmdbuf_abo->mem.kva; - int ret; - - if (this_op != op) { - ret = -EINVAL; - goto done; - } - - switch (op) { - case ERT_START_CU: - ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size); - break; - case ERT_START_NPU: - ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size); - break; - default: - ret = -EOPNOTSUPP; - } - -done: - if (ret) { - XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d", - op, ret); - } - return ret; -} - -static inline struct amdxdna_gem_obj * -aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job) -{ - int idx = get_job_idx(job->seq); - - return job->hwctx->priv->cmd_buf[idx]; -} - -static void -aie2_cmdlist_prepare_request(struct 
cmd_chain_req *req, - struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt) -{ - req->buf_addr = cmdbuf_abo->mem.dev_addr; - req->buf_size = size; - req->count = cnt; - drm_clflush_virt_range(cmdbuf_abo->mem.kva, size); - XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d", - req->buf_addr, size, cnt); -} - -static inline u32 -aie2_cmd_op_to_msg_op(u32 op) -{ - switch (op) { - case ERT_START_CU: - return MSG_OP_CHAIN_EXEC_BUFFER_CF; - case ERT_START_NPU: - return MSG_OP_CHAIN_EXEC_DPU; - default: - return MSG_OP_MAX_OPCODE; - } -} - int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)) @@ -649,12 +814,13 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, struct mailbox_channel *chann = hwctx->priv->mbox_chann; struct amdxdna_client *client = hwctx->client; struct amdxdna_gem_obj *cmd_abo = job->cmd_bo; + struct amdxdna_dev *xdna = client->xdna; struct amdxdna_cmd_chain *payload; struct xdna_mailbox_msg msg; - struct cmd_chain_req req; + union exec_chain_req req; u32 payload_len; u32 offset = 0; - u32 size; + size_t size; int ret; u32 op; u32 i; @@ -665,41 +831,42 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, payload_len < struct_size(payload, data, payload->command_count)) return -EINVAL; + op = ERT_INVALID_CMD; for (i = 0; i < payload->command_count; i++) { u32 boh = (u32)(payload->data[i]); struct amdxdna_gem_obj *abo; abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD); if (!abo) { - XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh); + XDNA_ERR(xdna, "Failed to find cmd BO %d", boh); return -ENOENT; } - /* All sub-cmd should have same op, use the first one. 
*/ - if (i == 0) - op = amdxdna_cmd_get_op(abo); - - ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size); + size = cmdbuf_abo->mem.size - offset; + ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva + offset, + abo, &size, &op); amdxdna_gem_put_obj(abo); if (ret) - return -EINVAL; + return ret; offset += size; } - - /* The offset is the accumulated total size of the cmd buffer */ - aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count); - - msg.opcode = aie2_cmd_op_to_msg_op(op); + msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op); if (msg.opcode == MSG_OP_MAX_OPCODE) return -EOPNOTSUPP; + + /* The offset is the accumulated total size of the cmd buffer */ + EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr, + offset, payload->command_count); + drm_clflush_virt_range(cmdbuf_abo->mem.kva, offset); + msg.handle = job; msg.notify_cb = notify_cb; msg.send_data = (u8 *)&req; msg.send_size = sizeof(req); ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT); if (ret) { - XDNA_ERR(hwctx->client->xdna, "Send message failed"); + XDNA_ERR(xdna, "Send message failed"); return ret; } @@ -712,23 +879,27 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, { struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job); struct mailbox_channel *chann = hwctx->priv->mbox_chann; + struct amdxdna_dev *xdna = hwctx->client->xdna; struct amdxdna_gem_obj *cmd_abo = job->cmd_bo; struct xdna_mailbox_msg msg; - struct cmd_chain_req req; - u32 size; + union exec_chain_req req; + u32 op = ERT_INVALID_CMD; + size_t size; int ret; - u32 op; - op = amdxdna_cmd_get_op(cmd_abo); - ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size); + size = cmdbuf_abo->mem.size; + ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva, cmd_abo, &size, &op); if (ret) return ret; - aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1); - - msg.opcode = aie2_cmd_op_to_msg_op(op); + msg.opcode = 
EXEC_MSG_OPS(xdna)->get_chain_msg_op(op); if (msg.opcode == MSG_OP_MAX_OPCODE) return -EOPNOTSUPP; + + EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr, + size, 1); + drm_clflush_virt_range(cmdbuf_abo->mem.kva, size); + msg.handle = job; msg.notify_cb = notify_cb; msg.send_data = (u8 *)&req; @@ -753,7 +924,7 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int ret = 0; req.src_addr = 0; - req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr; + req.dst_addr = amdxdna_dev_bo_offset(abo); req.size = abo->mem.size; /* Device to Host */ @@ -777,3 +948,32 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, return 0; } + +int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, + int (*notify_cb)(void *, void __iomem *, size_t)) +{ + struct mailbox_channel *chann = hwctx->priv->mbox_chann; + struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]); + struct amdxdna_dev *xdna = hwctx->client->xdna; + struct config_debug_bo_req req; + struct xdna_mailbox_msg msg; + + if (job->drv_cmd->opcode == ATTACH_DEBUG_BO) + req.config = DEBUG_BO_REGISTER; + else + req.config = DEBUG_BO_UNREGISTER; + + req.offset = amdxdna_dev_bo_offset(abo); + req.size = abo->mem.size; + + XDNA_DBG(xdna, "offset 0x%llx size 0x%llx config %d", + req.offset, req.size, req.config); + + msg.handle = job; + msg.notify_cb = notify_cb; + msg.send_data = (u8 *)&req; + msg.send_size = sizeof(req); + msg.opcode = MSG_OP_CONFIG_DEBUG_BO; + + return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT); +} diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h index 6df9065b13f6..947daa63f064 100644 --- a/drivers/accel/amdxdna/aie2_msg_priv.h +++ b/drivers/accel/amdxdna/aie2_msg_priv.h @@ -9,7 +9,8 @@ enum aie2_msg_opcode { MSG_OP_CREATE_CONTEXT = 0x2, MSG_OP_DESTROY_CONTEXT = 0x3, - MSG_OP_SYNC_BO = 0x7, + MSG_OP_GET_TELEMETRY = 0x4, + MSG_OP_SYNC_BO = 0x7, 
MSG_OP_EXECUTE_BUFFER_CF = 0xC, MSG_OP_QUERY_COL_STATUS = 0xD, MSG_OP_QUERY_AIE_TILE_INFO = 0xE, @@ -18,6 +19,8 @@ enum aie2_msg_opcode { MSG_OP_CONFIG_CU = 0x11, MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12, MSG_OP_CHAIN_EXEC_DPU = 0x13, + MSG_OP_CONFIG_DEBUG_BO = 0x14, + MSG_OP_CHAIN_EXEC_NPU = 0x18, MSG_OP_MAX_XRT_OPCODE, MSG_OP_SUSPEND = 0x101, MSG_OP_RESUME = 0x102, @@ -135,6 +138,28 @@ struct destroy_ctx_resp { enum aie2_msg_status status; } __packed; +enum telemetry_type { + TELEMETRY_TYPE_DISABLED, + TELEMETRY_TYPE_HEALTH, + TELEMETRY_TYPE_ERROR_INFO, + TELEMETRY_TYPE_PROFILING, + TELEMETRY_TYPE_DEBUG, + MAX_TELEMETRY_TYPE +}; + +struct get_telemetry_req { + enum telemetry_type type; + __u64 buf_addr; + __u32 buf_size; +} __packed; + +struct get_telemetry_resp { + __u32 major; + __u32 minor; + __u32 size; + enum aie2_msg_status status; +} __packed; + struct execute_buffer_req { __u32 cu_idx; __u32 payload[19]; @@ -148,6 +173,16 @@ struct exec_dpu_req { __u32 payload[35]; } __packed; +enum exec_npu_type { + EXEC_NPU_TYPE_NON_ELF = 0x1, + EXEC_NPU_TYPE_PARTIAL_ELF = 0x2, +}; + +union exec_req { + struct execute_buffer_req ebuf; + struct exec_dpu_req dpu_req; +}; + struct execute_buffer_resp { enum aie2_msg_status status; } __packed; @@ -319,9 +354,6 @@ struct async_event_msg_resp { } __packed; #define MAX_CHAIN_CMDBUF_SIZE SZ_4K -#define slot_has_space(slot, offset, payload_size) \ - (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \ - sizeof(typeof(slot))) struct cmd_chain_slot_execbuf_cf { __u32 cu_idx; @@ -339,12 +371,40 @@ struct cmd_chain_slot_dpu { __u32 args[] __counted_by(arg_cnt); }; +#define MAX_NPU_ARGS_SIZE (26 * sizeof(__u32)) +struct cmd_chain_slot_npu { + enum exec_npu_type type; + u64 inst_buf_addr; + u64 save_buf_addr; + u64 restore_buf_addr; + u32 inst_size; + u32 save_size; + u32 restore_size; + u32 inst_prop_cnt; + u32 cu_idx; + u32 arg_cnt; + u32 args[] __counted_by(arg_cnt); +} __packed; + struct cmd_chain_req { __u64 buf_addr; __u32 
buf_size; __u32 count; } __packed; +struct cmd_chain_npu_req { + u32 flags; + u32 reserved; + u64 buf_addr; + u32 buf_size; + u32 count; +} __packed; + +union exec_chain_req { + struct cmd_chain_npu_req npu_req; + struct cmd_chain_req req; +}; + struct cmd_chain_resp { enum aie2_msg_status status; __u32 fail_cmd_idx; @@ -365,4 +425,21 @@ struct sync_bo_req { struct sync_bo_resp { enum aie2_msg_status status; } __packed; + +#define DEBUG_BO_UNREGISTER 0 +#define DEBUG_BO_REGISTER 1 +struct config_debug_bo_req { + __u64 offset; + __u64 size; + /* + * config operations. + * DEBUG_BO_REGISTER: Register debug buffer + * DEBUG_BO_UNREGISTER: Unregister debug buffer + */ + __u32 config; +} __packed; + +struct config_debug_bo_resp { + enum aie2_msg_status status; +} __packed; #endif /* _AIE2_MSG_PRIV_H_ */ diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c index 87c425e3d2b9..d7ccbdaf47f5 100644 --- a/drivers/accel/amdxdna/aie2_pci.c +++ b/drivers/accel/amdxdna/aie2_pci.c @@ -25,6 +25,7 @@ #include "amdxdna_gem.h" #include "amdxdna_mailbox.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_pm.h" static int aie2_max_col = XRS_MAX_COL; module_param(aie2_max_col, uint, 0600); @@ -54,6 +55,7 @@ struct mgmt_mbox_chann_info { static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor) { + const struct aie2_fw_feature_tbl *feature; struct amdxdna_dev *xdna = ndev->xdna; /* @@ -77,6 +79,17 @@ static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 f XDNA_ERR(xdna, "Firmware minor version smaller than supported"); return -EINVAL; } + + for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor; + feature++) { + if (fw_minor < feature->min_minor) + continue; + if (feature->max_minor > 0 && fw_minor > feature->max_minor) + continue; + + set_bit(feature->feature, &ndev->feature_mask); + } + return 0; } @@ -223,15 +236,6 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev) return 
ret; } - if (!ndev->async_events) - return 0; - - ret = aie2_error_async_events_send(ndev); - if (ret) { - XDNA_ERR(ndev->xdna, "Send async events failed"); - return ret; - } - return 0; } @@ -257,6 +261,8 @@ static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev) return ret; } + ndev->total_col = min(aie2_max_col, ndev->metadata.cols); + return 0; } @@ -338,6 +344,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna) ndev->mbox = NULL; aie2_psp_stop(ndev->psp_hdl); aie2_smu_fini(ndev); + aie2_error_async_events_free(ndev); pci_disable_device(pdev); ndev->dev_status = AIE2_DEV_INIT; @@ -424,6 +431,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna) goto destroy_mgmt_chann; } + ret = aie2_mgmt_fw_query(ndev); + if (ret) { + XDNA_ERR(xdna, "failed to query fw, ret %d", ret); + goto destroy_mgmt_chann; + } + + ret = aie2_error_async_events_alloc(ndev); + if (ret) { + XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); + goto destroy_mgmt_chann; + } + ndev->dev_status = AIE2_DEV_START; return 0; @@ -459,7 +478,6 @@ static int aie2_hw_resume(struct amdxdna_dev *xdna) struct amdxdna_client *client; int ret; - guard(mutex)(&xdna->dev_lock); ret = aie2_hw_start(xdna); if (ret) { XDNA_ERR(xdna, "Start hardware failed, %d", ret); @@ -565,13 +583,6 @@ static int aie2_init(struct amdxdna_dev *xdna) goto release_fw; } - ret = aie2_mgmt_fw_query(ndev); - if (ret) { - XDNA_ERR(xdna, "Query firmware failed, ret %d", ret); - goto stop_hw; - } - ndev->total_col = min(aie2_max_col, ndev->metadata.cols); - xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1; for (i = 0; i < xrs_cfg.clk_list.num_levels; i++) xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk; @@ -587,30 +598,11 @@ static int aie2_init(struct amdxdna_dev *xdna) goto stop_hw; } - ret = aie2_error_async_events_alloc(ndev); - if (ret) { - XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret); - goto stop_hw; - } - - ret = aie2_error_async_events_send(ndev); - if (ret) { - 
XDNA_ERR(xdna, "Send async events failed, ret %d", ret); - goto async_event_free; - } - - /* Issue a command to make sure firmware handled async events */ - ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver); - if (ret) { - XDNA_ERR(xdna, "Re-query firmware version failed"); - goto async_event_free; - } - release_firmware(fw); + aie2_msg_init(ndev); + amdxdna_pm_init(xdna); return 0; -async_event_free: - aie2_error_async_events_free(ndev); stop_hw: aie2_hw_stop(xdna); release_fw: @@ -621,10 +613,8 @@ release_fw: static void aie2_fini(struct amdxdna_dev *xdna) { - struct amdxdna_dev_hdl *ndev = xdna->dev_handle; - + amdxdna_pm_fini(xdna); aie2_hw_stop(xdna); - aie2_error_async_events_free(ndev); } static int aie2_get_aie_status(struct amdxdna_client *client, @@ -845,7 +835,101 @@ static int aie2_get_hwctx_status(struct amdxdna_client *client, } args->buffer_size -= (u32)(array_args.buffer - args->buffer); - return ret; + return 0; +} + +static int aie2_query_resource_info(struct amdxdna_client *client, + struct amdxdna_drm_get_info *args) +{ + struct amdxdna_drm_get_resource_info res_info; + const struct amdxdna_dev_priv *priv; + struct amdxdna_dev_hdl *ndev; + struct amdxdna_dev *xdna; + + xdna = client->xdna; + ndev = xdna->dev_handle; + priv = ndev->priv; + + res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk; + res_info.npu_tops_max = ndev->max_tops; + res_info.npu_task_max = priv->hwctx_limit; + res_info.npu_tops_curr = ndev->curr_tops; + res_info.npu_task_curr = ndev->hwctx_num; + + if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info))) + return -EFAULT; + + return 0; +} + +static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg) +{ + struct amdxdna_dev *xdna = hwctx->client->xdna; + u32 *map = arg; + + if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) { + XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id, + xdna->dev_handle->priv->hwctx_limit); + return -EINVAL; + } + + 
map[hwctx->fw_ctx_id] = hwctx->id; + return 0; +} + +static int aie2_get_telemetry(struct amdxdna_client *client, + struct amdxdna_drm_get_info *args) +{ + struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL; + u32 telemetry_data_sz, header_sz, elem_num; + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_client *tmp_client; + int ret; + + elem_num = xdna->dev_handle->priv->hwctx_limit; + header_sz = struct_size(header, map, elem_num); + if (args->buffer_size <= header_sz) { + XDNA_ERR(xdna, "Invalid buffer size"); + return -EINVAL; + } + + telemetry_data_sz = args->buffer_size - header_sz; + if (telemetry_data_sz > SZ_4M) { + XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz); + return -EINVAL; + } + + header = kzalloc(header_sz, GFP_KERNEL); + if (!header) + return -ENOMEM; + + if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) { + XDNA_ERR(xdna, "Failed to copy telemetry header from user"); + return -EFAULT; + } + + header->map_num_elements = elem_num; + list_for_each_entry(tmp_client, &xdna->client_list, node) { + ret = amdxdna_hwctx_walk(tmp_client, &header->map, + aie2_fill_hwctx_map); + if (ret) + return ret; + } + + ret = aie2_query_telemetry(xdna->dev_handle, + u64_to_user_ptr(args->buffer + header_sz), + telemetry_data_sz, header); + if (ret) { + XDNA_ERR(xdna, "Query telemetry failed ret %d", ret); + return ret; + } + + if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) { + XDNA_ERR(xdna, "Copy header failed"); + return -EFAULT; + } + + return 0; } static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args) @@ -856,6 +940,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i if (!drm_dev_enter(&xdna->ddev, &idx)) return -ENODEV; + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto dev_exit; + switch (args->param) { case DRM_AMDXDNA_QUERY_AIE_STATUS: ret = aie2_get_aie_status(client, args); @@ -878,12 +966,21 
@@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i case DRM_AMDXDNA_GET_POWER_MODE: ret = aie2_get_power_mode(client, args); break; + case DRM_AMDXDNA_QUERY_TELEMETRY: + ret = aie2_get_telemetry(client, args); + break; + case DRM_AMDXDNA_QUERY_RESOURCE_INFO: + ret = aie2_query_resource_info(client, args); + break; default: XDNA_ERR(xdna, "Not supported request parameter %u", args->param); ret = -EOPNOTSUPP; } + + amdxdna_pm_suspend_put(xdna); XDNA_DBG(xdna, "Got param %d", args->param); +dev_exit: drm_dev_exit(idx); return ret; } @@ -898,6 +995,12 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client, drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock)); + if (args->element_size > SZ_4K || args->num_element > SZ_1K) { + XDNA_DBG(xdna, "Invalid element size %d or number of element %d", + args->element_size, args->num_element); + return -EINVAL; + } + array_args.element_size = min(args->element_size, sizeof(struct amdxdna_drm_hwctx_entry)); array_args.buffer = args->buffer; @@ -914,7 +1017,7 @@ static int aie2_query_ctx_status_array(struct amdxdna_client *client, args->num_element = (u32)((array_args.buffer - args->buffer) / args->element_size); - return ret; + return 0; } static int aie2_get_array(struct amdxdna_client *client, @@ -926,16 +1029,26 @@ static int aie2_get_array(struct amdxdna_client *client, if (!drm_dev_enter(&xdna->ddev, &idx)) return -ENODEV; + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto dev_exit; + switch (args->param) { case DRM_AMDXDNA_HW_CONTEXT_ALL: ret = aie2_query_ctx_status_array(client, args); break; + case DRM_AMDXDNA_HW_LAST_ASYNC_ERR: + ret = aie2_get_array_async_error(xdna->dev_handle, args); + break; default: XDNA_ERR(xdna, "Not supported request parameter %u", args->param); ret = -EOPNOTSUPP; } + + amdxdna_pm_suspend_put(xdna); XDNA_DBG(xdna, "Got param %d", args->param); +dev_exit: drm_dev_exit(idx); return ret; } @@ -974,6 +1087,10 @@ static int 
aie2_set_state(struct amdxdna_client *client, if (!drm_dev_enter(&xdna->ddev, &idx)) return -ENODEV; + ret = amdxdna_pm_resume_get(xdna); + if (ret) + goto dev_exit; + switch (args->param) { case DRM_AMDXDNA_SET_POWER_MODE: ret = aie2_set_power_mode(client, args); @@ -984,6 +1101,8 @@ static int aie2_set_state(struct amdxdna_client *client, break; } + amdxdna_pm_suspend_put(xdna); +dev_exit: drm_dev_exit(idx); return ret; } @@ -998,6 +1117,7 @@ const struct amdxdna_dev_ops aie2_ops = { .hwctx_init = aie2_hwctx_init, .hwctx_fini = aie2_hwctx_fini, .hwctx_config = aie2_hwctx_config, + .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo, .cmd_submit = aie2_cmd_submit, .hmm_invalidate = aie2_hmm_invalidate, .get_array = aie2_get_array, diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h index 91a8e948f82a..9793cd1e0c55 100644 --- a/drivers/accel/amdxdna/aie2_pci.h +++ b/drivers/accel/amdxdna/aie2_pci.h @@ -156,6 +156,17 @@ enum aie2_dev_status { AIE2_DEV_START, }; +struct aie2_exec_msg_ops { + int (*init_cu_req)(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op); + int (*init_dpu_req)(struct amdxdna_gem_obj *cmd_bo, void *req, + size_t *size, u32 *msg_op); + void (*init_chain_req)(void *req, u64 slot_addr, size_t size, u32 cmd_cnt); + int (*fill_cf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size); + int (*fill_dpu_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size); + u32 (*get_chain_msg_op)(u32 cmd_op); +}; + struct amdxdna_dev_hdl { struct amdxdna_dev *xdna; const struct amdxdna_dev_priv *priv; @@ -173,6 +184,8 @@ struct amdxdna_dev_hdl { u32 total_col; struct aie_version version; struct aie_metadata metadata; + unsigned long feature_mask; + struct aie2_exec_msg_ops *exec_msg_ops; /* power management and clock*/ enum amdxdna_power_mode_type pw_mode; @@ -182,6 +195,8 @@ struct amdxdna_dev_hdl { u32 clk_gating; u32 npuclk_freq; u32 hclk_freq; + u32 max_tops; + u32 curr_tops; /* Mailbox and the 
management channel */ struct mailbox *mbox; @@ -190,6 +205,8 @@ struct amdxdna_dev_hdl { enum aie2_dev_status dev_status; u32 hwctx_num; + + struct amdxdna_async_error last_async_err; }; #define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \ @@ -204,12 +221,26 @@ struct aie2_hw_ops { int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level); }; +enum aie2_fw_feature { + AIE2_NPU_COMMAND, + AIE2_FEATURE_MAX +}; + +struct aie2_fw_feature_tbl { + enum aie2_fw_feature feature; + u32 max_minor; + u32 min_minor; +}; + +#define AIE2_FEATURE_ON(ndev, feature) test_bit(feature, &(ndev)->feature_mask) + struct amdxdna_dev_priv { const char *fw_path; u64 protocol_major; u64 protocol_minor; const struct rt_config *rt_config; const struct dpm_clk_freq *dpm_clk_tbl; + const struct aie2_fw_feature_tbl *fw_feature_tbl; #define COL_ALIGN_NONE 0 #define COL_ALIGN_NATURE 1 @@ -217,6 +248,7 @@ struct amdxdna_dev_priv { u32 mbox_dev_addr; /* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */ u32 mbox_size; + u32 hwctx_limit; u32 sram_dev_addr; struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX]; struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS]; @@ -234,6 +266,7 @@ extern const struct dpm_clk_freq npu1_dpm_clk_table[]; extern const struct dpm_clk_freq npu4_dpm_clk_table[]; extern const struct rt_config npu1_default_rt_cfg[]; extern const struct rt_config npu4_default_rt_cfg[]; +extern const struct aie2_fw_feature_tbl npu4_fw_feature_table[]; /* aie2_smu.c */ int aie2_smu_init(struct amdxdna_dev_hdl *ndev); @@ -253,10 +286,12 @@ void aie2_psp_stop(struct psp_device *psp); /* aie2_error.c */ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev); void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev); -int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev); int aie2_error_async_msg_thread(void *data); +int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, + struct amdxdna_drm_get_array *args); /* aie2_message.c */ +void aie2_msg_init(struct 
amdxdna_dev_hdl *ndev); int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev); int aie2_resume_fw(struct amdxdna_dev_hdl *ndev); int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value); @@ -270,9 +305,13 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx); int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size); int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled); +int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev, + char __user *buf, u32 size, + struct amdxdna_drm_query_telemetry_header *header); int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size, void *handle, int (*cb)(void*, void __iomem *, size_t)); -int aie2_config_cu(struct amdxdna_hwctx *hwctx); +int aie2_config_cu(struct amdxdna_hwctx *hwctx, + int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx, @@ -283,11 +322,14 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx, int (*notify_cb)(void *, void __iomem *, size_t)); int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, int (*notify_cb)(void *, void __iomem *, size_t)); +int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, + int (*notify_cb)(void *, void __iomem *, size_t)); /* aie2_hwctx.c */ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx); void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx); int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); +int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl); void aie2_hwctx_suspend(struct amdxdna_client *client); int aie2_hwctx_resume(struct amdxdna_client 
*client); int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c index d303701b0ded..11c0e9e7b03a 100644 --- a/drivers/accel/amdxdna/aie2_smu.c +++ b/drivers/accel/amdxdna/aie2_smu.c @@ -11,6 +11,7 @@ #include "aie2_pci.h" #include "amdxdna_pci_drv.h" +#include "amdxdna_pm.h" #define SMU_RESULT_OK 1 @@ -22,6 +23,13 @@ #define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7 #define AIE2_SMU_SET_HARD_DPMLEVEL 0x8 +#define NPU4_DPM_TOPS(ndev, dpm_level) \ +({ \ + typeof(ndev) _ndev = ndev; \ + (4096 * (_ndev)->total_col * \ + (_ndev)->priv->dpm_clk_tbl[dpm_level].hclk / 1000000); \ +}) + static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd, u32 reg_arg, u32 *out) { @@ -59,12 +67,16 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) u32 freq; int ret; + ret = amdxdna_pm_resume_get(ndev->xdna); + if (ret) + return ret; + ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ, ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq); if (ret) { XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret); - return ret; + goto suspend_put; } ndev->npuclk_freq = freq; @@ -73,43 +85,62 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) if (ret) { XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n", ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret); - return ret; + goto suspend_put; } + + amdxdna_pm_suspend_put(ndev->xdna); ndev->hclk_freq = freq; ndev->dpm_level = dpm_level; + ndev->max_tops = 2 * ndev->total_col; + ndev->curr_tops = ndev->max_tops * freq / 1028; XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n", ndev->npuclk_freq, ndev->hclk_freq); return 0; + +suspend_put: + amdxdna_pm_suspend_put(ndev->xdna); + return ret; } int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level) { int ret; + ret = amdxdna_pm_resume_get(ndev->xdna); + if (ret) + return ret; + ret = 
aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL); if (ret) { XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ", dpm_level, ret); - return ret; + goto suspend_put; } ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL); if (ret) { XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d", dpm_level, ret); - return ret; + goto suspend_put; } + amdxdna_pm_suspend_put(ndev->xdna); ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk; ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk; ndev->dpm_level = dpm_level; + ndev->max_tops = NPU4_DPM_TOPS(ndev, ndev->max_dpm_level); + ndev->curr_tops = NPU4_DPM_TOPS(ndev, dpm_level); XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n", ndev->npuclk_freq, ndev->hclk_freq); return 0; + +suspend_put: + amdxdna_pm_suspend_put(ndev->xdna); + return ret; } int aie2_smu_init(struct amdxdna_dev_hdl *ndev) diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c index 4bfe4ef20550..878cc955f56d 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.c +++ b/drivers/accel/amdxdna/amdxdna_ctx.c @@ -113,14 +113,14 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size) return &cmd->data[num_masks]; } -int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo) +u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo) { struct amdxdna_cmd *cmd = abo->mem.kva; u32 num_masks, i; u32 *cu_mask; if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) - return -1; + return INVALID_CU_IDX; num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header); cu_mask = cmd->data; @@ -129,7 +129,7 @@ int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo) return ffs(cu_mask[i]) - 1; } - return -1; + return INVALID_CU_IDX; } /* @@ -161,19 +161,14 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr if (args->ext || args->ext_flags) return -EINVAL; - if (!drm_dev_enter(dev, &idx)) - return -ENODEV; - hwctx = kzalloc(sizeof(*hwctx), 
GFP_KERNEL); - if (!hwctx) { - ret = -ENOMEM; - goto exit; - } + if (!hwctx) + return -ENOMEM; if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) { XDNA_ERR(xdna, "Access QoS info failed"); - ret = -EFAULT; - goto free_hwctx; + kfree(hwctx); + return -EFAULT; } hwctx->client = client; @@ -181,30 +176,36 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr hwctx->num_tiles = args->num_tiles; hwctx->mem_size = args->mem_size; hwctx->max_opc = args->max_opc; + + guard(mutex)(&xdna->dev_lock); + + if (!drm_dev_enter(dev, &idx)) { + ret = -ENODEV; + goto free_hwctx; + } + + ret = xdna->dev_info->ops->hwctx_init(hwctx); + if (ret) { + XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret); + goto dev_exit; + } + + hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id); + if (!hwctx->name) { + ret = -ENOMEM; + goto fini_hwctx; + } + ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx, XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID), &client->next_hwctxid, GFP_KERNEL); if (ret < 0) { XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret); - goto free_hwctx; - } - - hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id); - if (!hwctx->name) { - ret = -ENOMEM; - goto rm_id; - } - - mutex_lock(&xdna->dev_lock); - ret = xdna->dev_info->ops->hwctx_init(hwctx); - if (ret) { - mutex_unlock(&xdna->dev_lock); - XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret); goto free_name; } + args->handle = hwctx->id; args->syncobj_handle = hwctx->syncobj_hdl; - mutex_unlock(&xdna->dev_lock); atomic64_set(&hwctx->job_submit_cnt, 0); atomic64_set(&hwctx->job_free_cnt, 0); @@ -214,12 +215,12 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr free_name: kfree(hwctx->name); -rm_id: - xa_erase(&client->hwctx_xa, hwctx->id); +fini_hwctx: + xdna->dev_info->ops->hwctx_fini(hwctx); +dev_exit: + drm_dev_exit(idx); free_hwctx: kfree(hwctx); -exit: - 
drm_dev_exit(idx); return ret; } @@ -327,6 +328,38 @@ unlock_srcu: return ret; } +int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl) +{ + struct amdxdna_dev *xdna = client->xdna; + struct amdxdna_hwctx *hwctx; + struct amdxdna_gem_obj *abo; + struct drm_gem_object *gobj; + int ret, idx; + + if (!xdna->dev_info->ops->hwctx_sync_debug_bo) + return -EOPNOTSUPP; + + gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl); + if (!gobj) + return -EINVAL; + + abo = to_xdna_obj(gobj); + guard(mutex)(&xdna->dev_lock); + idx = srcu_read_lock(&client->hwctx_srcu); + hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx); + if (!hwctx) { + ret = -EINVAL; + goto unlock_srcu; + } + + ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl); + +unlock_srcu: + srcu_read_unlock(&client->hwctx_srcu, idx); + drm_gem_object_put(gobj); + return ret; +} + static void amdxdna_arg_bos_put(struct amdxdna_sched_job *job) { @@ -392,6 +425,7 @@ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job) } int amdxdna_cmd_submit(struct amdxdna_client *client, + struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt, u32 hwctx_hdl, u64 *seq) { @@ -405,6 +439,8 @@ int amdxdna_cmd_submit(struct amdxdna_client *client, if (!job) return -ENOMEM; + job->drv_cmd = drv_cmd; + if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) { job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD); if (!job->cmd_bo) { @@ -412,8 +448,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client, ret = -EINVAL; goto free_job; } - } else { - job->cmd_bo = NULL; } ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt); @@ -431,11 +465,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client, goto unlock_srcu; } - if (hwctx->status != HWCTX_STAT_READY) { - XDNA_ERR(xdna, "HW Context is not ready"); - ret = -EINVAL; - goto unlock_srcu; - } job->hwctx = hwctx; job->mm = current->mm; @@ -512,7 +541,7 @@ static int 
amdxdna_drm_submit_execbuf(struct amdxdna_client *client, } } - ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls, + ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls, args->arg_count, args->hwctx, &args->seq); if (ret) XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret); diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h index 7cd7a55936f0..d02fb32499fa 100644 --- a/drivers/accel/amdxdna/amdxdna_ctx.h +++ b/drivers/accel/amdxdna/amdxdna_ctx.h @@ -13,9 +13,10 @@ struct amdxdna_hwctx_priv; enum ert_cmd_opcode { - ERT_START_CU = 0, - ERT_CMD_CHAIN = 19, - ERT_START_NPU = 20, + ERT_START_CU = 0, + ERT_CMD_CHAIN = 19, + ERT_START_NPU = 20, + ERT_INVALID_CMD = ~0U, }; enum ert_cmd_state { @@ -64,6 +65,8 @@ struct amdxdna_cmd { u32 data[]; }; +#define INVALID_CU_IDX (~0U) + struct amdxdna_hwctx { struct amdxdna_client *client; struct amdxdna_hwctx_priv *priv; @@ -95,6 +98,17 @@ struct amdxdna_hwctx { #define drm_job_to_xdna_job(j) \ container_of(j, struct amdxdna_sched_job, base) +enum amdxdna_job_opcode { + SYNC_DEBUG_BO, + ATTACH_DEBUG_BO, + DETACH_DEBUG_BO, +}; + +struct amdxdna_drv_cmd { + enum amdxdna_job_opcode opcode; + u32 result; +}; + struct amdxdna_sched_job { struct drm_sched_job base; struct kref refcnt; @@ -105,7 +119,9 @@ struct amdxdna_sched_job { /* user can wait on this fence */ struct dma_fence *out_fence; bool job_done; + bool job_timeout; u64 seq; + struct amdxdna_drv_cmd *drv_cmd; struct amdxdna_gem_obj *cmd_bo; size_t bo_cnt; struct drm_gem_object *bos[] __counted_by(bo_cnt); @@ -137,15 +153,17 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo) } void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size); -int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); +u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo); void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job); void amdxdna_hwctx_remove_all(struct amdxdna_client *client); int amdxdna_hwctx_walk(struct 
amdxdna_client *client, void *arg, int (*walk)(struct amdxdna_hwctx *hwctx, void *arg)); +int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl); int amdxdna_cmd_submit(struct amdxdna_client *client, - u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt, + struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls, + u32 *arg_bo_hdls, u32 arg_bo_cnt, u32 hwctx_hdl, u64 *seq); int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl, diff --git a/drivers/accel/amdxdna/amdxdna_error.h b/drivers/accel/amdxdna/amdxdna_error.h new file mode 100644 index 000000000000..c51de86ec12b --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_error.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. + */ + +#ifndef _AMDXDNA_ERROR_H_ +#define _AMDXDNA_ERROR_H_ + +#include +#include + +#define AMDXDNA_ERR_DRV_AIE 4 +#define AMDXDNA_ERR_SEV_CRITICAL 3 +#define AMDXDNA_ERR_CLASS_AIE 2 + +#define AMDXDNA_ERR_NUM_MASK GENMASK_U64(15, 0) +#define AMDXDNA_ERR_DRV_MASK GENMASK_U64(23, 16) +#define AMDXDNA_ERR_SEV_MASK GENMASK_U64(31, 24) +#define AMDXDNA_ERR_MOD_MASK GENMASK_U64(39, 32) +#define AMDXDNA_ERR_CLASS_MASK GENMASK_U64(47, 40) + +enum amdxdna_error_num { + AMDXDNA_ERROR_NUM_AIE_SATURATION = 3, + AMDXDNA_ERROR_NUM_AIE_FP, + AMDXDNA_ERROR_NUM_AIE_STREAM, + AMDXDNA_ERROR_NUM_AIE_ACCESS, + AMDXDNA_ERROR_NUM_AIE_BUS, + AMDXDNA_ERROR_NUM_AIE_INSTRUCTION, + AMDXDNA_ERROR_NUM_AIE_ECC, + AMDXDNA_ERROR_NUM_AIE_LOCK, + AMDXDNA_ERROR_NUM_AIE_DMA, + AMDXDNA_ERROR_NUM_AIE_MEM_PARITY, + AMDXDNA_ERROR_NUM_UNKNOWN = 15, +}; + +enum amdxdna_error_module { + AMDXDNA_ERROR_MODULE_AIE_CORE = 3, + AMDXDNA_ERROR_MODULE_AIE_MEMORY, + AMDXDNA_ERROR_MODULE_AIE_SHIM, + AMDXDNA_ERROR_MODULE_AIE_NOC, + AMDXDNA_ERROR_MODULE_AIE_PL, + AMDXDNA_ERROR_MODULE_UNKNOWN = 8, +}; + +#define AMDXDNA_ERROR_ENCODE(err_num, err_mod) \ + (FIELD_PREP(AMDXDNA_ERR_NUM_MASK, err_num) | \ + FIELD_PREP_CONST(AMDXDNA_ERR_DRV_MASK, 
AMDXDNA_ERR_DRV_AIE) | \ + FIELD_PREP_CONST(AMDXDNA_ERR_SEV_MASK, AMDXDNA_ERR_SEV_CRITICAL) | \ + FIELD_PREP(AMDXDNA_ERR_MOD_MASK, err_mod) | \ + FIELD_PREP_CONST(AMDXDNA_ERR_CLASS_MASK, AMDXDNA_ERR_CLASS_AIE)) + +#define AMDXDNA_EXTRA_ERR_COL_MASK GENMASK_U64(7, 0) +#define AMDXDNA_EXTRA_ERR_ROW_MASK GENMASK_U64(15, 8) + +#define AMDXDNA_EXTRA_ERR_ENCODE(row, col) \ + (FIELD_PREP(AMDXDNA_EXTRA_ERR_COL_MASK, col) | \ + FIELD_PREP(AMDXDNA_EXTRA_ERR_ROW_MASK, row)) + +#endif /* _AMDXDNA_ERROR_H_ */ diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c index d407a36eb412..dfa916eeb2d9 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.c +++ b/drivers/accel/amdxdna/amdxdna_gem.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -392,35 +393,33 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = { .vunmap = drm_gem_dmabuf_vunmap, }; -static int amdxdna_gem_obj_vmap(struct drm_gem_object *obj, struct iosys_map *map) +static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr) { - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); - - iosys_map_clear(map); - - dma_resv_assert_held(obj->resv); + struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); + int ret; if (is_import_bo(abo)) - dma_buf_vmap(abo->dma_buf, map); + ret = dma_buf_vmap_unlocked(abo->dma_buf, &map); else - drm_gem_shmem_object_vmap(obj, map); + ret = drm_gem_vmap(to_gobj(abo), &map); - if (!map->vaddr) - return -ENOMEM; - - return 0; + *vaddr = map.vaddr; + return ret; } -static void amdxdna_gem_obj_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo) { - struct amdxdna_gem_obj *abo = to_xdna_obj(obj); + struct iosys_map map; - dma_resv_assert_held(obj->resv); + if (!abo->mem.kva) + return; + + iosys_map_set_vaddr(&map, abo->mem.kva); if (is_import_bo(abo)) - dma_buf_vunmap(abo->dma_buf, map); + dma_buf_vunmap_unlocked(abo->dma_buf, &map); else - 
drm_gem_shmem_object_vunmap(obj, map); + drm_gem_vunmap(to_gobj(abo), &map); } static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags) @@ -455,7 +454,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) { struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev); struct amdxdna_gem_obj *abo = to_xdna_obj(gobj); - struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva); XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr); @@ -468,7 +466,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj) if (abo->type == AMDXDNA_BO_DEV_HEAP) drm_mm_takedown(&abo->mm); - drm_gem_vunmap(gobj, &map); + amdxdna_gem_obj_vunmap(abo); mutex_destroy(&abo->lock); if (is_import_bo(abo)) { @@ -489,8 +487,8 @@ static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = { .pin = drm_gem_shmem_object_pin, .unpin = drm_gem_shmem_object_unpin, .get_sg_table = drm_gem_shmem_object_get_sg_table, - .vmap = amdxdna_gem_obj_vmap, - .vunmap = amdxdna_gem_obj_vunmap, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, .mmap = amdxdna_gem_obj_mmap, .vm_ops = &drm_gem_shmem_vm_ops, .export = amdxdna_gem_prime_export, @@ -663,7 +661,6 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, struct drm_file *filp) { struct amdxdna_client *client = filp->driver_priv; - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_gem_obj *abo; int ret; @@ -692,12 +689,11 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev, abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base; drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size); - ret = drm_gem_vmap(to_gobj(abo), &map); + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); if (ret) { XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret); goto release_obj; } - abo->mem.kva = map.vaddr; client->dev_heap = abo; drm_gem_object_get(to_gobj(abo)); @@ -748,7 +744,6 @@ amdxdna_drm_create_cmd_bo(struct 
drm_device *dev, struct amdxdna_drm_create_bo *args, struct drm_file *filp) { - struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL); struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_gem_obj *abo; int ret; @@ -770,12 +765,11 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev, abo->type = AMDXDNA_BO_CMD; abo->client = filp->driver_priv; - ret = drm_gem_vmap(to_gobj(abo), &map); + ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva); if (ret) { XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret); goto release_obj; } - abo->mem.kva = map.vaddr; return abo; @@ -969,6 +963,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev, XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n", args->handle, args->offset, args->size); + if (args->direction == SYNC_DIRECT_FROM_DEVICE) + ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle); + put_obj: drm_gem_object_put(gobj); return ret; diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h index ae29db94a9d3..f79fc7f3c93b 100644 --- a/drivers/accel/amdxdna/amdxdna_gem.h +++ b/drivers/accel/amdxdna/amdxdna_gem.h @@ -7,6 +7,7 @@ #define _AMDXDNA_GEM_H_ #include +#include "amdxdna_pci_drv.h" struct amdxdna_umap { struct vm_area_struct *vma; @@ -62,6 +63,11 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo) drm_gem_object_put(to_gobj(abo)); } +static inline u64 amdxdna_dev_bo_offset(struct amdxdna_gem_obj *abo) +{ + return abo->mem.dev_addr - abo->client->dev_heap->mem.dev_addr; +} + void amdxdna_umap_put(struct amdxdna_umap *mapp); struct drm_gem_object * diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c index da1ac89bb78f..24258dcc18eb 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox.c +++ b/drivers/accel/amdxdna/amdxdna_mailbox.c @@ -194,7 +194,8 @@ static void mailbox_release_msg(struct mailbox_channel *mb_chann, { MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x", mb_msg->pkg.header.id, mb_msg->pkg.header.opcode); - 
mb_msg->notify_cb(mb_msg->handle, NULL, 0); + if (mb_msg->notify_cb) + mb_msg->notify_cb(mb_msg->handle, NULL, 0); kfree(mb_msg); } @@ -248,7 +249,7 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade { struct mailbox_msg *mb_msg; int msg_id; - int ret; + int ret = 0; msg_id = header->id; if (!mailbox_validate_msgid(msg_id)) { @@ -265,9 +266,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x", header->opcode, header->total_size, header->id); - ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size); - if (unlikely(ret)) - MB_ERR(mb_chann, "Message callback ret %d", ret); + if (mb_msg->notify_cb) { + ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size); + if (unlikely(ret)) + MB_ERR(mb_chann, "Message callback ret %d", ret); + } kfree(mb_msg); return ret; diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h index 710ff8873d61..556c712cad0a 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h +++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h @@ -16,16 +16,18 @@ struct xdna_notify { u32 *data; size_t size; int error; + u32 *status; }; -#define DECLARE_XDNA_MSG_COMMON(name, op, status) \ +#define DECLARE_XDNA_MSG_COMMON(name, op, s) \ struct name##_req req = { 0 }; \ - struct name##_resp resp = { status }; \ + struct name##_resp resp = { .status = s }; \ struct xdna_notify hdl = { \ .error = 0, \ .data = (u32 *)&resp, \ .size = sizeof(resp), \ .comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \ + .status = (u32 *)&resp.status, \ }; \ struct xdna_mailbox_msg msg = { \ .send_data = (u8 *)&req, \ diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c index 569cd703729d..7590265d4485 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.c +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c @@ -13,13 +13,11 @@ #include #include #include 
-#include #include "amdxdna_ctx.h" #include "amdxdna_gem.h" #include "amdxdna_pci_drv.h" - -#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */ +#include "amdxdna_pm.h" MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin"); MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin"); @@ -29,9 +27,13 @@ MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin"); /* * 0.0: Initial version * 0.1: Support getting all hardware contexts by DRM_IOCTL_AMDXDNA_GET_ARRAY + * 0.2: Support getting last error hardware error + * 0.3: Support firmware debug buffer + * 0.4: Support getting resource information + * 0.5: Support getting telemetry data */ #define AMDXDNA_DRIVER_MAJOR 0 -#define AMDXDNA_DRIVER_MINOR 1 +#define AMDXDNA_DRIVER_MINOR 5 /* * Bind the driver base on (vendor_id, device_id) pair and later use the @@ -61,17 +63,9 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp) struct amdxdna_client *client; int ret; - ret = pm_runtime_resume_and_get(ddev->dev); - if (ret) { - XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret); - return ret; - } - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) { - ret = -ENOMEM; - goto put_rpm; - } + if (!client) + return -ENOMEM; client->pid = pid_nr(rcu_access_pointer(filp->pid)); client->xdna = xdna; @@ -106,9 +100,6 @@ unbind_sva: iommu_sva_unbind_device(client->sva); failed: kfree(client); -put_rpm: - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); return ret; } @@ -130,8 +121,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp) XDNA_DBG(xdna, "pid %d closed", client->pid); kfree(client); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); } static int amdxdna_flush(struct file *f, fl_owner_t id) @@ -310,19 +299,12 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto failed_dev_fini; } - pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(dev); - pm_runtime_allow(dev); 
- ret = drm_dev_register(&xdna->ddev, 0); if (ret) { XDNA_ERR(xdna, "DRM register failed, ret %d", ret); - pm_runtime_forbid(dev); goto failed_sysfs_fini; } - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); return 0; failed_sysfs_fini: @@ -339,14 +321,10 @@ destroy_notifier_wq: static void amdxdna_remove(struct pci_dev *pdev) { struct amdxdna_dev *xdna = pci_get_drvdata(pdev); - struct device *dev = &pdev->dev; struct amdxdna_client *client; destroy_workqueue(xdna->notifier_wq); - pm_runtime_get_noresume(dev); - pm_runtime_forbid(dev); - drm_dev_unplug(&xdna->ddev); amdxdna_sysfs_fini(xdna); @@ -365,29 +343,9 @@ static void amdxdna_remove(struct pci_dev *pdev) mutex_unlock(&xdna->dev_lock); } -static int amdxdna_pmops_suspend(struct device *dev) -{ - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - - if (!xdna->dev_info->ops->suspend) - return -EOPNOTSUPP; - - return xdna->dev_info->ops->suspend(xdna); -} - -static int amdxdna_pmops_resume(struct device *dev) -{ - struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev)); - - if (!xdna->dev_info->ops->resume) - return -EOPNOTSUPP; - - return xdna->dev_info->ops->resume(xdna); -} - static const struct dev_pm_ops amdxdna_pm_ops = { - SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume) - RUNTIME_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume, NULL) + SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume) + RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL) }; static struct pci_driver amdxdna_pci_driver = { diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h index 72d6696d49da..c99477f5e454 100644 --- a/drivers/accel/amdxdna/amdxdna_pci_drv.h +++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h @@ -6,6 +6,7 @@ #ifndef _AMDXDNA_PCI_DRV_H_ #define _AMDXDNA_PCI_DRV_H_ +#include #include #include @@ -54,6 +55,7 @@ struct amdxdna_dev_ops { int (*hwctx_init)(struct amdxdna_hwctx *hwctx); void (*hwctx_fini)(struct 
amdxdna_hwctx *hwctx); int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size); + int (*hwctx_sync_debug_bo)(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl); void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq); int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq); int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args); @@ -99,6 +101,7 @@ struct amdxdna_dev { struct amdxdna_fw_ver fw_ver; struct rw_semaphore notifier_lock; /* for mmu notifier*/ struct workqueue_struct *notifier_wq; + bool rpm_on; }; /* diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c new file mode 100644 index 000000000000..fa38e65d617c --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_pm.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. + */ + +#include +#include +#include + +#include "amdxdna_pm.h" + +#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */ + +int amdxdna_pm_suspend(struct device *dev) +{ + struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); + int ret = -EOPNOTSUPP; + bool rpm; + + if (xdna->dev_info->ops->suspend) { + rpm = xdna->rpm_on; + xdna->rpm_on = false; + ret = xdna->dev_info->ops->suspend(xdna); + xdna->rpm_on = rpm; + } + + XDNA_DBG(xdna, "Suspend done ret %d", ret); + return ret; +} + +int amdxdna_pm_resume(struct device *dev) +{ + struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); + int ret = -EOPNOTSUPP; + bool rpm; + + if (xdna->dev_info->ops->resume) { + rpm = xdna->rpm_on; + xdna->rpm_on = false; + ret = xdna->dev_info->ops->resume(xdna); + xdna->rpm_on = rpm; + } + + XDNA_DBG(xdna, "Resume done ret %d", ret); + return ret; +} + +int amdxdna_pm_resume_get(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + int ret; + + if (!xdna->rpm_on) + return 0; + + ret = pm_runtime_resume_and_get(dev); + if (ret) { + 
XDNA_ERR(xdna, "Resume failed: %d", ret); + pm_runtime_set_suspended(dev); + } + + return ret; +} + +void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + + if (!xdna->rpm_on) + return; + + pm_runtime_put_autosuspend(dev); +} + +void amdxdna_pm_init(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + + pm_runtime_set_active(dev); + pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_allow(dev); + pm_runtime_put_autosuspend(dev); + xdna->rpm_on = true; +} + +void amdxdna_pm_fini(struct amdxdna_dev *xdna) +{ + struct device *dev = xdna->ddev.dev; + + xdna->rpm_on = false; + pm_runtime_get_noresume(dev); + pm_runtime_forbid(dev); +} diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h new file mode 100644 index 000000000000..77b2d6e45570 --- /dev/null +++ b/drivers/accel/amdxdna/amdxdna_pm.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2025, Advanced Micro Devices, Inc. 
+ */ + +#ifndef _AMDXDNA_PM_H_ +#define _AMDXDNA_PM_H_ + +#include "amdxdna_pci_drv.h" + +int amdxdna_pm_suspend(struct device *dev); +int amdxdna_pm_resume(struct device *dev); +int amdxdna_pm_resume_get(struct amdxdna_dev *xdna); +void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna); +void amdxdna_pm_init(struct amdxdna_dev *xdna); +void amdxdna_pm_fini(struct amdxdna_dev *xdna); + +#endif /* _AMDXDNA_PM_H_ */ diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c index e4f6dac7d00f..ec407f3b48fc 100644 --- a/drivers/accel/amdxdna/npu1_regs.c +++ b/drivers/accel/amdxdna/npu1_regs.c @@ -46,6 +46,7 @@ const struct rt_config npu1_default_rt_cfg[] = { { 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */ + { 4, 1, AIE2_RT_CFG_INIT }, /* Debug BO */ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 0 }, }; @@ -62,16 +63,23 @@ const struct dpm_clk_freq npu1_dpm_clk_table[] = { { 0 } }; +static const struct aie2_fw_feature_tbl npu1_fw_feature_table[] = { + { .feature = AIE2_NPU_COMMAND, .min_minor = 8 }, + { 0 } +}; + static const struct amdxdna_dev_priv npu1_dev_priv = { .fw_path = "amdnpu/1502_00/npu.sbin", .protocol_major = 0x5, .protocol_minor = 0x7, .rt_config = npu1_default_rt_cfg, .dpm_clk_tbl = npu1_dpm_clk_table, + .fw_feature_tbl = npu1_fw_feature_table, .col_align = COL_ALIGN_NONE, .mbox_dev_addr = NPU1_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU1_SRAM_BAR_BASE, + .hwctx_limit = 6, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c index a081cac75ee0..86f87d0d1354 100644 --- a/drivers/accel/amdxdna/npu2_regs.c +++ b/drivers/accel/amdxdna/npu2_regs.c @@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu2_dev_priv = { .protocol_minor = 0x6, .rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = 
npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU2_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU2_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c index 9f2e33182ec6..d90777275a9f 100644 --- a/drivers/accel/amdxdna/npu4_regs.c +++ b/drivers/accel/amdxdna/npu4_regs.c @@ -63,6 +63,7 @@ const struct rt_config npu4_default_rt_cfg[] = { { 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */ + { 10, 1, AIE2_RT_CFG_INIT }, /* DEBUG BUF */ { 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ { 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */ @@ -82,16 +83,23 @@ const struct dpm_clk_freq npu4_dpm_clk_table[] = { { 0 } }; +const struct aie2_fw_feature_tbl npu4_fw_feature_table[] = { + { .feature = AIE2_NPU_COMMAND, .min_minor = 15 }, + { 0 } +}; + static const struct amdxdna_dev_priv npu4_dev_priv = { .fw_path = "amdnpu/17f0_10/npu.sbin", .protocol_major = 0x6, .protocol_minor = 12, .rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU4_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU4_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c index 5f1cf83461c4..75ad97f0b937 100644 --- a/drivers/accel/amdxdna/npu5_regs.c +++ b/drivers/accel/amdxdna/npu5_regs.c @@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu5_dev_priv = { .protocol_minor = 12, 
.rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU5_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU5_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c index 94a7005685a7..758dc013fe13 100644 --- a/drivers/accel/amdxdna/npu6_regs.c +++ b/drivers/accel/amdxdna/npu6_regs.c @@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu6_dev_priv = { .protocol_minor = 12, .rt_config = npu4_default_rt_cfg, .dpm_clk_tbl = npu4_dpm_clk_table, + .fw_feature_tbl = npu4_fw_feature_table, .col_align = COL_ALIGN_NATURE, .mbox_dev_addr = NPU6_MBOX_BAR_BASE, .mbox_size = 0, /* Use BAR size */ .sram_dev_addr = NPU6_SRAM_BAR_BASE, + .hwctx_limit = 16, .sram_offs = { DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0), DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15), diff --git a/drivers/accel/ethosu/Kconfig b/drivers/accel/ethosu/Kconfig new file mode 100644 index 000000000000..d25f9b3eb317 --- /dev/null +++ b/drivers/accel/ethosu/Kconfig @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config DRM_ACCEL_ARM_ETHOSU + tristate "Arm Ethos-U65/U85 NPU" + depends on HAS_IOMEM + depends on DRM_ACCEL + select DRM_GEM_DMA_HELPER + select DRM_SCHED + select GENERIC_ALLOCATOR + help + Enables driver for Arm Ethos-U65/U85 NPUs diff --git a/drivers/accel/ethosu/Makefile b/drivers/accel/ethosu/Makefile new file mode 100644 index 000000000000..17db5a600416 --- /dev/null +++ b/drivers/accel/ethosu/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) := ethosu.o +ethosu-y += ethosu_drv.o ethosu_gem.o ethosu_job.o diff --git 
a/drivers/accel/ethosu/ethosu_device.h b/drivers/accel/ethosu/ethosu_device.h new file mode 100644 index 000000000000..b189fa783d6a --- /dev/null +++ b/drivers/accel/ethosu/ethosu_device.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0-only or MIT */ +/* Copyright 2025 Arm, Ltd. */ + +#ifndef __ETHOSU_DEVICE_H__ +#define __ETHOSU_DEVICE_H__ + +#include +#include +#include + +#include +#include + +#include + +struct clk; +struct gen_pool; + +#define NPU_REG_ID 0x0000 +#define NPU_REG_STATUS 0x0004 +#define NPU_REG_CMD 0x0008 +#define NPU_REG_RESET 0x000c +#define NPU_REG_QBASE 0x0010 +#define NPU_REG_QBASE_HI 0x0014 +#define NPU_REG_QREAD 0x0018 +#define NPU_REG_QCONFIG 0x001c +#define NPU_REG_QSIZE 0x0020 +#define NPU_REG_PROT 0x0024 +#define NPU_REG_CONFIG 0x0028 +#define NPU_REG_REGIONCFG 0x003c +#define NPU_REG_AXILIMIT0 0x0040 // U65 +#define NPU_REG_AXILIMIT1 0x0044 // U65 +#define NPU_REG_AXILIMIT2 0x0048 // U65 +#define NPU_REG_AXILIMIT3 0x004c // U65 +#define NPU_REG_MEM_ATTR0 0x0040 // U85 +#define NPU_REG_MEM_ATTR1 0x0044 // U85 +#define NPU_REG_MEM_ATTR2 0x0048 // U85 +#define NPU_REG_MEM_ATTR3 0x004c // U85 +#define NPU_REG_AXI_SRAM 0x0050 // U85 +#define NPU_REG_AXI_EXT 0x0054 // U85 + +#define NPU_REG_BASEP(x) (0x0080 + (x) * 8) +#define NPU_REG_BASEP_HI(x) (0x0084 + (x) * 8) +#define NPU_BASEP_REGION_MAX 8 + +#define ID_ARCH_MAJOR_MASK GENMASK(31, 28) +#define ID_ARCH_MINOR_MASK GENMASK(27, 20) +#define ID_ARCH_PATCH_MASK GENMASK(19, 16) +#define ID_VER_MAJOR_MASK GENMASK(11, 8) +#define ID_VER_MINOR_MASK GENMASK(7, 4) + +#define CONFIG_MACS_PER_CC_MASK GENMASK(3, 0) +#define CONFIG_CMD_STREAM_VER_MASK GENMASK(7, 4) + +#define STATUS_STATE_RUNNING BIT(0) +#define STATUS_IRQ_RAISED BIT(1) +#define STATUS_BUS_STATUS BIT(2) +#define STATUS_RESET_STATUS BIT(3) +#define STATUS_CMD_PARSE_ERR BIT(4) +#define STATUS_CMD_END_REACHED BIT(5) + +#define CMD_CLEAR_IRQ BIT(1) +#define CMD_TRANSITION_TO_RUN BIT(0) + +#define RESET_PENDING_CSL BIT(1) 
+#define RESET_PENDING_CPL BIT(0) + +#define PROT_ACTIVE_CSL BIT(1) + +enum ethosu_cmds { + NPU_OP_CONV = 0x2, + NPU_OP_DEPTHWISE = 0x3, + NPU_OP_POOL = 0x5, + NPU_OP_ELEMENTWISE = 0x6, + NPU_OP_RESIZE = 0x7, // U85 only + NPU_OP_DMA_START = 0x10, + NPU_SET_IFM_PAD_TOP = 0x100, + NPU_SET_IFM_PAD_LEFT = 0x101, + NPU_SET_IFM_PAD_RIGHT = 0x102, + NPU_SET_IFM_PAD_BOTTOM = 0x103, + NPU_SET_IFM_DEPTH_M1 = 0x104, + NPU_SET_IFM_PRECISION = 0x105, + NPU_SET_IFM_BROADCAST = 0x108, + NPU_SET_IFM_WIDTH0_M1 = 0x10a, + NPU_SET_IFM_HEIGHT0_M1 = 0x10b, + NPU_SET_IFM_HEIGHT1_M1 = 0x10c, + NPU_SET_IFM_REGION = 0x10f, + NPU_SET_OFM_WIDTH_M1 = 0x111, + NPU_SET_OFM_HEIGHT_M1 = 0x112, + NPU_SET_OFM_DEPTH_M1 = 0x113, + NPU_SET_OFM_PRECISION = 0x114, + NPU_SET_OFM_WIDTH0_M1 = 0x11a, + NPU_SET_OFM_HEIGHT0_M1 = 0x11b, + NPU_SET_OFM_HEIGHT1_M1 = 0x11c, + NPU_SET_OFM_REGION = 0x11f, + NPU_SET_KERNEL_WIDTH_M1 = 0x120, + NPU_SET_KERNEL_HEIGHT_M1 = 0x121, + NPU_SET_KERNEL_STRIDE = 0x122, + NPU_SET_WEIGHT_REGION = 0x128, + NPU_SET_SCALE_REGION = 0x129, + NPU_SET_DMA0_SRC_REGION = 0x130, + NPU_SET_DMA0_DST_REGION = 0x131, + NPU_SET_DMA0_SIZE0 = 0x132, + NPU_SET_DMA0_SIZE1 = 0x133, + NPU_SET_IFM2_BROADCAST = 0x180, + NPU_SET_IFM2_PRECISION = 0x185, + NPU_SET_IFM2_WIDTH0_M1 = 0x18a, + NPU_SET_IFM2_HEIGHT0_M1 = 0x18b, + NPU_SET_IFM2_HEIGHT1_M1 = 0x18c, + NPU_SET_IFM2_REGION = 0x18f, + NPU_SET_IFM_BASE0 = 0x4000, + NPU_SET_IFM_BASE1 = 0x4001, + NPU_SET_IFM_BASE2 = 0x4002, + NPU_SET_IFM_BASE3 = 0x4003, + NPU_SET_IFM_STRIDE_X = 0x4004, + NPU_SET_IFM_STRIDE_Y = 0x4005, + NPU_SET_IFM_STRIDE_C = 0x4006, + NPU_SET_OFM_BASE0 = 0x4010, + NPU_SET_OFM_BASE1 = 0x4011, + NPU_SET_OFM_BASE2 = 0x4012, + NPU_SET_OFM_BASE3 = 0x4013, + NPU_SET_OFM_STRIDE_X = 0x4014, + NPU_SET_OFM_STRIDE_Y = 0x4015, + NPU_SET_OFM_STRIDE_C = 0x4016, + NPU_SET_WEIGHT_BASE = 0x4020, + NPU_SET_WEIGHT_LENGTH = 0x4021, + NPU_SET_SCALE_BASE = 0x4022, + NPU_SET_SCALE_LENGTH = 0x4023, + NPU_SET_DMA0_SRC = 0x4030, + NPU_SET_DMA0_DST = 0x4031, + 
NPU_SET_DMA0_LEN = 0x4032, + NPU_SET_DMA0_SRC_STRIDE0 = 0x4033, + NPU_SET_DMA0_SRC_STRIDE1 = 0x4034, + NPU_SET_DMA0_DST_STRIDE0 = 0x4035, + NPU_SET_DMA0_DST_STRIDE1 = 0x4036, + NPU_SET_IFM2_BASE0 = 0x4080, + NPU_SET_IFM2_BASE1 = 0x4081, + NPU_SET_IFM2_BASE2 = 0x4082, + NPU_SET_IFM2_BASE3 = 0x4083, + NPU_SET_IFM2_STRIDE_X = 0x4084, + NPU_SET_IFM2_STRIDE_Y = 0x4085, + NPU_SET_IFM2_STRIDE_C = 0x4086, + NPU_SET_WEIGHT1_BASE = 0x4090, + NPU_SET_WEIGHT1_LENGTH = 0x4091, + NPU_SET_SCALE1_BASE = 0x4092, + NPU_SET_WEIGHT2_BASE = 0x4092, + NPU_SET_SCALE1_LENGTH = 0x4093, + NPU_SET_WEIGHT2_LENGTH = 0x4093, + NPU_SET_WEIGHT3_BASE = 0x4094, + NPU_SET_WEIGHT3_LENGTH = 0x4095, +}; + +#define ETHOSU_SRAM_REGION 2 /* Matching Vela compiler */ + +/** + * struct ethosu_device - Ethosu device + */ +struct ethosu_device { + /** @base: Base drm_device. */ + struct drm_device base; + + /** @iomem: CPU mapping of the registers. */ + void __iomem *regs; + + void __iomem *sram; + struct gen_pool *srampool; + dma_addr_t sramphys; + + struct clk_bulk_data *clks; + int num_clks; + int irq; + + struct drm_ethosu_npu_info npu_info; + + struct ethosu_job *in_flight_job; + /* For in_flight_job and ethosu_job_hw_submit() */ + struct mutex job_lock; + + /* For dma_fence */ + spinlock_t fence_lock; + + struct drm_gpu_scheduler sched; + /* For ethosu_job_do_push() */ + struct mutex sched_lock; + u64 fence_context; + u64 emit_seqno; +}; + +#define to_ethosu_device(drm_dev) \ + ((struct ethosu_device *)container_of(drm_dev, struct ethosu_device, base)) + +static inline bool ethosu_is_u65(const struct ethosu_device *ethosudev) +{ + return FIELD_GET(ID_ARCH_MAJOR_MASK, ethosudev->npu_info.id) == 1; +} + +#endif diff --git a/drivers/accel/ethosu/ethosu_drv.c b/drivers/accel/ethosu/ethosu_drv.c new file mode 100644 index 000000000000..e05a69bf5574 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_drv.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0-only or MIT +// Copyright (C) 2025 Arm, Ltd. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ethosu_drv.h" +#include "ethosu_device.h" +#include "ethosu_gem.h" +#include "ethosu_job.h" + +static int ethosu_ioctl_dev_query(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct ethosu_device *ethosudev = to_ethosu_device(ddev); + struct drm_ethosu_dev_query *args = data; + + if (!args->pointer) { + switch (args->type) { + case DRM_ETHOSU_DEV_QUERY_NPU_INFO: + args->size = sizeof(ethosudev->npu_info); + return 0; + default: + return -EINVAL; + } + } + + switch (args->type) { + case DRM_ETHOSU_DEV_QUERY_NPU_INFO: + if (args->size < offsetofend(struct drm_ethosu_npu_info, sram_size)) + return -EINVAL; + return copy_struct_to_user(u64_to_user_ptr(args->pointer), + args->size, + ðosudev->npu_info, + sizeof(ethosudev->npu_info), NULL); + default: + return -EINVAL; + } +} + +#define ETHOSU_BO_FLAGS DRM_ETHOSU_BO_NO_MMAP + +static int ethosu_ioctl_bo_create(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_bo_create *args = data; + int cookie, ret; + + if (!drm_dev_enter(ddev, &cookie)) + return -ENODEV; + + if (!args->size || (args->flags & ~ETHOSU_BO_FLAGS)) { + ret = -EINVAL; + goto out_dev_exit; + } + + ret = ethosu_gem_create_with_handle(file, ddev, &args->size, + args->flags, &args->handle); + +out_dev_exit: + drm_dev_exit(cookie); + return ret; +} + +static int ethosu_ioctl_bo_wait(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_bo_wait *args = data; + int cookie, ret; + unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns); + + if (args->pad) + return -EINVAL; + + if (!drm_dev_enter(ddev, &cookie)) + return -ENODEV; + + ret = drm_gem_dma_resv_wait(file, args->handle, true, timeout); + + drm_dev_exit(cookie); + return ret; +} + +static int ethosu_ioctl_bo_mmap_offset(struct drm_device *ddev, 
void *data, + struct drm_file *file) +{ + struct drm_ethosu_bo_mmap_offset *args = data; + struct drm_gem_object *obj; + + if (args->pad) + return -EINVAL; + + obj = drm_gem_object_lookup(file, args->handle); + if (!obj) + return -ENOENT; + + args->offset = drm_vma_node_offset_addr(&obj->vma_node); + drm_gem_object_put(obj); + return 0; +} + +static int ethosu_ioctl_cmdstream_bo_create(struct drm_device *ddev, void *data, + struct drm_file *file) +{ + struct drm_ethosu_cmdstream_bo_create *args = data; + int cookie, ret; + + if (!drm_dev_enter(ddev, &cookie)) + return -ENODEV; + + if (!args->size || !args->data || args->pad || args->flags) { + ret = -EINVAL; + goto out_dev_exit; + } + + args->flags |= DRM_ETHOSU_BO_NO_MMAP; + + ret = ethosu_gem_cmdstream_create(file, ddev, args->size, args->data, + args->flags, &args->handle); + +out_dev_exit: + drm_dev_exit(cookie); + return ret; +} + +static int ethosu_open(struct drm_device *ddev, struct drm_file *file) +{ + int ret = 0; + + if (!try_module_get(THIS_MODULE)) + return -EINVAL; + + struct ethosu_file_priv __free(kfree) *priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err_put_mod; + } + priv->edev = to_ethosu_device(ddev); + + ret = ethosu_job_open(priv); + if (ret) + goto err_put_mod; + + file->driver_priv = no_free_ptr(priv); + return 0; + +err_put_mod: + module_put(THIS_MODULE); + return ret; +} + +static void ethosu_postclose(struct drm_device *ddev, struct drm_file *file) +{ + ethosu_job_close(file->driver_priv); + kfree(file->driver_priv); + module_put(THIS_MODULE); +} + +static const struct drm_ioctl_desc ethosu_drm_driver_ioctls[] = { +#define ETHOSU_IOCTL(n, func, flags) \ + DRM_IOCTL_DEF_DRV(ETHOSU_##n, ethosu_ioctl_##func, flags) + + ETHOSU_IOCTL(DEV_QUERY, dev_query, 0), + ETHOSU_IOCTL(BO_CREATE, bo_create, 0), + ETHOSU_IOCTL(BO_WAIT, bo_wait, 0), + ETHOSU_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, 0), + ETHOSU_IOCTL(CMDSTREAM_BO_CREATE, cmdstream_bo_create, 0), + 
ETHOSU_IOCTL(SUBMIT, submit, 0), +}; + +DEFINE_DRM_ACCEL_FOPS(ethosu_drm_driver_fops); + +/* + * Ethosu driver version: + * - 1.0 - initial interface + */ +static const struct drm_driver ethosu_drm_driver = { + .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM, + .open = ethosu_open, + .postclose = ethosu_postclose, + .ioctls = ethosu_drm_driver_ioctls, + .num_ioctls = ARRAY_SIZE(ethosu_drm_driver_ioctls), + .fops = ðosu_drm_driver_fops, + .name = "ethosu", + .desc = "Arm Ethos-U Accel driver", + .major = 1, + .minor = 0, + + .gem_create_object = ethosu_gem_create_object, +}; + +#define U65_DRAM_AXI_LIMIT_CFG 0x1f3f0002 +#define U65_SRAM_AXI_LIMIT_CFG 0x1f3f00b0 +#define U85_AXI_EXT_CFG 0x00021f3f +#define U85_AXI_SRAM_CFG 0x00021f3f +#define U85_MEM_ATTR0_CFG 0x00000000 +#define U85_MEM_ATTR2_CFG 0x000000b7 + +static int ethosu_reset(struct ethosu_device *ethosudev) +{ + int ret; + u32 reg; + + writel_relaxed(RESET_PENDING_CSL, ethosudev->regs + NPU_REG_RESET); + ret = readl_poll_timeout(ethosudev->regs + NPU_REG_STATUS, reg, + !FIELD_GET(STATUS_RESET_STATUS, reg), + USEC_PER_MSEC, USEC_PER_SEC); + if (ret) + return ret; + + if (!FIELD_GET(PROT_ACTIVE_CSL, readl_relaxed(ethosudev->regs + NPU_REG_PROT))) { + dev_warn(ethosudev->base.dev, "Could not reset to non-secure mode (PROT = %x)\n", + readl_relaxed(ethosudev->regs + NPU_REG_PROT)); + } + + /* + * Assign region 2 (SRAM) to AXI M0 (AXILIMIT0), + * everything else to AXI M1 (AXILIMIT2) + */ + writel_relaxed(0x0000aa8a, ethosudev->regs + NPU_REG_REGIONCFG); + if (ethosu_is_u65(ethosudev)) { + writel_relaxed(U65_SRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT0); + writel_relaxed(U65_DRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT2); + } else { + writel_relaxed(U85_AXI_SRAM_CFG, ethosudev->regs + NPU_REG_AXI_SRAM); + writel_relaxed(U85_AXI_EXT_CFG, ethosudev->regs + NPU_REG_AXI_EXT); + writel_relaxed(U85_MEM_ATTR0_CFG, ethosudev->regs + NPU_REG_MEM_ATTR0); // SRAM + 
writel_relaxed(U85_MEM_ATTR2_CFG, ethosudev->regs + NPU_REG_MEM_ATTR2); // DRAM + } + + if (ethosudev->sram) + memset_io(ethosudev->sram, 0, ethosudev->npu_info.sram_size); + + return 0; +} + +static int ethosu_device_resume(struct device *dev) +{ + struct ethosu_device *ethosudev = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_prepare_enable(ethosudev->num_clks, ethosudev->clks); + if (ret) + return ret; + + ret = ethosu_reset(ethosudev); + if (!ret) + return 0; + + clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks); + return ret; +} + +static int ethosu_device_suspend(struct device *dev) +{ + struct ethosu_device *ethosudev = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks); + return 0; +} + +static int ethosu_sram_init(struct ethosu_device *ethosudev) +{ + ethosudev->npu_info.sram_size = 0; + + ethosudev->srampool = of_gen_pool_get(ethosudev->base.dev->of_node, "sram", 0); + if (!ethosudev->srampool) + return 0; + + ethosudev->npu_info.sram_size = gen_pool_size(ethosudev->srampool); + + ethosudev->sram = (void __iomem *)gen_pool_dma_alloc(ethosudev->srampool, + ethosudev->npu_info.sram_size, + ðosudev->sramphys); + if (!ethosudev->sram) { + dev_err(ethosudev->base.dev, "failed to allocate from SRAM pool\n"); + return -ENOMEM; + } + + return 0; +} + +static int ethosu_init(struct ethosu_device *ethosudev) +{ + int ret; + u32 id, config; + + ret = ethosu_device_resume(ethosudev->base.dev); + if (ret) + return ret; + + pm_runtime_set_autosuspend_delay(ethosudev->base.dev, 50); + pm_runtime_use_autosuspend(ethosudev->base.dev); + ret = devm_pm_runtime_set_active_enabled(ethosudev->base.dev); + if (ret) + return ret; + pm_runtime_get_noresume(ethosudev->base.dev); + + ethosudev->npu_info.id = id = readl_relaxed(ethosudev->regs + NPU_REG_ID); + ethosudev->npu_info.config = config = readl_relaxed(ethosudev->regs + NPU_REG_CONFIG); + + ethosu_sram_init(ethosudev); + + dev_info(ethosudev->base.dev, + "Ethos-U 
NPU, arch v%ld.%ld.%ld, rev r%ldp%ld, cmd stream ver%ld, %d MACs, %dKB SRAM\n", + FIELD_GET(ID_ARCH_MAJOR_MASK, id), + FIELD_GET(ID_ARCH_MINOR_MASK, id), + FIELD_GET(ID_ARCH_PATCH_MASK, id), + FIELD_GET(ID_VER_MAJOR_MASK, id), + FIELD_GET(ID_VER_MINOR_MASK, id), + FIELD_GET(CONFIG_CMD_STREAM_VER_MASK, config), + 1 << FIELD_GET(CONFIG_MACS_PER_CC_MASK, config), + ethosudev->npu_info.sram_size / 1024); + + return 0; +} + +static int ethosu_probe(struct platform_device *pdev) +{ + int ret; + struct ethosu_device *ethosudev; + + ethosudev = devm_drm_dev_alloc(&pdev->dev, ðosu_drm_driver, + struct ethosu_device, base); + if (IS_ERR(ethosudev)) + return -ENOMEM; + platform_set_drvdata(pdev, ethosudev); + + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + + ethosudev->regs = devm_platform_ioremap_resource(pdev, 0); + + ethosudev->num_clks = devm_clk_bulk_get_all(&pdev->dev, ðosudev->clks); + if (ethosudev->num_clks < 0) + return ethosudev->num_clks; + + ret = ethosu_job_init(ethosudev); + if (ret) + return ret; + + ret = ethosu_init(ethosudev); + if (ret) + return ret; + + ret = drm_dev_register(ðosudev->base, 0); + if (ret) + pm_runtime_dont_use_autosuspend(ethosudev->base.dev); + + pm_runtime_put_autosuspend(ethosudev->base.dev); + return ret; +} + +static void ethosu_remove(struct platform_device *pdev) +{ + struct ethosu_device *ethosudev = dev_get_drvdata(&pdev->dev); + + drm_dev_unregister(ðosudev->base); + ethosu_job_fini(ethosudev); + if (ethosudev->sram) + gen_pool_free(ethosudev->srampool, (unsigned long)ethosudev->sram, + ethosudev->npu_info.sram_size); +} + +static const struct of_device_id dt_match[] = { + { .compatible = "arm,ethos-u65" }, + { .compatible = "arm,ethos-u85" }, + {} +}; +MODULE_DEVICE_TABLE(of, dt_match); + +static DEFINE_RUNTIME_DEV_PM_OPS(ethosu_pm_ops, + ethosu_device_suspend, + ethosu_device_resume, + NULL); + +static struct platform_driver ethosu_driver = { + .probe = ethosu_probe, + .remove = ethosu_remove, + .driver = { + 
.name = "ethosu", + .pm = pm_ptr(ðosu_pm_ops), + .of_match_table = dt_match, + }, +}; +module_platform_driver(ethosu_driver); + +MODULE_AUTHOR("Rob Herring "); +MODULE_DESCRIPTION("Arm Ethos-U Accel Driver"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/accel/ethosu/ethosu_drv.h b/drivers/accel/ethosu/ethosu_drv.h new file mode 100644 index 000000000000..9e21dfe94184 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_drv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright 2025 Arm, Ltd. */ +#ifndef __ETHOSU_DRV_H__ +#define __ETHOSU_DRV_H__ + +#include + +struct ethosu_device; + +struct ethosu_file_priv { + struct ethosu_device *edev; + struct drm_sched_entity sched_entity; +}; + +#endif diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c new file mode 100644 index 000000000000..473b5f5d7514 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_gem.c @@ -0,0 +1,704 @@ +// SPDX-License-Identifier: GPL-2.0-only or MIT +/* Copyright 2025 Arm, Ltd. */ + +#include +#include + +#include + +#include "ethosu_device.h" +#include "ethosu_gem.h" + +static void ethosu_gem_free_object(struct drm_gem_object *obj) +{ + struct ethosu_gem_object *bo = to_ethosu_bo(obj); + + kfree(bo->info); + drm_gem_free_mmap_offset(&bo->base.base); + drm_gem_dma_free(&bo->base); +} + +static int ethosu_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct ethosu_gem_object *bo = to_ethosu_bo(obj); + + /* Don't allow mmap on objects that have the NO_MMAP flag set. 
*/ + if (bo->flags & DRM_ETHOSU_BO_NO_MMAP) + return -EINVAL; + + return drm_gem_dma_object_mmap(obj, vma); +} + +static const struct drm_gem_object_funcs ethosu_gem_funcs = { + .free = ethosu_gem_free_object, + .print_info = drm_gem_dma_object_print_info, + .get_sg_table = drm_gem_dma_object_get_sg_table, + .vmap = drm_gem_dma_object_vmap, + .mmap = ethosu_gem_mmap, + .vm_ops = &drm_gem_dma_vm_ops, +}; + +/** + * ethosu_gem_create_object - Implementation of driver->gem_create_object. + * @ddev: DRM device + * @size: Size in bytes of the memory the object will reference + * + * This lets the GEM helpers allocate object structs for us, and keep + * our BO stats correct. + */ +struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, size_t size) +{ + struct ethosu_gem_object *obj; + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) + return ERR_PTR(-ENOMEM); + + obj->base.base.funcs = ðosu_gem_funcs; + return &obj->base.base; +} + +/** + * ethosu_gem_create_with_handle() - Create a GEM object and attach it to a handle. + * @file: DRM file. + * @ddev: DRM device. + * @size: Size of the GEM object to allocate. + * @flags: Combination of drm_ethosu_bo_flags flags. + * @handle: Pointer holding the handle pointing to the new GEM object. + * + * Return: Zero on success + */ +int ethosu_gem_create_with_handle(struct drm_file *file, + struct drm_device *ddev, + u64 *size, u32 flags, u32 *handle) +{ + struct drm_gem_dma_object *mem; + struct ethosu_gem_object *bo; + int ret; + + mem = drm_gem_dma_create(ddev, *size); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + bo = to_ethosu_bo(&mem->base); + bo->flags = flags; + + /* + * Allocate an id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file, &mem->base, handle); + if (!ret) + *size = bo->base.base.size; + + /* drop reference from allocate - handle holds it now. 
*/ + drm_gem_object_put(&mem->base); + + return ret; +} + +struct dma { + s8 region; + u64 len; + u64 offset; + s64 stride[2]; +}; + +struct dma_state { + u16 size0; + u16 size1; + s8 mode; + struct dma src; + struct dma dst; +}; + +struct buffer { + u64 base; + u32 length; + s8 region; +}; + +struct feat_matrix { + u64 base[4]; + s64 stride_x; + s64 stride_y; + s64 stride_c; + s8 region; + u8 broadcast; + u16 stride_kernel; + u16 precision; + u16 depth; + u16 width; + u16 width0; + u16 height[3]; + u8 pad_top; + u8 pad_left; + u8 pad_bottom; + u8 pad_right; +}; + +struct cmd_state { + struct dma_state dma; + struct buffer scale[2]; + struct buffer weight[4]; + struct feat_matrix ofm; + struct feat_matrix ifm; + struct feat_matrix ifm2; +}; + +static void cmd_state_init(struct cmd_state *st) +{ + /* Initialize to all 1s to detect missing setup */ + memset(st, 0xff, sizeof(*st)); +} + +static u64 cmd_to_addr(u32 *cmd) +{ + return ((u64)((cmd[0] & 0xff0000) << 16)) | cmd[1]; +} + +static u64 dma_length(struct ethosu_validated_cmdstream_info *info, + struct dma_state *dma_st, struct dma *dma) +{ + s8 mode = dma_st->mode; + u64 len = dma->len; + + if (mode >= 1) { + len += dma->stride[0]; + len *= dma_st->size0; + } + if (mode == 2) { + len += dma->stride[1]; + len *= dma_st->size1; + } + if (dma->region >= 0) + info->region_size[dma->region] = max(info->region_size[dma->region], + len + dma->offset); + + return len; +} + +static u64 feat_matrix_length(struct ethosu_validated_cmdstream_info *info, + struct feat_matrix *fm, + u32 x, u32 y, u32 c) +{ + u32 element_size, storage = fm->precision >> 14; + int tile = 0; + u64 addr; + + if (fm->region < 0) + return U64_MAX; + + switch (storage) { + case 0: + if (x >= fm->width0 + 1) { + x -= fm->width0 + 1; + tile += 1; + } + if (y >= fm->height[tile] + 1) { + y -= fm->height[tile] + 1; + tile += 2; + } + break; + case 1: + if (y >= fm->height[1] + 1) { + y -= fm->height[1] + 1; + tile = 2; + } else if (y >= fm->height[0] + 
1) { + y -= fm->height[0] + 1; + tile = 1; + } + break; + } + if (fm->base[tile] == U64_MAX) + return U64_MAX; + + addr = fm->base[tile] + y * fm->stride_y; + + switch ((fm->precision >> 6) & 0x3) { // format + case 0: //nhwc: + addr += x * fm->stride_x + c; + break; + case 1: //nhcwb16: + element_size = BIT((fm->precision >> 1) & 0x3); + + addr += (c / 16) * fm->stride_c + (16 * x + (c & 0xf)) * element_size; + break; + } + + info->region_size[fm->region] = max(info->region_size[fm->region], addr + 1); + + return addr; +} + +static int calc_sizes(struct drm_device *ddev, + struct ethosu_validated_cmdstream_info *info, + u16 op, struct cmd_state *st, + bool ifm, bool ifm2, bool weight, bool scale) +{ + u64 len; + + if (ifm) { + if (st->ifm.stride_kernel == U16_MAX) + return -EINVAL; + u32 stride_y = ((st->ifm.stride_kernel >> 8) & 0x2) + + ((st->ifm.stride_kernel >> 1) & 0x1) + 1; + u32 stride_x = ((st->ifm.stride_kernel >> 5) & 0x2) + + (st->ifm.stride_kernel & 0x1) + 1; + u32 ifm_height = st->ofm.height[2] * stride_y + + st->ifm.height[2] - (st->ifm.pad_top + st->ifm.pad_bottom); + u32 ifm_width = st->ofm.width * stride_x + + st->ifm.width - (st->ifm.pad_left + st->ifm.pad_right); + + len = feat_matrix_length(info, &st->ifm, ifm_width, + ifm_height, st->ifm.depth); + dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n", + op, st->ifm.region, st->ifm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + if (ifm2) { + len = feat_matrix_length(info, &st->ifm2, st->ifm.depth, + 0, st->ofm.depth); + dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n", + op, st->ifm2.region, st->ifm2.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + if (weight) { + dev_dbg(ddev->dev, "op %d: W:%d:0x%llx-0x%llx\n", + op, st->weight[0].region, st->weight[0].base, + st->weight[0].base + st->weight[0].length - 1); + if (st->weight[0].region < 0 || st->weight[0].base == U64_MAX || + st->weight[0].length == U32_MAX) + return -EINVAL; + 
info->region_size[st->weight[0].region] = + max(info->region_size[st->weight[0].region], + st->weight[0].base + st->weight[0].length); + } + + if (scale) { + dev_dbg(ddev->dev, "op %d: S:%d:0x%llx-0x%llx\n", + op, st->scale[0].region, st->scale[0].base, + st->scale[0].base + st->scale[0].length - 1); + if (st->scale[0].region < 0 || st->scale[0].base == U64_MAX || + st->scale[0].length == U32_MAX) + return -EINVAL; + info->region_size[st->scale[0].region] = + max(info->region_size[st->scale[0].region], + st->scale[0].base + st->scale[0].length); + } + + len = feat_matrix_length(info, &st->ofm, st->ofm.width, + st->ofm.height[2], st->ofm.depth); + dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n", + op, st->ofm.region, st->ofm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + info->output_region[st->ofm.region] = true; + + return 0; +} + +static int calc_sizes_elemwise(struct drm_device *ddev, + struct ethosu_validated_cmdstream_info *info, + u16 op, struct cmd_state *st, + bool ifm, bool ifm2) +{ + u32 height, width, depth; + u64 len; + + if (ifm) { + height = st->ifm.broadcast & 0x1 ? 0 : st->ofm.height[2]; + width = st->ifm.broadcast & 0x2 ? 0 : st->ofm.width; + depth = st->ifm.broadcast & 0x4 ? 0 : st->ofm.depth; + + len = feat_matrix_length(info, &st->ifm, width, + height, depth); + dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n", + op, st->ifm.region, st->ifm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + if (ifm2) { + height = st->ifm2.broadcast & 0x1 ? 0 : st->ofm.height[2]; + width = st->ifm2.broadcast & 0x2 ? 0 : st->ofm.width; + depth = st->ifm2.broadcast & 0x4 ? 
0 : st->ofm.depth; + + len = feat_matrix_length(info, &st->ifm2, width, + height, depth); + dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n", + op, st->ifm2.region, st->ifm2.base[0], len); + if (len == U64_MAX) + return -EINVAL; + } + + len = feat_matrix_length(info, &st->ofm, st->ofm.width, + st->ofm.height[2], st->ofm.depth); + dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n", + op, st->ofm.region, st->ofm.base[0], len); + if (len == U64_MAX) + return -EINVAL; + info->output_region[st->ofm.region] = true; + + return 0; +} + +static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev, + u32 __user *ucmds, + struct ethosu_gem_object *bo, + u32 size) +{ + struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc(sizeof(*info), GFP_KERNEL); + struct ethosu_device *edev = to_ethosu_device(ddev); + u32 *bocmds = bo->base.vaddr; + struct cmd_state st; + int i, ret; + + if (!info) + return -ENOMEM; + info->cmd_size = size; + + cmd_state_init(&st); + + for (i = 0; i < size / 4; i++) { + bool use_ifm, use_ifm2, use_scale; + u64 dstlen, srclen; + u16 cmd, param; + u32 cmds[2]; + u64 addr; + + if (get_user(cmds[0], ucmds++)) + return -EFAULT; + + bocmds[i] = cmds[0]; + + cmd = cmds[0]; + param = cmds[0] >> 16; + + if (cmd & 0x4000) { + if (get_user(cmds[1], ucmds++)) + return -EFAULT; + + i++; + bocmds[i] = cmds[1]; + addr = cmd_to_addr(cmds); + } + + switch (cmd) { + case NPU_OP_DMA_START: + srclen = dma_length(info, &st.dma, &st.dma.src); + dstlen = dma_length(info, &st.dma, &st.dma.dst); + + if (st.dma.dst.region >= 0) + info->output_region[st.dma.dst.region] = true; + dev_dbg(ddev->dev, "cmd: DMA SRC:%d:0x%llx+0x%llx DST:%d:0x%llx+0x%llx\n", + st.dma.src.region, st.dma.src.offset, srclen, + st.dma.dst.region, st.dma.dst.offset, dstlen); + break; + case NPU_OP_CONV: + case NPU_OP_DEPTHWISE: + use_ifm2 = param & 0x1; // weights_ifm2 + use_scale = !(st.ofm.precision & 0x100); + ret = calc_sizes(ddev, info, cmd, &st, true, use_ifm2, + 
!use_ifm2, use_scale); + if (ret) + return ret; + break; + case NPU_OP_POOL: + use_ifm = param != 0x4; // pooling mode + use_scale = !(st.ofm.precision & 0x100); + ret = calc_sizes(ddev, info, cmd, &st, use_ifm, false, + false, use_scale); + if (ret) + return ret; + break; + case NPU_OP_ELEMENTWISE: + use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) || + (param == 6) || (param == 7) || (param == 0x24)); + use_ifm = st.ifm.broadcast != 8; + ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2); + if (ret) + return ret; + break; + case NPU_OP_RESIZE: // U85 only + WARN_ON(1); // TODO + break; + case NPU_SET_KERNEL_WIDTH_M1: + st.ifm.width = param; + break; + case NPU_SET_KERNEL_HEIGHT_M1: + st.ifm.height[2] = param; + break; + case NPU_SET_KERNEL_STRIDE: + st.ifm.stride_kernel = param; + break; + case NPU_SET_IFM_PAD_TOP: + st.ifm.pad_top = param & 0x7f; + break; + case NPU_SET_IFM_PAD_LEFT: + st.ifm.pad_left = param & 0x7f; + break; + case NPU_SET_IFM_PAD_RIGHT: + st.ifm.pad_right = param & 0xff; + break; + case NPU_SET_IFM_PAD_BOTTOM: + st.ifm.pad_bottom = param & 0xff; + break; + case NPU_SET_IFM_DEPTH_M1: + st.ifm.depth = param; + break; + case NPU_SET_IFM_PRECISION: + st.ifm.precision = param; + break; + case NPU_SET_IFM_BROADCAST: + st.ifm.broadcast = param; + break; + case NPU_SET_IFM_REGION: + st.ifm.region = param & 0x7f; + break; + case NPU_SET_IFM_WIDTH0_M1: + st.ifm.width0 = param; + break; + case NPU_SET_IFM_HEIGHT0_M1: + st.ifm.height[0] = param; + break; + case NPU_SET_IFM_HEIGHT1_M1: + st.ifm.height[1] = param; + break; + case NPU_SET_IFM_BASE0: + case NPU_SET_IFM_BASE1: + case NPU_SET_IFM_BASE2: + case NPU_SET_IFM_BASE3: + st.ifm.base[cmd & 0x3] = addr; + break; + case NPU_SET_IFM_STRIDE_X: + st.ifm.stride_x = addr; + break; + case NPU_SET_IFM_STRIDE_Y: + st.ifm.stride_y = addr; + break; + case NPU_SET_IFM_STRIDE_C: + st.ifm.stride_c = addr; + break; + + case NPU_SET_OFM_WIDTH_M1: + st.ofm.width = param; + break; + case 
NPU_SET_OFM_HEIGHT_M1: + st.ofm.height[2] = param; + break; + case NPU_SET_OFM_DEPTH_M1: + st.ofm.depth = param; + break; + case NPU_SET_OFM_PRECISION: + st.ofm.precision = param; + break; + case NPU_SET_OFM_REGION: + st.ofm.region = param & 0x7; + break; + case NPU_SET_OFM_WIDTH0_M1: + st.ofm.width0 = param; + break; + case NPU_SET_OFM_HEIGHT0_M1: + st.ofm.height[0] = param; + break; + case NPU_SET_OFM_HEIGHT1_M1: + st.ofm.height[1] = param; + break; + case NPU_SET_OFM_BASE0: + case NPU_SET_OFM_BASE1: + case NPU_SET_OFM_BASE2: + case NPU_SET_OFM_BASE3: + st.ofm.base[cmd & 0x3] = addr; + break; + case NPU_SET_OFM_STRIDE_X: + st.ofm.stride_x = addr; + break; + case NPU_SET_OFM_STRIDE_Y: + st.ofm.stride_y = addr; + break; + case NPU_SET_OFM_STRIDE_C: + st.ofm.stride_c = addr; + break; + + case NPU_SET_IFM2_BROADCAST: + st.ifm2.broadcast = param; + break; + case NPU_SET_IFM2_PRECISION: + st.ifm2.precision = param; + break; + case NPU_SET_IFM2_REGION: + st.ifm2.region = param & 0x7; + break; + case NPU_SET_IFM2_WIDTH0_M1: + st.ifm2.width0 = param; + break; + case NPU_SET_IFM2_HEIGHT0_M1: + st.ifm2.height[0] = param; + break; + case NPU_SET_IFM2_HEIGHT1_M1: + st.ifm2.height[1] = param; + break; + case NPU_SET_IFM2_BASE0: + case NPU_SET_IFM2_BASE1: + case NPU_SET_IFM2_BASE2: + case NPU_SET_IFM2_BASE3: + st.ifm2.base[cmd & 0x3] = addr; + break; + case NPU_SET_IFM2_STRIDE_X: + st.ifm2.stride_x = addr; + break; + case NPU_SET_IFM2_STRIDE_Y: + st.ifm2.stride_y = addr; + break; + case NPU_SET_IFM2_STRIDE_C: + st.ifm2.stride_c = addr; + break; + + case NPU_SET_WEIGHT_REGION: + st.weight[0].region = param & 0x7; + break; + case NPU_SET_SCALE_REGION: + st.scale[0].region = param & 0x7; + break; + case NPU_SET_WEIGHT_BASE: + st.weight[0].base = addr; + break; + case NPU_SET_WEIGHT_LENGTH: + st.weight[0].length = cmds[1]; + break; + case NPU_SET_SCALE_BASE: + st.scale[0].base = addr; + break; + case NPU_SET_SCALE_LENGTH: + st.scale[0].length = cmds[1]; + break; + case 
NPU_SET_WEIGHT1_BASE: + st.weight[1].base = addr; + break; + case NPU_SET_WEIGHT1_LENGTH: + st.weight[1].length = cmds[1]; + break; + case NPU_SET_SCALE1_BASE: // NPU_SET_WEIGHT2_BASE (U85) + if (ethosu_is_u65(edev)) + st.scale[1].base = addr; + else + st.weight[2].base = addr; + break; + case NPU_SET_SCALE1_LENGTH: // NPU_SET_WEIGHT2_LENGTH (U85) + if (ethosu_is_u65(edev)) + st.scale[1].length = cmds[1]; + else + st.weight[1].length = cmds[1]; + break; + case NPU_SET_WEIGHT3_BASE: + st.weight[3].base = addr; + break; + case NPU_SET_WEIGHT3_LENGTH: + st.weight[3].length = cmds[1]; + break; + + case NPU_SET_DMA0_SRC_REGION: + if (param & 0x100) + st.dma.src.region = -1; + else + st.dma.src.region = param & 0x7; + st.dma.mode = (param >> 9) & 0x3; + break; + case NPU_SET_DMA0_DST_REGION: + if (param & 0x100) + st.dma.dst.region = -1; + else + st.dma.dst.region = param & 0x7; + break; + case NPU_SET_DMA0_SIZE0: + st.dma.size0 = param; + break; + case NPU_SET_DMA0_SIZE1: + st.dma.size1 = param; + break; + case NPU_SET_DMA0_SRC_STRIDE0: + st.dma.src.stride[0] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_SRC_STRIDE1: + st.dma.src.stride[1] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_DST_STRIDE0: + st.dma.dst.stride[0] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_DST_STRIDE1: + st.dma.dst.stride[1] = ((s64)addr << 24) >> 24; + break; + case NPU_SET_DMA0_SRC: + st.dma.src.offset = addr; + break; + case NPU_SET_DMA0_DST: + st.dma.dst.offset = addr; + break; + case NPU_SET_DMA0_LEN: + st.dma.src.len = st.dma.dst.len = addr; + break; + default: + break; + } + } + + for (i = 0; i < NPU_BASEP_REGION_MAX; i++) { + if (!info->region_size[i]) + continue; + dev_dbg(ddev->dev, "region %d max size: 0x%llx\n", + i, info->region_size[i]); + } + + bo->info = no_free_ptr(info); + return 0; +} + +/** + * ethosu_gem_cmdstream_create() - Create a GEM object and attach it to a handle. + * @file: DRM file. + * @ddev: DRM device. 
+ * @exclusive_vm: Exclusive VM. Not NULL if the GEM object can't be shared. + * @size: Size of the GEM object to allocate. + * @flags: Combination of drm_ethosu_bo_flags flags. + * @handle: Pointer holding the handle pointing to the new GEM object. + * + * Return: Zero on success + */ +int ethosu_gem_cmdstream_create(struct drm_file *file, + struct drm_device *ddev, + u32 size, u64 data, u32 flags, u32 *handle) +{ + int ret; + struct drm_gem_dma_object *mem; + struct ethosu_gem_object *bo; + + mem = drm_gem_dma_create(ddev, size); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + bo = to_ethosu_bo(&mem->base); + bo->flags = flags; + + ret = ethosu_gem_cmdstream_copy_and_validate(ddev, + (void __user *)(uintptr_t)data, + bo, size); + if (ret) + goto fail; + + /* + * Allocate an id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file, &mem->base, handle); + +fail: + /* drop reference from allocate - handle holds it now. */ + drm_gem_object_put(&mem->base); + + return ret; +} diff --git a/drivers/accel/ethosu/ethosu_gem.h b/drivers/accel/ethosu/ethosu_gem.h new file mode 100644 index 000000000000..3922895a60fb --- /dev/null +++ b/drivers/accel/ethosu/ethosu_gem.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ +/* Copyright 2025 Arm, Ltd. */ + +#ifndef __ETHOSU_GEM_H__ +#define __ETHOSU_GEM_H__ + +#include "ethosu_device.h" +#include + +struct ethosu_validated_cmdstream_info { + u32 cmd_size; + u64 region_size[NPU_BASEP_REGION_MAX]; + bool output_region[NPU_BASEP_REGION_MAX]; +}; + +/** + * struct ethosu_gem_object - Driver specific GEM object. + */ +struct ethosu_gem_object { + /** @base: Inherit from drm_gem_shmem_object. */ + struct drm_gem_dma_object base; + + struct ethosu_validated_cmdstream_info *info; + + /** @flags: Combination of drm_ethosu_bo_flags flags. 
*/ + u32 flags; +}; + +static inline +struct ethosu_gem_object *to_ethosu_bo(struct drm_gem_object *obj) +{ + return container_of(to_drm_gem_dma_obj(obj), struct ethosu_gem_object, base); +} + +struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, + size_t size); + +int ethosu_gem_create_with_handle(struct drm_file *file, + struct drm_device *ddev, + u64 *size, u32 flags, uint32_t *handle); + +int ethosu_gem_cmdstream_create(struct drm_file *file, + struct drm_device *ddev, + u32 size, u64 data, u32 flags, u32 *handle); + +#endif /* __ETHOSU_GEM_H__ */ diff --git a/drivers/accel/ethosu/ethosu_job.c b/drivers/accel/ethosu/ethosu_job.c new file mode 100644 index 000000000000..26e7a2f64d71 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_job.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* Copyright 2024-2025 Tomeu Vizoso */ +/* Copyright 2025 Arm, Ltd. */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "ethosu_device.h" +#include "ethosu_drv.h" +#include "ethosu_gem.h" +#include "ethosu_job.h" + +#define JOB_TIMEOUT_MS 500 + +static struct ethosu_job *to_ethosu_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct ethosu_job, base); +} + +static const char *ethosu_fence_get_driver_name(struct dma_fence *fence) +{ + return "ethosu"; +} + +static const char *ethosu_fence_get_timeline_name(struct dma_fence *fence) +{ + return "ethosu-npu"; +} + +static const struct dma_fence_ops ethosu_fence_ops = { + .get_driver_name = ethosu_fence_get_driver_name, + .get_timeline_name = ethosu_fence_get_timeline_name, +}; + +static void ethosu_job_hw_submit(struct ethosu_device *dev, struct ethosu_job *job) +{ + struct drm_gem_dma_object *cmd_bo = to_drm_gem_dma_obj(job->cmd_bo); + struct ethosu_validated_cmdstream_info *cmd_info = to_ethosu_bo(job->cmd_bo)->info; + + for (int i = 0; i < job->region_cnt; i++) { + struct drm_gem_dma_object 
*bo; + int region = job->region_bo_num[i]; + + bo = to_drm_gem_dma_obj(job->region_bo[i]); + writel_relaxed(lower_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP(region)); + writel_relaxed(upper_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP_HI(region)); + dev_dbg(dev->base.dev, "Region %d base addr = %pad\n", region, &bo->dma_addr); + } + + if (job->sram_size) { + writel_relaxed(lower_32_bits(dev->sramphys), + dev->regs + NPU_REG_BASEP(ETHOSU_SRAM_REGION)); + writel_relaxed(upper_32_bits(dev->sramphys), + dev->regs + NPU_REG_BASEP_HI(ETHOSU_SRAM_REGION)); + dev_dbg(dev->base.dev, "Region %d base addr = %pad (SRAM)\n", + ETHOSU_SRAM_REGION, &dev->sramphys); + } + + writel_relaxed(lower_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE); + writel_relaxed(upper_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE_HI); + writel_relaxed(cmd_info->cmd_size, dev->regs + NPU_REG_QSIZE); + + writel(CMD_TRANSITION_TO_RUN, dev->regs + NPU_REG_CMD); + + dev_dbg(dev->base.dev, + "Submitted cmd at %pad to core\n", &cmd_bo->dma_addr); +} + +static int ethosu_acquire_object_fences(struct ethosu_job *job) +{ + int i, ret; + struct drm_gem_object **bos = job->region_bo; + struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info; + + for (i = 0; i < job->region_cnt; i++) { + bool is_write; + + if (!bos[i]) + break; + + ret = dma_resv_reserve_fences(bos[i]->resv, 1); + if (ret) + return ret; + + is_write = info->output_region[job->region_bo_num[i]]; + ret = drm_sched_job_add_implicit_dependencies(&job->base, bos[i], + is_write); + if (ret) + return ret; + } + + return 0; +} + +static void ethosu_attach_object_fences(struct ethosu_job *job) +{ + int i; + struct dma_fence *fence = job->inference_done_fence; + struct drm_gem_object **bos = job->region_bo; + struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info; + + for (i = 0; i < job->region_cnt; i++) + if (info->output_region[job->region_bo_num[i]]) + 
dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); +} + +static int ethosu_job_push(struct ethosu_job *job) +{ + struct ww_acquire_ctx acquire_ctx; + int ret; + + ret = drm_gem_lock_reservations(job->region_bo, job->region_cnt, &acquire_ctx); + if (ret) + return ret; + + ret = ethosu_acquire_object_fences(job); + if (ret) + goto out; + + ret = pm_runtime_resume_and_get(job->dev->base.dev); + if (!ret) { + guard(mutex)(&job->dev->sched_lock); + + drm_sched_job_arm(&job->base); + job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished); + kref_get(&job->refcount); /* put by scheduler job completion */ + drm_sched_entity_push_job(&job->base); + ethosu_attach_object_fences(job); + } + +out: + drm_gem_unlock_reservations(job->region_bo, job->region_cnt, &acquire_ctx); + return ret; +} + +static void ethosu_job_cleanup(struct kref *ref) +{ + struct ethosu_job *job = container_of(ref, struct ethosu_job, + refcount); + unsigned int i; + + pm_runtime_put_autosuspend(job->dev->base.dev); + + dma_fence_put(job->done_fence); + dma_fence_put(job->inference_done_fence); + + for (i = 0; i < job->region_cnt; i++) + drm_gem_object_put(job->region_bo[i]); + + drm_gem_object_put(job->cmd_bo); + + kfree(job); +} + +static void ethosu_job_put(struct ethosu_job *job) +{ + kref_put(&job->refcount, ethosu_job_cleanup); +} + +static void ethosu_job_free(struct drm_sched_job *sched_job) +{ + struct ethosu_job *job = to_ethosu_job(sched_job); + + drm_sched_job_cleanup(sched_job); + ethosu_job_put(job); +} + +static struct dma_fence *ethosu_job_run(struct drm_sched_job *sched_job) +{ + struct ethosu_job *job = to_ethosu_job(sched_job); + struct ethosu_device *dev = job->dev; + struct dma_fence *fence = job->done_fence; + + if (unlikely(job->base.s_fence->finished.error)) + return NULL; + + dma_fence_init(fence, ðosu_fence_ops, &dev->fence_lock, + dev->fence_context, ++dev->emit_seqno); + dma_fence_get(fence); + + scoped_guard(mutex, &dev->job_lock) { + 
dev->in_flight_job = job; + ethosu_job_hw_submit(dev, job); + } + + return fence; +} + +static void ethosu_job_handle_irq(struct ethosu_device *dev) +{ + u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS); + + if (status & (STATUS_BUS_STATUS | STATUS_CMD_PARSE_ERR)) { + dev_err(dev->base.dev, "Error IRQ - %x\n", status); + drm_sched_fault(&dev->sched); + return; + } + + scoped_guard(mutex, &dev->job_lock) { + if (dev->in_flight_job) { + dma_fence_signal(dev->in_flight_job->done_fence); + dev->in_flight_job = NULL; + } + } +} + +static irqreturn_t ethosu_job_irq_handler_thread(int irq, void *data) +{ + struct ethosu_device *dev = data; + + ethosu_job_handle_irq(dev); + + return IRQ_HANDLED; +} + +static irqreturn_t ethosu_job_irq_handler(int irq, void *data) +{ + struct ethosu_device *dev = data; + u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS); + + if (!(status & STATUS_IRQ_RAISED)) + return IRQ_NONE; + + writel_relaxed(CMD_CLEAR_IRQ, dev->regs + NPU_REG_CMD); + return IRQ_WAKE_THREAD; +} + +static enum drm_gpu_sched_stat ethosu_job_timedout(struct drm_sched_job *bad) +{ + struct ethosu_job *job = to_ethosu_job(bad); + struct ethosu_device *dev = job->dev; + bool running; + u32 *bocmds = to_drm_gem_dma_obj(job->cmd_bo)->vaddr; + u32 cmdaddr; + + cmdaddr = readl_relaxed(dev->regs + NPU_REG_QREAD); + running = FIELD_GET(STATUS_STATE_RUNNING, readl_relaxed(dev->regs + NPU_REG_STATUS)); + + if (running) { + int ret; + u32 reg; + + ret = readl_relaxed_poll_timeout(dev->regs + NPU_REG_QREAD, + reg, + reg != cmdaddr, + USEC_PER_MSEC, 100 * USEC_PER_MSEC); + + /* If still running and progress is being made, just return */ + if (!ret) + return DRM_GPU_SCHED_STAT_NO_HANG; + } + + dev_err(dev->base.dev, "NPU sched timed out: NPU %s, cmdstream offset 0x%x: 0x%x\n", + running ? 
"running" : "stopped", + cmdaddr, bocmds[cmdaddr / 4]); + + drm_sched_stop(&dev->sched, bad); + + scoped_guard(mutex, &dev->job_lock) + dev->in_flight_job = NULL; + + /* Proceed with reset now. */ + pm_runtime_force_suspend(dev->base.dev); + pm_runtime_force_resume(dev->base.dev); + + /* Restart the scheduler */ + drm_sched_start(&dev->sched, 0); + + return DRM_GPU_SCHED_STAT_RESET; +} + +static const struct drm_sched_backend_ops ethosu_sched_ops = { + .run_job = ethosu_job_run, + .timedout_job = ethosu_job_timedout, + .free_job = ethosu_job_free +}; + +int ethosu_job_init(struct ethosu_device *edev) +{ + struct device *dev = edev->base.dev; + struct drm_sched_init_args args = { + .ops = ðosu_sched_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = 1, + .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), + .name = dev_name(dev), + .dev = dev, + }; + int ret; + + spin_lock_init(&edev->fence_lock); + ret = devm_mutex_init(dev, &edev->job_lock); + if (ret) + return ret; + ret = devm_mutex_init(dev, &edev->sched_lock); + if (ret) + return ret; + + edev->irq = platform_get_irq(to_platform_device(dev), 0); + if (edev->irq < 0) + return edev->irq; + + ret = devm_request_threaded_irq(dev, edev->irq, + ethosu_job_irq_handler, + ethosu_job_irq_handler_thread, + IRQF_SHARED, KBUILD_MODNAME, + edev); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + edev->fence_context = dma_fence_context_alloc(1); + + ret = drm_sched_init(&edev->sched, &args); + if (ret) { + dev_err(dev, "Failed to create scheduler: %d\n", ret); + goto err_sched; + } + + return 0; + +err_sched: + drm_sched_fini(&edev->sched); + return ret; +} + +void ethosu_job_fini(struct ethosu_device *dev) +{ + drm_sched_fini(&dev->sched); +} + +int ethosu_job_open(struct ethosu_file_priv *ethosu_priv) +{ + struct ethosu_device *dev = ethosu_priv->edev; + struct drm_gpu_scheduler *sched = &dev->sched; + int ret; + + ret = drm_sched_entity_init(ðosu_priv->sched_entity, + 
DRM_SCHED_PRIORITY_NORMAL, + &sched, 1, NULL); + return WARN_ON(ret); +} + +void ethosu_job_close(struct ethosu_file_priv *ethosu_priv) +{ + struct drm_sched_entity *entity = ðosu_priv->sched_entity; + + drm_sched_entity_destroy(entity); +} + +static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file, + struct drm_ethosu_job *job) +{ + struct ethosu_device *edev = to_ethosu_device(dev); + struct ethosu_file_priv *file_priv = file->driver_priv; + struct ethosu_job *ejob = NULL; + struct ethosu_validated_cmdstream_info *cmd_info; + int ret = 0; + + /* BO region 2 is reserved if SRAM is used */ + if (job->region_bo_handles[ETHOSU_SRAM_REGION] && job->sram_size) + return -EINVAL; + + if (edev->npu_info.sram_size < job->sram_size) + return -EINVAL; + + ejob = kzalloc(sizeof(*ejob), GFP_KERNEL); + if (!ejob) + return -ENOMEM; + + kref_init(&ejob->refcount); + + ejob->dev = edev; + ejob->sram_size = job->sram_size; + + ejob->done_fence = kzalloc(sizeof(*ejob->done_fence), GFP_KERNEL); + if (!ejob->done_fence) { + ret = -ENOMEM; + goto out_cleanup_job; + } + + ret = drm_sched_job_init(&ejob->base, + &file_priv->sched_entity, + 1, NULL, file->client_id); + if (ret) + goto out_put_job; + + ejob->cmd_bo = drm_gem_object_lookup(file, job->cmd_bo); + if (!ejob->cmd_bo) { + ret = -ENOENT; + goto out_cleanup_job; + } + cmd_info = to_ethosu_bo(ejob->cmd_bo)->info; + if (!cmd_info) { + ret = -EINVAL; + goto out_cleanup_job; + } + + for (int i = 0; i < NPU_BASEP_REGION_MAX; i++) { + struct drm_gem_object *gem; + + /* Can only omit a BO handle if the region is not used or used for SRAM */ + if (!job->region_bo_handles[i] && + (!cmd_info->region_size[i] || (i == ETHOSU_SRAM_REGION && job->sram_size))) + continue; + + if (job->region_bo_handles[i] && !cmd_info->region_size[i]) { + dev_err(dev->dev, + "Cmdstream BO handle %d set for unused region %d\n", + job->region_bo_handles[i], i); + ret = -EINVAL; + goto out_cleanup_job; + } + + gem = 
drm_gem_object_lookup(file, job->region_bo_handles[i]); + if (!gem) { + dev_err(dev->dev, + "Invalid BO handle %d for region %d\n", + job->region_bo_handles[i], i); + ret = -ENOENT; + goto out_cleanup_job; + } + + ejob->region_bo[ejob->region_cnt] = gem; + ejob->region_bo_num[ejob->region_cnt] = i; + ejob->region_cnt++; + + if (to_ethosu_bo(gem)->info) { + dev_err(dev->dev, + "Cmdstream BO handle %d used for region %d\n", + job->region_bo_handles[i], i); + ret = -EINVAL; + goto out_cleanup_job; + } + + /* Verify the command stream doesn't have accesses outside the BO */ + if (cmd_info->region_size[i] > gem->size) { + dev_err(dev->dev, + "cmd stream region %d size greater than BO size (%llu > %zu)\n", + i, cmd_info->region_size[i], gem->size); + ret = -EOVERFLOW; + goto out_cleanup_job; + } + } + ret = ethosu_job_push(ejob); + +out_cleanup_job: + if (ret) + drm_sched_job_cleanup(&ejob->base); +out_put_job: + ethosu_job_put(ejob); + + return ret; +} + +int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ethosu_submit *args = data; + int ret = 0; + unsigned int i = 0; + + if (args->pad) { + drm_dbg(dev, "Reserved field in drm_ethosu_submit struct should be 0.\n"); + return -EINVAL; + } + + struct drm_ethosu_job __free(kvfree) *jobs = + kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL); + if (!jobs) + return -ENOMEM; + + if (copy_from_user(jobs, + (void __user *)(uintptr_t)args->jobs, + args->job_count * sizeof(*jobs))) { + drm_dbg(dev, "Failed to copy incoming job array\n"); + return -EFAULT; + } + + for (i = 0; i < args->job_count; i++) { + ret = ethosu_ioctl_submit_job(dev, file, &jobs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/accel/ethosu/ethosu_job.h b/drivers/accel/ethosu/ethosu_job.h new file mode 100644 index 000000000000..ff1cf448d094 --- /dev/null +++ b/drivers/accel/ethosu/ethosu_job.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR MIT */ +/* Copyright 
2024-2025 Tomeu Vizoso */ +/* Copyright 2025 Arm, Ltd. */ + +#ifndef __ETHOSU_JOB_H__ +#define __ETHOSU_JOB_H__ + +#include +#include + +struct ethosu_device; +struct ethosu_file_priv; + +struct ethosu_job { + struct drm_sched_job base; + struct ethosu_device *dev; + + struct drm_gem_object *cmd_bo; + struct drm_gem_object *region_bo[NPU_BASEP_REGION_MAX]; + u8 region_bo_num[NPU_BASEP_REGION_MAX]; + u8 region_cnt; + u32 sram_size; + + /* Fence to be signaled by drm-sched once its done with the job */ + struct dma_fence *inference_done_fence; + + /* Fence to be signaled by IRQ handler when the job is complete. */ + struct dma_fence *done_fence; + + struct kref refcount; +}; + +int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file); + +int ethosu_job_init(struct ethosu_device *dev); +void ethosu_job_fini(struct ethosu_device *dev); +int ethosu_job_open(struct ethosu_file_priv *ethosu_priv); +void ethosu_job_close(struct ethosu_file_priv *ethosu_priv); + +#endif diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index 1029e0bab061..dbf76b8a5b4c 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -6,6 +6,7 @@ intel_vpu-y := \ ivpu_fw.o \ ivpu_fw_log.o \ ivpu_gem.o \ + ivpu_gem_userptr.o \ ivpu_hw.o \ ivpu_hw_btrs.o \ ivpu_hw_ip.o \ diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index cd24ccd20ba6..3bd85ee6c26b 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent) DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); +static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw, + int band, const char *name) +{ + seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n", + name, + hw->hws.grace_period[band], + hw->hws.process_grace_period[band], + hw->hws.process_quantum[band]); 
+} + static int priority_bands_show(struct seq_file *s, void *v) { struct ivpu_device *vdev = s->private; struct ivpu_hw_info *hw = vdev->hw; - for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; - band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { - switch (band) { - case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE: - seq_puts(s, "Idle: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL: - seq_puts(s, "Normal: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS: - seq_puts(s, "Focus: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME: - seq_puts(s, "Realtime: "); - break; - } - - seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n", - hw->hws.grace_period[band], hw->hws.process_grace_period[band], - hw->hws.process_quantum[band]); - } + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime"); return 0; } diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 3289751b4757..b305effcf003 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -57,7 +57,7 @@ MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency"); int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO; module_param_named(sched_mode, ivpu_sched_mode, int, 0444); -MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler"); +MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 27XX - 50XX), 1 - Use HW scheduler"); bool ivpu_disable_mmu_cont_pages; module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444); @@ -134,6 +134,8 @@ bool ivpu_is_capable(struct ivpu_device *vdev, u32 
capability) return true; case DRM_IVPU_CAP_DMA_MEMORY_RANGE: return true; + case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR: + return true; case DRM_IVPU_CAP_MANAGE_CMDQ: return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW; default: @@ -200,6 +202,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f case DRM_IVPU_PARAM_CAPABILITIES: args->value = ivpu_is_capable(vdev, args->index); break; + case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: + args->value = ivpu_fw_preempt_buf_size(vdev); + break; default: ret = -EINVAL; break; @@ -310,6 +315,7 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0), }; static int ivpu_wait_for_ready(struct ivpu_device *vdev) @@ -377,8 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev) drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter)); drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); - /* Update boot params located at first 4KB of FW memory */ - ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem)); + ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp)); ret = ivpu_hw_boot_fw(vdev); if (ret) { @@ -705,6 +710,7 @@ static struct pci_device_id ivpu_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) }, { } }; MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 62ab1c654e63..5b34b6f50e69 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -27,6 +27,7 @@ #define PCI_DEVICE_ID_LNL 0x643e #define PCI_DEVICE_ID_PTL_P 
0xb03e #define PCI_DEVICE_ID_WCL 0xfd3e +#define PCI_DEVICE_ID_NVL 0xd71d #define IVPU_HW_IP_37XX 37 #define IVPU_HW_IP_40XX 40 @@ -78,6 +79,7 @@ #define IVPU_DBG_KREF BIT(11) #define IVPU_DBG_RPM BIT(12) #define IVPU_DBG_MMU_MAP BIT(13) +#define IVPU_DBG_IOCTL BIT(14) #define ivpu_err(vdev, fmt, ...) \ drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) @@ -245,6 +247,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_PTL_P: case PCI_DEVICE_ID_WCL: return IVPU_HW_IP_50XX; + case PCI_DEVICE_ID_NVL: + return IVPU_HW_IP_60XX; default: dump_stack(); ivpu_err(vdev, "Unknown NPU IP generation\n"); @@ -261,6 +265,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_LNL: case PCI_DEVICE_ID_PTL_P: case PCI_DEVICE_ID_WCL: + case PCI_DEVICE_ID_NVL: return IVPU_HW_BTRS_LNL; default: dump_stack(); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 9db741695401..48386d2cddbb 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -17,15 +17,10 @@ #include "ivpu_ipc.h" #include "ivpu_pm.h" -#define FW_GLOBAL_MEM_START (2ull * SZ_1G) -#define FW_GLOBAL_MEM_END (3ull * SZ_1G) -#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */ -#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */ -#define FW_RUNTIME_MAX_SIZE SZ_512M #define FW_SHAVE_NN_MAX_SIZE SZ_2M -#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START) -#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE) #define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE) +#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K +#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M #define WATCHDOG_MSS_REDIRECT 32 #define WATCHDOG_NCE_REDIRECT 33 @@ -61,12 +56,14 @@ static struct { { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" }, { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" }, + { IVPU_HW_IP_60XX, 
"intel/vpu/vpu_60xx_v1.bin" }, }; /* Production fw_names from the table above */ MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin"); MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin"); MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_60xx_v1.bin"); static int ivpu_fw_request(struct ivpu_device *vdev) { @@ -131,9 +128,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea return false; } -static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size) +bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range) { - if (addr < range_start || addr + size > range_start + range_size) + u64 addr_end; + + if (!range || check_add_overflow(addr, size, &addr_end)) + return false; + + if (addr < range->start || addr_end > range->end) return false; return true; @@ -142,6 +144,12 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range static u32 ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) { + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX && + ivpu_sched_mode == VPU_SCHEDULING_MODE_OS) { + ivpu_warn(vdev, "OS sched mode is not supported, using HW mode\n"); + return VPU_SCHEDULING_MODE_HW; + } + if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO) return ivpu_sched_mode; @@ -151,11 +159,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he return VPU_SCHEDULING_MODE_HW; } +static void +ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) +{ + struct ivpu_fw_info *fw = vdev->fw; + u32 primary_preempt_buf_size, secondary_preempt_buf_size; + + if (fw_hdr->preemption_buffer_1_max_size) + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; + else + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + + if (fw_hdr->preemption_buffer_2_max_size) + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; + else + 
secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; + + ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n", + primary_preempt_buf_size, secondary_preempt_buf_size); + + if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE || + secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too small\n"); + return; + } + + if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE || + secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too big\n"); + return; + } + + if (fw->sched_mode != VPU_SCHEDULING_MODE_HW) + return; + + if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + return; + + vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE); + vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE); +} + static int ivpu_fw_parse(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data; - u64 runtime_addr, image_load_addr, runtime_size, image_size; + struct ivpu_addr_range fw_image_range; + u64 boot_params_addr, boot_params_size; + u64 fw_version_addr, fw_version_size; + u64 runtime_addr, runtime_size; + u64 image_load_addr, image_size; if (fw->file->size <= FW_FILE_IMAGE_OFFSET) { ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size); @@ -167,18 +220,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) return -EINVAL; } - runtime_addr = fw_hdr->boot_params_load_address; - runtime_size = fw_hdr->runtime_size; - image_load_addr = fw_hdr->image_load_address; - image_size = fw_hdr->image_size; + boot_params_addr = fw_hdr->boot_params_load_address; + boot_params_size = SZ_4K; - if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) { - ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr); + if (!ivpu_is_within_range(boot_params_addr, boot_params_size, 
&vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr); return -EINVAL; } - if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) { - ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size); + fw_version_addr = fw_hdr->firmware_version_load_address; + fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K); + + if (fw_version_size != SZ_4K) { + ivpu_err(vdev, "Invalid firmware version size: %u\n", + fw_hdr->firmware_version_size); + return -EINVAL; + } + + if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr); + return -EINVAL; + } + + runtime_addr = fw_hdr->image_load_address; + runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size; + + image_load_addr = fw_hdr->image_load_address; + image_size = fw_hdr->image_size; + + if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n", + runtime_addr, runtime_size); return -EINVAL; } @@ -187,23 +259,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) return -EINVAL; } - if (image_load_addr < runtime_addr || - image_load_addr + image_size > runtime_addr + runtime_size) { - ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n", + if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n", image_load_addr, image_size); return -EINVAL; } + if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size)) + return -EINVAL; + + if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) { + ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); + return -EINVAL; + } + if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) { ivpu_err(vdev, "SHAVE NN 
firmware is too big: %u\n", fw_hdr->shave_nn_fw_size); return -EINVAL; } - if (fw_hdr->entry_point < image_load_addr || - fw_hdr->entry_point >= image_load_addr + image_size) { - ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); - return -EINVAL; - } ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", fw_hdr->header_version, fw_hdr->image_format); @@ -217,6 +291,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3)) return -EINVAL; + fw->boot_params_addr = boot_params_addr; + fw->boot_params_size = boot_params_size; + fw->fw_version_addr = fw_version_addr; + fw->fw_version_size = fw_version_size; fw->runtime_addr = runtime_addr; fw->runtime_size = runtime_size; fw->image_load_offset = image_load_addr - runtime_addr; @@ -235,22 +313,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr); ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS"); - if (fw_hdr->preemption_buffer_1_max_size) - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; - else - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + ivpu_preemption_config_parse(vdev, fw_hdr); + ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n", + ivpu_fw_preempt_buf_size(vdev) ? 
"is" : "is not"); - if (fw_hdr->preemption_buffer_2_max_size) - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; - else - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; - ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n", - fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size); - - if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, - fw_hdr->ro_section_size, - fw_hdr->image_load_address, - fw_hdr->image_size)) { + if (fw_hdr->ro_section_start_address && + !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size, + &fw_image_range)) { ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n", fw_hdr->ro_section_start_address, fw_hdr->ro_section_size); return -EINVAL; @@ -259,12 +328,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->read_only_addr = fw_hdr->ro_section_start_address; fw->read_only_size = fw_hdr->ro_section_size; - ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", - fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); - ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", - fw->runtime_addr, image_load_addr, fw->entry_point); + ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n", + fw->boot_params_addr, fw->boot_params_size); + ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n", + fw->fw_version_addr, fw->fw_version_size); + ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n", + fw->runtime_addr, fw->runtime_size); + ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n", + fw->image_load_offset, fw->image_size); ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n", fw->read_only_addr, fw->read_only_size); + ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point); + ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size); 
return 0; } @@ -291,39 +366,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev) IVPU_PRINT_WA(disable_d0i3_msg); } -static int ivpu_fw_update_global_range(struct ivpu_device *vdev) -{ - struct ivpu_fw_info *fw = vdev->fw; - u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT); - u64 size = FW_SHARED_MEM_SIZE; - - if (start + size > FW_GLOBAL_MEM_END) { - ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size); - return -EINVAL; - } - - ivpu_hw_range_init(&vdev->hw->ranges.global, start, size); - return 0; -} - static int ivpu_fw_mem_init(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; - struct ivpu_addr_range fw_range; int log_verb_size; int ret; - ret = ivpu_fw_update_global_range(vdev); - if (ret) - return ret; + fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + if (!fw->mem_bp) { + ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n"); + return -ENOMEM; + } - fw_range.start = fw->runtime_addr; - fw_range.end = fw->runtime_addr + fw->runtime_size; - fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size, - DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + if (!fw->mem_fw_ver) { + ivpu_err(vdev, "Failed to create firmware version memory buffer\n"); + ret = -ENOMEM; + goto err_free_bp; + } + + fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); if (!fw->mem) { ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_fw_ver; } ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr, @@ -372,6 +441,10 @@ err_free_log_crit: ivpu_bo_free(fw->mem_log_crit); err_free_fw_mem: ivpu_bo_free(fw->mem); +err_free_fw_ver: + 
ivpu_bo_free(fw->mem_fw_ver); +err_free_bp: + ivpu_bo_free(fw->mem_bp); return ret; } @@ -387,10 +460,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev) ivpu_bo_free(fw->mem_log_verb); ivpu_bo_free(fw->mem_log_crit); ivpu_bo_free(fw->mem); + ivpu_bo_free(fw->mem_fw_ver); + ivpu_bo_free(fw->mem_bp); fw->mem_log_verb = NULL; fw->mem_log_crit = NULL; fw->mem = NULL; + fw->mem_fw_ver = NULL; + fw->mem_bp = NULL; } int ivpu_fw_init(struct ivpu_device *vdev) @@ -483,11 +560,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n", boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n", - boot_params->global_memory_allocator_base); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n", - boot_params->global_memory_allocator_size); - ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n", boot_params->shave_nn_fw_base); @@ -495,10 +567,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->watchdog_irq_mss); ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n", boot_params->watchdog_irq_nce); - ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n", - boot_params->host_to_vpu_irq); - ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n", - boot_params->job_done_irq); ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n", boot_params->host_version_id); @@ -546,6 +614,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->system_time_us); ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", boot_params->power_profile); + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n", + boot_params->vpu_uses_ecc_mca_signal); } void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct 
vpu_boot_params *boot_params) @@ -572,6 +642,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params return; } + memset(boot_params, 0, sizeof(*boot_params)); vdev->pm->is_warmboot = false; boot_params->magic = VPU_BOOT_PARAMS_MAGIC; @@ -647,6 +718,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->d0i3_entry_vpu_ts = 0; if (IVPU_WA(disable_d0i2)) boot_params->power_profile |= BIT(1); + boot_params->vpu_uses_ecc_mca_signal = + ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0; boot_params->system_time_us = ktime_to_us(ktime_get_real()); wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 7081913fb0dd..00945892b55e 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_FW_H__ @@ -19,10 +19,16 @@ struct ivpu_fw_info { const struct firmware *file; const char *name; char version[FW_VERSION_STR_SIZE]; + struct ivpu_bo *mem_bp; + struct ivpu_bo *mem_fw_ver; struct ivpu_bo *mem; struct ivpu_bo *mem_shave_nn; struct ivpu_bo *mem_log_crit; struct ivpu_bo *mem_log_verb; + u64 boot_params_addr; + u64 boot_params_size; + u64 fw_version_addr; + u64 fw_version_size; u64 runtime_addr; u32 runtime_size; u64 image_load_offset; @@ -42,6 +48,7 @@ struct ivpu_fw_info { u64 last_heartbeat; }; +bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range); int ivpu_fw_init(struct ivpu_device *vdev); void ivpu_fw_fini(struct ivpu_device *vdev); void ivpu_fw_load(struct ivpu_device *vdev); @@ -52,4 +59,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point; } +static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev) +{ + 
return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size; +} + #endif /* __IVPU_FW_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 59cfcf3eaded..74b12c7e6caf 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -15,6 +15,7 @@ #include #include "ivpu_drv.h" +#include "ivpu_fw.h" #include "ivpu_gem.h" #include "ivpu_hw.h" #include "ivpu_mmu.h" @@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs; static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action) { ivpu_dbg(vdev, BO, - "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n", - action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id, + "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n", + action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr, (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc, (bool)drm_gem_is_imported(&bo->base.base)); } @@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo) dma_resv_unlock(bo->base.base.resv); } +static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo) +{ + struct sg_table *sgt; + + drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach); + + ivpu_bo_lock(bo); + + sgt = bo->base.sgt; + if (!sgt) { + sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt)); + else + bo->base.sgt = sgt; + } + + ivpu_bo_unlock(bo); + + return sgt; +} + /* - * ivpu_bo_pin() - pin the backing physical pages and map them to VPU. + * ivpu_bo_bind() - pin the backing physical pages and map them to VPU. 
* * This function pins physical memory pages, then maps the physical pages * to IOMMU address space and finally updates the VPU MMU page tables * to allow the VPU to translate VPU address to IOMMU address. */ -int __must_check ivpu_bo_pin(struct ivpu_bo *bo) +int __must_check ivpu_bo_bind(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); struct sg_table *sgt; int ret = 0; - ivpu_dbg_bo(vdev, bo, "pin"); + ivpu_dbg_bo(vdev, bo, "bind"); - sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + if (bo->base.base.import_attach) + sgt = ivpu_bo_map_attachment(vdev, bo); + else + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); @@ -70,7 +96,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo) if (!bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, - ivpu_bo_is_snooped(bo)); + ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo)); if (ret) { ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret); goto unlock; @@ -99,9 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node); if (!ret) { bo->ctx = ctx; + bo->ctx_id = ctx->id; bo->vpu_addr = bo->mm_node.start; - } else { - ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); + ivpu_dbg_bo(vdev, bo, "vaddr"); } ivpu_bo_unlock(bo); @@ -115,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount)); + dma_resv_assert_held(bo->base.base.resv); if (bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); @@ -130,13 +156,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) bo->ctx = NULL; } - if (drm_gem_is_imported(&bo->base.base)) - return; - if (bo->base.sgt) { - dma_unmap_sgtable(vdev->drm.dev, 
bo->base.sgt, DMA_BIDIRECTIONAL, 0); - sg_free_table(bo->base.sgt); - kfree(bo->base.sgt); + if (bo->base.base.import_attach) { + dma_buf_unmap_attachment(bo->base.base.import_attach, + bo->base.sgt, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); + sg_free_table(bo->base.sgt); + kfree(bo->base.sgt); + } bo->base.sgt = NULL; } } @@ -182,10 +210,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { + struct ivpu_device *vdev = to_ivpu_device(dev); struct device *attach_dev = dev->dev; struct dma_buf_attachment *attach; - struct sg_table *sgt; struct drm_gem_object *obj; + struct ivpu_bo *bo; int ret; attach = dma_buf_attach(dma_buf, attach_dev); @@ -194,25 +223,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto fail_detach; - } - - obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL); if (IS_ERR(obj)) { ret = PTR_ERR(obj); - goto fail_unmap; + goto fail_detach; } obj->import_attach = attach; obj->resv = dma_buf->resv; + bo = to_ivpu_bo(obj); + + mutex_lock(&vdev->bo_list_lock); + list_add_tail(&bo->bo_list_node, &vdev->bo_list); + mutex_unlock(&vdev->bo_list_lock); + + ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo)); + return obj; -fail_unmap: - dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); @@ -220,7 +249,7 @@ fail_detach: return ERR_PTR(ret); } -static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id) +static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags) { struct 
drm_gem_shmem_object *shmem; struct ivpu_bo *bo; @@ -238,7 +267,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla return ERR_CAST(shmem); bo = to_ivpu_bo(&shmem->base); - bo->ctx_id = ctx_id; bo->base.map_wc = flags & DRM_IVPU_BO_WC; bo->flags = flags; @@ -246,7 +274,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla list_add_tail(&bo->bo_list_node, &vdev->bo_list); mutex_unlock(&vdev->bo_list_lock); - ivpu_dbg_bo(vdev, bo, "alloc"); + ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size); return bo; } @@ -259,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file) struct ivpu_addr_range *range; if (bo->ctx) { - ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n", - file_priv->ctx.id, bo->ctx->id); + ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n", + bo, file_priv->ctx.id, bo->ctx->id); return -EALREADY; } @@ -281,20 +309,27 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj) ivpu_dbg_bo(vdev, bo, "free"); + drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node)); + mutex_lock(&vdev->bo_list_lock); list_del(&bo->bo_list_node); - mutex_unlock(&vdev->bo_list_lock); drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) && !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0); drm_WARN_ON(&vdev->drm, bo->base.vaddr); + ivpu_bo_lock(bo); ivpu_bo_unbind_locked(bo); + ivpu_bo_unlock(bo); + + mutex_unlock(&vdev->bo_list_lock); + drm_WARN_ON(&vdev->drm, bo->mmu_mapped); drm_WARN_ON(&vdev->drm, bo->ctx); drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); + drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node); drm_gem_shmem_free(&bo->base); } @@ -320,25 +355,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct ivpu_bo *bo; int ret; - if (args->flags & ~DRM_IVPU_BO_FLAGS) + if (args->flags & 
~DRM_IVPU_BO_FLAGS) { + ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags); return -EINVAL; + } - if (size == 0) + if (size == 0) { + ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size); return -EINVAL; + } - bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id); + bo = ivpu_bo_alloc(vdev, size, args->flags); if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)", + ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n", bo, file_priv->ctx.id, args->size, args->flags); return PTR_ERR(bo); } + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0); + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); - if (ret) - ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", bo, file_priv->ctx.id, args->size, args->flags); - else + } else { args->vpu_addr = bo->vpu_addr; + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1); + } drm_gem_object_put(&bo->base.base); @@ -360,18 +403,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end)); drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size)); - bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID); + bo = ivpu_bo_alloc(vdev, size, flags); if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", + ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n", bo, range->start, size, flags); return NULL; } ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range); - if (ret) + if (ret) { + ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n", + bo, ctx->id, size, ret); goto err_put; + } - ret = ivpu_bo_pin(bo); + ret = ivpu_bo_bind(bo); if (ret) goto err_put; @@ -391,6 +437,21 @@ err_put: return NULL; } +struct ivpu_bo 
*ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags) +{ + struct ivpu_addr_range range; + + if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size); + return NULL; + } + + if (ivpu_hw_range_init(vdev, &range, addr, size)) + return NULL; + + return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags); +} + struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags) { return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags); diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index aa8ff14f7aae..2dcd7eba9cb7 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_GEM_H__ #define __IVPU_GEM_H__ @@ -24,19 +24,22 @@ struct ivpu_bo { bool mmu_mapped; }; -int ivpu_bo_pin(struct ivpu_bo *bo); +int ivpu_bo_bind(struct ivpu_bo *bo); void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size); struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, struct ivpu_addr_range *range, u64 size, u32 flags); +struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags); struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags); void ivpu_bo_free(struct ivpu_bo *bo); int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct 
drm_file *file); +int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p); void ivpu_bo_list_print(struct drm_device *dev); @@ -74,6 +77,11 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo) return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED; } +static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_READ_ONLY; +} + static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr) { if (vpu_addr < bo->vpu_addr) @@ -96,4 +104,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr) return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo)); } +static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_MAPPABLE; +} + #endif /* __IVPU_GEM_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem_userptr.c b/drivers/accel/ivpu/ivpu_gem_userptr.c new file mode 100644 index 000000000000..25ba606164c0 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_gem_userptr.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2025 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ivpu_drv.h" +#include "ivpu_gem.h" + +static struct sg_table * +ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct sg_table *sgt = attachment->dmabuf->priv; + int ret; + + ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) + return ERR_PTR(ret); + + return sgt; +} + +static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction) +{ + dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf) +{ + struct 
sg_table *sgt = dma_buf->priv; + struct sg_page_iter page_iter; + struct page *page; + + for_each_sgtable_page(sgt, &page_iter, 0) { + page = sg_page_iter_page(&page_iter); + unpin_user_page(page); + } + + sg_free_table(sgt); + kfree(sgt); +} + +static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = { + .map_dma_buf = ivpu_gem_userptr_dmabuf_map, + .unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap, + .release = ivpu_gem_userptr_dmabuf_release, +}; + +static struct dma_buf * +ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr, + size_t size, uint32_t flags) +{ + struct dma_buf_export_info exp_info = {}; + struct dma_buf *dma_buf; + struct sg_table *sgt; + struct page **pages; + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned int gup_flags = FOLL_LONGTERM; + int ret, i, pinned; + + /* Add FOLL_WRITE only if the BO is not read-only */ + if (!(flags & DRM_IVPU_BO_READ_ONLY)) + gup_flags |= FOLL_WRITE; + + pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); + + pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages); + if (pinned < 0) { + ret = pinned; + ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret); + goto free_pages_array; + } + + if (pinned != nr_pages) { + ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages); + ret = -EFAULT; + goto unpin_pages; + } + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto unpin_pages; + } + + ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL); + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret); + goto free_sgt; + } + + exp_info.exp_name = "ivpu_userptr_dmabuf"; + exp_info.owner = THIS_MODULE; + exp_info.ops = &ivpu_gem_userptr_dmabuf_ops; + exp_info.size = size; + exp_info.flags = O_RDWR | O_CLOEXEC; + exp_info.priv = sgt; + + dma_buf = dma_buf_export(&exp_info); + if (IS_ERR(dma_buf)) { + ret = 
PTR_ERR(dma_buf); + ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret); + goto free_sg_table; + } + + kvfree(pages); + return dma_buf; + +free_sg_table: + sg_free_table(sgt); +free_sgt: + kfree(sgt); +unpin_pages: + for (i = 0; i < pinned; i++) + unpin_user_page(pages[i]); +free_pages_array: + kvfree(pages); + return ERR_PTR(ret); +} + +static struct ivpu_bo * +ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr, + size_t size, uint32_t flags) +{ + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + struct ivpu_bo *bo; + + dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags); + if (IS_ERR(dma_buf)) + return ERR_CAST(dma_buf); + + obj = ivpu_gem_prime_import(&vdev->drm, dma_buf); + if (IS_ERR(obj)) { + dma_buf_put(dma_buf); + return ERR_CAST(obj); + } + + dma_buf_put(dma_buf); + + bo = to_ivpu_bo(obj); + bo->flags = flags; + + return bo; +} + +int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ivpu_bo_create_from_userptr *args = data; + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = to_ivpu_device(dev); + void __user *user_ptr = u64_to_user_ptr(args->user_ptr); + struct ivpu_bo *bo; + int ret; + + if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) { + ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags); + return -EINVAL; + } + + if (!args->user_ptr || !args->size) { + ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EINVAL; + } + + if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) { + ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EINVAL; + } + + if (!access_ok(user_ptr, args->size)) { + ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EFAULT; + } + 
+ bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", + bo, file_priv->ctx.id, args->size, args->flags); + } else { + ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n", + args->handle, bo->vpu_addr, args->size, bo->flags); + args->vpu_addr = bo->vpu_addr; + } + + drm_gem_object_put(&bo->base.base); + + return ret; +} diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index 08dcc31b56f4..d69cd0d93569 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -8,6 +8,8 @@ #include "ivpu_hw_btrs.h" #include "ivpu_hw_ip.h" +#include +#include #include #include #include @@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444); MODULE_PARM_DESC(fail_hw, ",,,"); #endif +#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */ + +#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff + static char *platform_to_str(u32 platform) { switch (platform) { @@ -147,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev) vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000; } +int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size) +{ + u64 end; + + if (!range || check_add_overflow(start, size, &end)) { + ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size); + return -EINVAL; + } + + range->start = start; + range->end = end; + + return 0; +} + static void memory_ranges_init(struct ivpu_device *vdev) { if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M); - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, 
SZ_2G); - ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G); } else { - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G); - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G); vdev->hw->ranges.dma = vdev->hw->ranges.user; } + + drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start, + FW_SHARED_MEM_ALIGNMENT)); } static int wp_enable(struct ivpu_device *vdev) @@ -373,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr) pm_runtime_mark_last_busy(vdev->drm.dev); return IRQ_HANDLED; } + +bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev) +{ + unsigned long long msr_integrity_caps; + int ret; + + if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX) + return false; + + ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps); + if (ret) { + ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret); + return false; + } + + ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps); + + return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK; +} diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index d79668fe1609..b6d0f0d0dccc 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -21,6 +21,7 @@ struct ivpu_hw_info { 
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq); } irq; struct { + struct ivpu_addr_range runtime; struct ivpu_addr_range global; struct ivpu_addr_range user; struct ivpu_addr_range shave; @@ -51,6 +52,8 @@ struct ivpu_hw_info { }; int ivpu_hw_init(struct ivpu_device *vdev); +int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, + u64 size); int ivpu_hw_power_up(struct ivpu_device *vdev); int ivpu_hw_power_down(struct ivpu_device *vdev); int ivpu_hw_reset(struct ivpu_device *vdev); @@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev); void ivpu_hw_irq_enable(struct ivpu_device *vdev); void ivpu_hw_irq_disable(struct ivpu_device *vdev); irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr); +bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev); static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq) { @@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq) return vdev->hw->irq.ip_irq_handler(vdev, irq); } -static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size) -{ - range->start = start; - range->end = start + size; -} - static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range) { return range->end - range->start; diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c index afdb3b2aa72a..06e65c592618 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.c +++ b/drivers/accel/ivpu/ivpu_hw_btrs.c @@ -321,6 +321,14 @@ static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable) return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); } +static int wait_for_cdyn_deassert(struct ivpu_device *vdev) +{ + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) + return 0; + + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US); +} + int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) { struct wp_request wp; @@ -354,6 +362,14 @@ 
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) return ret; } + if (!enable) { + ret = wait_for_cdyn_deassert(vdev); + if (ret) { + ivpu_err(vdev, "Timed out waiting for CDYN deassert\n"); + return ret; + } + } + return 0; } @@ -673,7 +689,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) { ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); - queue_work(system_wq, &vdev->irq_dct_work); + queue_work(system_percpu_wq, &vdev->irq_dct_work); } if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) { @@ -752,7 +768,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable) } } -void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) +void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent) { u32 val = 0; u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE; diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h index 032c384ac3d4..c4c10e22f30f 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs.h @@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev); bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); -void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent); +void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent); u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h index fff2ef2cada6..a81a9ba540fa 100644 --- 
a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h @@ -74,6 +74,9 @@ #define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u #define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0) +#define VPU_HW_BTRS_LNL_CDYN 0x0000014cu +#define VPU_HW_BTRS_LNL_CDYN_CDYN_MASK GENMASK(15, 0) + #define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u #define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0) #define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1) diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c index 2bf9882ab52e..06aa1e7dc50b 100644 --- a/drivers/accel/ivpu/ivpu_hw_ip.c +++ b/drivers/accel/ivpu/ivpu_hw_ip.c @@ -691,6 +691,13 @@ static void pwr_island_delay_set(struct ivpu_device *vdev) status = high ? 46 : 3; break; + case PCI_DEVICE_ID_NVL: + post = high ? 198 : 17; + post1 = 0; + post2 = high ? 198 : 17; + status = 0; + break; + default: dump_stack(); ivpu_err(vdev, "Unknown device ID\n"); @@ -889,6 +896,9 @@ static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable) static int soc_cpu_enable(struct ivpu_device *vdev) { + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX) + return 0; + return soc_cpu_drive_40xx(vdev, true); } diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 5f00809d448a..1f13bf95b2b3 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -459,7 +459,7 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev) } } - queue_work(system_wq, &vdev->irq_ipc_work); + queue_work(system_percpu_wq, &vdev->irq_ipc_work); } void ivpu_ipc_irq_work_fn(struct work_struct *work) diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 060f1fc031d3..4f8564e2878a 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, struct ivpu_file_priv 
*file_priv, struct ivpu_cmdq *cmdq) { - u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE); - u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE); - - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW || - ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + if (ivpu_fw_preempt_buf_size(vdev) == 0) return 0; cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, - primary_size, DRM_IVPU_BO_WC); + vdev->fw->primary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->primary_preempt_buf) { ivpu_err(vdev, "Failed to create primary preemption buffer\n"); return -ENOMEM; } cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, - secondary_size, DRM_IVPU_BO_WC); + vdev->fw->secondary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->secondary_preempt_buf) { ivpu_err(vdev, "Failed to create secondary preemption buffer\n"); goto err_free_primary; @@ -66,20 +64,39 @@ err_free_primary: static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) - return; - if (cmdq->primary_preempt_buf) ivpu_bo_free(cmdq->primary_preempt_buf); if (cmdq->secondary_preempt_buf) ivpu_bo_free(cmdq->secondary_preempt_buf); } +static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, + struct ivpu_cmdq *cmdq, struct ivpu_job *job) +{ + int ret; + + /* Use preemption buffer provided by the user space */ + if (job->primary_preempt_buf) + return 0; + + if (!cmdq->primary_preempt_buf) { + /* Allocate per command queue preemption buffers */ + ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); + if (ret) + return ret; + } + + /* Use preemption buffers allocated by the kernel */ + job->primary_preempt_buf = cmdq->primary_preempt_buf; + job->secondary_preempt_buf = cmdq->secondary_preempt_buf; + + return 0; +} + static struct ivpu_cmdq 
*ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_cmdq *cmdq; - int ret; cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL); if (!cmdq) @@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) if (!cmdq->mem) goto err_free_cmdq; - ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); - if (ret) - ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n"); - return cmdq; err_free_cmdq: @@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq * ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); - if (!ret) + if (!ret) { ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n", cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority); - else + } else { xa_erase(&vdev->db_xa, cmdq->db_id); + cmdq->db_id = 0; + } return ret; } @@ -333,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id); if (!cmdq) { - ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id); + ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id); return NULL; } @@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION)) entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; - if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { - if (cmdq->primary_preempt_buf) { - entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; - entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); - } + if (job->primary_preempt_buf) { + entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr; + entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf); + } - if (cmdq->secondary_preempt_buf) { - 
entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; - entry->secondary_preempt_buf_size = - ivpu_bo_size(cmdq->secondary_preempt_buf); - } + if (job->secondary_preempt_buf) { + entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr; + entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf); } wmb(); /* Ensure that tail is updated after filling entry */ @@ -522,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count) job->bo_count = bo_count; job->done_fence = ivpu_fence_create(vdev); if (!job->done_fence) { - ivpu_warn_ratelimited(vdev, "Failed to create a fence\n"); + ivpu_err(vdev, "Failed to create a fence\n"); goto err_free_job; } @@ -552,6 +564,44 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device * return job; } +bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status) +{ + lockdep_assert_held(&vdev->submitted_jobs_lock); + + switch (job_status) { + case VPU_JSM_STATUS_PROCESSING_ERR: + case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... 
VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX: + { + struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id); + + if (!job) + return false; + + /* Trigger an engine reset */ + guard(mutex)(&job->file_priv->lock); + + job->job_status = job_status; + + if (job->file_priv->has_mmu_faults) + return false; + + /* + * Mark context as faulty and defer destruction of the job to jobs abort thread + * handler to synchronize between both faults and jobs returning context violation + * status and ensure both are handled in the same way + */ + job->file_priv->has_mmu_faults = true; + queue_work(system_percpu_wq, &vdev->context_abort_work); + return true; + } + default: + /* Complete job with error status, engine reset not required */ + break; + } + + return false; +} + static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status) { struct ivpu_job *job; @@ -562,35 +612,22 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 if (!job) return -ENOENT; - if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) { - guard(mutex)(&job->file_priv->lock); + ivpu_job_remove_from_submitted_jobs(vdev, job_id); + if (job->job_status == VPU_JSM_STATUS_SUCCESS) { if (job->file_priv->has_mmu_faults) - return 0; - - /* - * Mark context as faulty and defer destruction of the job to jobs abort thread - * handler to synchronize between both faults and jobs returning context violation - * status and ensure both are handled in the same way - */ - job->file_priv->has_mmu_faults = true; - queue_work(system_wq, &vdev->context_abort_work); - return 0; + job->job_status = DRM_IVPU_JOB_STATUS_ABORTED; + else + job->job_status = job_status; } - job = ivpu_job_remove_from_submitted_jobs(vdev, job_id); - if (!job) - return -ENOENT; - - if (job->file_priv->has_mmu_faults) - job_status = DRM_IVPU_JOB_STATUS_ABORTED; - - job->bos[CMD_BUF_IDX]->job_status = job_status; + job->bos[CMD_BUF_IDX]->job_status = job->job_status; 
dma_fence_signal(job->done_fence); trace_job("done", job); ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n", - job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status); + job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, + job->job_status); ivpu_job_destroy(job); ivpu_stop_job_timeout_detection(vdev); @@ -650,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) else cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id); if (!cmdq) { - ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id); ret = -EINVAL; goto err_unlock; } @@ -661,6 +697,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) goto err_unlock; } + ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job); + if (ret) { + ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n", + job->job_id, ret); + goto err_unlock; + } + job->cmdq_id = cmdq->id; is_first_job = xa_empty(&vdev->submitted_jobs_xa); @@ -714,7 +757,7 @@ err_unlock: static int ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, - u32 buf_count, u32 commands_offset) + u32 buf_count, u32 commands_offset, u32 preempt_buffer_index) { struct ivpu_file_priv *file_priv = job->file_priv; struct ivpu_device *vdev = file_priv->vdev; @@ -727,40 +770,58 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 for (i = 0; i < buf_count; i++) { struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]); - if (!obj) + if (!obj) { + ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n", + buf_handles[i]); return -ENOENT; + } job->bos[i] = to_ivpu_bo(obj); - ret = ivpu_bo_pin(job->bos[i]); + ret = ivpu_bo_bind(job->bos[i]); if (ret) return ret; } bo = job->bos[CMD_BUF_IDX]; if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) { - ivpu_warn(vdev, "Buffer is already in 
use\n"); + ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n"); return -EBUSY; } if (commands_offset >= ivpu_bo_size(bo)) { - ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset); + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n", + commands_offset, ivpu_bo_size(bo)); return -EINVAL; } job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; + if (preempt_buffer_index) { + struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index]; + + if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) { + ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n"); + return -EINVAL; + } + if (ivpu_bo_is_mappable(preempt_bo)) { + ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n"); + return -EINVAL; + } + job->primary_preempt_buf = preempt_bo; + } + ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); if (ret) { - ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); + ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret); return ret; } for (i = 0; i < buf_count; i++) { ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1); if (ret) { - ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); + ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret); goto unlock_reservations; } } @@ -780,7 +841,7 @@ unlock_reservations: static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id, u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset, - u8 priority) + u32 preempt_buffer_index, u8 priority) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_job *job; @@ -807,16 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, job = ivpu_job_create(file_priv, engine, buffer_count); if (!job) { - ivpu_err(vdev, "Failed to create job\n"); ret = -ENOMEM; goto err_exit_dev; } - ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, 
buffer_count, cmds_offset); - if (ret) { - ivpu_err(vdev, "Failed to prepare job: %d\n", ret); + ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset, + preempt_buffer_index); + if (ret) goto err_destroy_job; - } down_read(&vdev->pm->reset_lock); ret = ivpu_job_submit(job, priority, cmdq_id); @@ -842,58 +901,91 @@ err_free_handles: int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_submit *args = data; u8 priority; - if (args->engine != DRM_IVPU_ENGINE_COMPUTE) + if (args->engine != DRM_IVPU_ENGINE_COMPUTE) { + ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine); return -EINVAL; + } - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); return -EINVAL; + } - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); return -EINVAL; + } - if (!IS_ALIGNED(args->commands_offset, 8)) + if (!IS_ALIGNED(args->commands_offset, 8)) { + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); return -EINVAL; + } - if (!file_priv->ctx.id) + if (!file_priv->ctx.id) { + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); return -EINVAL; + } - if (file_priv->has_mmu_faults) + if (file_priv->has_mmu_faults) { + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); return -EBADFD; + } priority = ivpu_job_to_jsm_priority(args->priority); return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine, - (void __user *)args->buffers_ptr, args->commands_offset, priority); + (void __user *)args->buffers_ptr, args->commands_offset, 0, priority); } int 
ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_submit *args = data; - if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } - if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) + if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) { + ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id); return -EINVAL; + } - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); return -EINVAL; + } - if (!IS_ALIGNED(args->commands_offset, 8)) + if (args->preempt_buffer_index >= args->buffer_count) { + ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n", + args->preempt_buffer_index); return -EINVAL; + } - if (!file_priv->ctx.id) + if (!IS_ALIGNED(args->commands_offset, 8)) { + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); return -EINVAL; + } - if (file_priv->has_mmu_faults) + if (!file_priv->ctx.id) { + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); + return -EINVAL; + } + + if (file_priv->has_mmu_faults) { + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); return -EBADFD; + } return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE, - (void __user *)args->buffers_ptr, args->commands_offset, 0); + (void __user *)args->buffers_ptr, args->commands_offset, + args->preempt_buffer_index, 0); } int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -904,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct 
drm_device *dev, void *data, struct drm_file * struct ivpu_cmdq *cmdq; int ret; - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); return -EINVAL; + } ret = ivpu_rpm_get(vdev); if (ret < 0) @@ -936,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file u32 cmdq_id = 0; int ret; - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } ret = ivpu_rpm_get(vdev); if (ret < 0) @@ -984,7 +1082,9 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload; mutex_lock(&vdev->submitted_jobs_lock); - ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); + if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status)) + /* No engine error, complete the job normally */ + ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); mutex_unlock(&vdev->submitted_jobs_lock); } @@ -1012,7 +1112,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) if (ivpu_jsm_reset_engine(vdev, 0)) - return; + goto runtime_put; mutex_lock(&vdev->context_list_lock); xa_for_each(&vdev->context_xa, ctx_id, file_priv) { @@ -1036,7 +1136,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) goto runtime_put; if (ivpu_jsm_hws_resume_engine(vdev, 0)) - return; + goto runtime_put; /* * In hardware scheduling mode NPU already has stopped processing jobs * and won't send us any further 
notifications, thus we have to free job related resources @@ -1049,6 +1149,5 @@ void ivpu_context_abort_work_fn(struct work_struct *work) mutex_unlock(&vdev->submitted_jobs_lock); runtime_put: - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 2e301c2eea7b..3ab61e6a5616 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_JOB_H__ @@ -15,12 +15,17 @@ struct ivpu_device; struct ivpu_file_priv; /** - * struct ivpu_cmdq - Object representing device queue used to send jobs. - * @jobq: Pointer to job queue memory shared with the device - * @mem: Memory allocated for the job queue, shared with device - * @entry_count Number of job entries in the queue - * @db_id: Doorbell assigned to this job queue - * @db_registered: True if doorbell is registered in device + * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU. + * Tracks queue memory, preemption buffers, and metadata for job management. + * @jobq: Pointer to job queue memory shared with the device + * @primary_preempt_buf: Primary preemption buffer for this queue (optional) + * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional) + * @mem: Memory allocated for the job queue, shared with device + * @entry_count: Number of job entries in the queue + * @id: Unique command queue ID + * @db_id: Doorbell ID assigned to this job queue + * @priority: Priority level of the command queue + * @is_legacy: True if this is a legacy command queue */ struct ivpu_cmdq { struct vpu_job_queue *jobq; @@ -35,16 +40,22 @@ struct ivpu_cmdq { }; /** - * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer. 
- * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW. - * This is a unit of execution, and be tracked by the job_id for - * any status reporting from VPU FW through IPC JOB RET/DONE message. - * @file_priv: The client that submitted this job - * @job_id: Job ID for KMD tracking and job status reporting from VPU FW - * @status: Status of the Job from IPC JOB RET/DONE message - * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job - * @submit_status_offset: Offset within batch buffer where job completion handler - will update the job status + * struct ivpu_job - Representing a batch or DMA buffer submitted to the VPU. + * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW. + * The structure holds all resources and metadata needed for job submission, execution, + * and completion handling. + * @vdev: Pointer to the VPU device + * @file_priv: The client context that submitted this job + * @done_fence: Fence signaled when job completes + * @cmd_buf_vpu_addr: VPU address of the command buffer for this job + * @cmdq_id: Command queue ID used for submission + * @job_id: Unique job ID for tracking and status reporting + * @engine_idx: Engine index for job execution + * @job_status: Status reported by firmware for this job + * @primary_preempt_buf: Primary preemption buffer for job + * @secondary_preempt_buf: Secondary preemption buffer for job (optional) + * @bo_count: Number of buffer objects associated with this job + * @bos: Array of buffer objects used by the job (batch buffer is at index 0) */ struct ivpu_job { struct ivpu_device *vdev; @@ -54,6 +65,9 @@ struct ivpu_job { u32 cmdq_id; u32 job_id; u32 engine_idx; + u32 job_status; + struct ivpu_bo *primary_preempt_buf; + struct ivpu_bo *secondary_preempt_buf; size_t bo_count; struct ivpu_bo *bos[] __counted_by(bo_count); }; @@ -71,6 +85,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id) void 
ivpu_job_done_consumer_init(struct ivpu_device *vdev); void ivpu_job_done_consumer_fini(struct ivpu_device *vdev); +bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status); void ivpu_context_abort_work_fn(struct work_struct *work); void ivpu_jobs_abort_all(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 5ea010568faa..e1baf6b64935 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -970,7 +970,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev) } } - queue_work(system_wq, &vdev->context_abort_work); + queue_work(system_percpu_wq, &vdev->context_abort_work); } void ivpu_mmu_evtq_dump(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index f0267efa55aa..87ad593ef47d 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -430,7 +430,7 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only) { size_t start_vpu_addr = vpu_addr; struct scatterlist *sg; @@ -450,6 +450,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, prot = IVPU_MMU_ENTRY_MAPPED; if (llc_coherent) prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT; + if (read_only) + prot |= IVPU_MMU_ENTRY_FLAG_RO; mutex_lock(&ctx->lock); @@ -527,7 +529,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); if (ret) - ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); + ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n", + ctx->id, ret); } int @@ -568,7 +571,7 @@ void ivpu_mmu_context_init(struct 
ivpu_device *vdev, struct ivpu_mmu_context *ct mutex_init(&ctx->lock); if (!context_id) { - start = vdev->hw->ranges.global.start; + start = vdev->hw->ranges.runtime.start; end = vdev->hw->ranges.shave.end; } else { start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start); diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h index f255310968cf..663a11a9db11 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.h +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -42,7 +42,7 @@ int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node); int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, struct sg_table *sgt, bool llc_coherent); + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only); void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, struct sg_table *sgt); int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c index 2a043baf10ca..1d9c1cb17924 100644 --- a/drivers/accel/ivpu/ivpu_ms.c +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -8,6 +8,7 @@ #include "ivpu_drv.h" #include "ivpu_gem.h" +#include "ivpu_hw.h" #include "ivpu_jsm_msg.h" #include "ivpu_ms.h" #include "ivpu_pm.h" @@ -37,8 +38,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_ivpu_metric_streamer_start *args = data; struct ivpu_device *vdev = file_priv->vdev; struct ivpu_ms_instance *ms; - u64 single_buff_size; u32 sample_size; + u64 buf_size; int ret; if (!args->metric_group_mask || !args->read_period_samples || @@ -52,7 +53,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil mutex_lock(&file_priv->ms_lock); if (get_instance_by_mask(file_priv, 
args->metric_group_mask)) { - ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask); + ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n", + args->metric_group_mask); ret = -EALREADY; goto unlock; } @@ -69,12 +71,18 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil if (ret) goto err_free_ms; - single_buff_size = sample_size * - ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER); - ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS), - DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); + buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size * + MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS); + if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) { + ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n", + buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global)); + ret = -EINVAL; + goto err_free_ms; + } + + ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); if (!ms->bo) { - ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size); + ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size); ret = -ENOMEM; goto err_free_ms; } @@ -175,7 +183,8 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file * ms = get_instance_by_mask(file_priv, args->metric_group_mask); if (!ms) { - ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask); + ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n", + args->metric_group_mask); ret = -EINVAL; goto unlock; } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 475ddc94f1cf..480c075d87f6 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -54,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev) { struct 
ivpu_fw_info *fw = vdev->fw; - struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem); + struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp); if (!bp->save_restore_ret_address) { ivpu_pm_prepare_cold_boot(vdev); @@ -186,7 +186,7 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason) if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) { ivpu_hw_diagnose_failure(vdev); ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */ - queue_work(system_unbound_wq, &vdev->pm->recovery_work); + queue_work(system_dfl_wq, &vdev->pm->recovery_work); } } @@ -226,7 +226,8 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev) unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr; /* No-op if already queued */ - queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms)); + queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work, + msecs_to_jiffies(timeout_ms)); } void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev) @@ -359,7 +360,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev) void ivpu_rpm_put(struct ivpu_device *vdev) { - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } @@ -428,7 +428,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev) struct device *dev = vdev->drm.dev; pm_runtime_allow(dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -502,6 +501,11 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work) else ret = ivpu_pm_dct_disable(vdev); - if (!ret) - ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent); + if (!ret) { + /* Convert percent to U1.7 format */ + u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100); + + ivpu_hw_btrs_dct_set_status(vdev, enable, val); + } + } diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 4b6b2b3d2583..bca6a44dc041 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ 
b/drivers/accel/ivpu/vpu_jsm_api.h @@ -1,15 +1,16 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright (c) 2020-2024, Intel Corporation. + * Copyright (c) 2020-2025, Intel Corporation. + */ + +/** + * @addtogroup Jsm + * @{ */ /** * @file * @brief JSM shared definitions - * - * @ingroup Jsm - * @brief JSM shared definitions - * @{ */ #ifndef VPU_JSM_API_H #define VPU_JSM_API_H @@ -22,7 +23,7 @@ /* * Minor version changes when API backward compatibility is preserved. */ -#define VPU_JSM_API_VER_MINOR 29 +#define VPU_JSM_API_VER_MINOR 33 /* * API header changed (field names, documentation, formatting) but API itself has not been changed @@ -71,9 +72,15 @@ #define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU #define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU -/* Job status returned when the job was preempted mid-inference */ +/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */ #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU +/* Job status returned when the job was preempted mid-command */ +#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU +/* Range of status codes that require engine reset */ +#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU #define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU +#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU +#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU /* * Host <-> VPU IPC channels. @@ -134,11 +141,21 @@ enum { * 2. Native fence queues are only supported on VPU 40xx onwards. */ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), - /* * Enable turbo mode for testing NPU performance; not recommended for regular usage. */ - VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U) + VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U), + /* + * Queue error detection mode flag + * For 'interactive' queues (this bit not set), the FW will identify queues that have not + * completed a job inside the TDR timeout as in error as part of engine reset sequence. 
+ * For 'non-interactive' queues (this bit set), the FW will identify queues that have not + * progressed the heartbeat inside the non-interactive no-progress timeout as in error as + * part of engine reset sequence. Additionally, there is an upper limit applied to these + * queues: even if they progress the heartbeat, if they run longer than non-interactive + * timeout, then the FW will also identify them as in error. + */ + VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U) }; /* @@ -209,7 +226,7 @@ enum { */ #define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2 -/* +/** * Job scheduling priority bands for both hardware scheduling and OS scheduling. */ enum vpu_job_scheduling_priority_band { @@ -220,16 +237,16 @@ enum vpu_job_scheduling_priority_band { VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4, }; -/* +/** * Job format. * Jobs defines the actual workloads to be executed by a given engine. */ struct vpu_job_queue_entry { - /**< Address of VPU commands batch buffer */ + /** Address of VPU commands batch buffer */ u64 batch_buf_addr; - /**< Job ID */ + /** Job ID */ u32 job_id; - /**< Flags bit field, see VPU_JOB_FLAGS_* above */ + /** Flags bit field, see VPU_JOB_FLAGS_* above */ u32 flags; /** * Doorbell ring timestamp taken by KMD from SoC's global system clock, in @@ -237,20 +254,20 @@ struct vpu_job_queue_entry { * to match other profiling timestamps. 
*/ u64 doorbell_timestamp; - /**< Extra id for job tracking, used only in the firmware perf traces */ + /** Extra id for job tracking, used only in the firmware perf traces */ u64 host_tracking_id; - /**< Address of the primary preemption buffer to use for this job */ + /** Address of the primary preemption buffer to use for this job */ u64 primary_preempt_buf_addr; - /**< Size of the primary preemption buffer to use for this job */ + /** Size of the primary preemption buffer to use for this job */ u32 primary_preempt_buf_size; - /**< Size of secondary preemption buffer to use for this job */ + /** Size of secondary preemption buffer to use for this job */ u32 secondary_preempt_buf_size; - /**< Address of secondary preemption buffer to use for this job */ + /** Address of secondary preemption buffer to use for this job */ u64 secondary_preempt_buf_addr; u64 reserved_0; }; -/* +/** * Inline command format. * Inline commands are the commands executed at scheduler level (typically, * synchronization directives). Inline command and job objects must be of @@ -258,34 +275,36 @@ struct vpu_job_queue_entry { */ struct vpu_inline_cmd { u64 reserved_0; - /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ + /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ u32 type; - /* Flags bit field, see VPU_JOB_FLAGS_* above. */ + /** Flags bit field, see VPU_JOB_FLAGS_* above. */ u32 flags; - /* Inline command payload. Depends on inline command type. */ - union { - /* Fence (wait and signal) commands' payload. */ - struct { - /* Fence object handle. */ + /** Inline command payload. Depends on inline command type. */ + union payload { + /** Fence (wait and signal) commands' payload. */ + struct fence { + /** Fence object handle. */ u64 fence_handle; - /* User VA of the current fence value. */ + /** User VA of the current fence value. */ u64 current_value_va; - /* User VA of the monitored fence value (read-only). 
*/ + /** User VA of the monitored fence value (read-only). */ u64 monitored_value_va; - /* Value to wait for or write in fence location. */ + /** Value to wait for or write in fence location. */ u64 value; - /* User VA of the log buffer in which to add log entry on completion. */ + /** User VA of the log buffer in which to add log entry on completion. */ u64 log_buffer_va; - /* NPU private data. */ + /** NPU private data. */ u64 npu_private_data; } fence; - /* Other commands do not have a payload. */ - /* Payload definition for future inline commands can be inserted here. */ + /** + * Other commands do not have a payload: + * Payload definition for future inline commands can be inserted here. + */ u64 reserved_1[6]; } payload; }; -/* +/** * Job queue slots can be populated either with job objects or inline command objects. */ union vpu_jobq_slot { @@ -293,7 +312,7 @@ union vpu_jobq_slot { struct vpu_inline_cmd inline_cmd; }; -/* +/** * Job queue control registers. */ struct vpu_job_queue_header { @@ -301,18 +320,18 @@ struct vpu_job_queue_header { u32 head; u32 tail; u32 flags; - /* Set to 1 to indicate priority_band field is valid */ + /** Set to 1 to indicate priority_band field is valid */ u32 priority_band_valid; - /* + /** * Priority for the work of this job queue, valid only if the HWS is NOT used - * and the `priority_band_valid` is set to 1. It is applied only during - * the VPU_JSM_MSG_REGISTER_DB message processing. - * The device firmware might use the `priority_band` to optimize the power + * and the @ref priority_band_valid is set to 1. It is applied only during + * the @ref VPU_JSM_MSG_REGISTER_DB message processing. + * The device firmware might use the priority_band to optimize the power * management logic, but it will not affect the order of jobs. 
* Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority, limited to 0..31 range */ + /** Inside realtime band assigns a further priority, limited to 0..31 range */ u32 realtime_priority_level; u32 reserved_0[9]; }; @@ -337,16 +356,16 @@ enum vpu_trace_entity_type { VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2, }; -/* +/** * HWS specific log buffer header details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_header { - /* Written by VPU after adding a log entry. Initialised by host to 0. */ + /** Written by VPU after adding a log entry. Initialised by host to 0. */ u32 first_free_entry_index; - /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ + /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ u32 wraparound_count; - /* + /** * This is the number of buffers that can be stored in the log buffer provided by the host. * It is written by host before passing buffer to VPU. VPU should consider it read-only. */ @@ -354,14 +373,14 @@ struct vpu_hws_log_buffer_header { u64 reserved[2]; }; -/* +/** * HWS specific log buffer entry details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_entry { - /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ + /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ u64 vpu_timestamp; - /* + /** * Operation type: * 0 - context state change * 1 - queue new work @@ -371,7 +390,7 @@ struct vpu_hws_log_buffer_entry { */ u32 operation_type; u32 reserved; - /* Operation data depends on operation type */ + /** Operation data depends on operation type */ u64 operation_data[2]; }; @@ -381,51 +400,54 @@ enum vpu_hws_native_fence_log_type { VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2 }; -/* HWS native fence log buffer header. */ +/** HWS native fence log buffer header. 
*/ struct vpu_hws_native_fence_log_header { union { struct { - /* Index of the first free entry in buffer. */ + /** Index of the first free entry in buffer. */ u32 first_free_entry_idx; - /* Incremented each time NPU wraps around the buffer to write next entry. */ + /** + * Incremented whenever the NPU wraps around the buffer and writes + * to the first entry again. + */ u32 wraparound_count; }; - /* Field allowing atomic update of both fields above. */ + /** Field allowing atomic update of both fields above. */ u64 atomic_wraparound_and_entry_idx; }; - /* Log buffer type, see enum vpu_hws_native_fence_log_type. */ + /** Log buffer type, see enum vpu_hws_native_fence_log_type. */ u64 type; - /* Allocated number of entries in the log buffer. */ + /** Allocated number of entries in the log buffer. */ u64 entry_nb; u64 reserved[2]; }; -/* Native fence log operation types. */ +/** Native fence log operation types. */ enum vpu_hws_native_fence_log_op { VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0, VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1 }; -/* HWS native fence log entry. */ +/** HWS native fence log entry. */ struct vpu_hws_native_fence_log_entry { - /* Newly signaled/unblocked fence value. */ + /** Newly signaled/unblocked fence value. */ u64 fence_value; - /* Native fence object handle to which this operation belongs. */ + /** Native fence object handle to which this operation belongs. */ u64 fence_handle; - /* Operation type, see enum vpu_hws_native_fence_log_op. */ + /** Operation type, see enum vpu_hws_native_fence_log_op. */ u64 op_type; u64 reserved_0; - /* + /** * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence * wait was started (in NPU SysTime). */ u64 fence_wait_start_ts; u64 reserved_1; - /* Timestamp at which fence operation was completed (in NPU SysTime). */ + /** Timestamp at which fence operation was completed (in NPU SysTime). */ u64 fence_end_ts; }; -/* Native fence log buffer. */ +/** Native fence log buffer. 
*/ struct vpu_hws_native_fence_log_buffer { struct vpu_hws_native_fence_log_header header; struct vpu_hws_native_fence_log_entry entry[]; @@ -435,10 +457,17 @@ struct vpu_hws_native_fence_log_buffer { * Host <-> VPU IPC messages types. */ enum vpu_ipc_msg_type { + /** Unsupported command */ VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF, - /* IPC Host -> Device, Async commands */ + /** IPC Host -> Device, base id for async commands */ VPU_JSM_MSG_ASYNC_CMD = 0x1100, + /** + * Reset engine. The NPU cancels all the jobs currently executing on the target + * engine making the engine become idle and then does a HW reset, before returning + * to the host. + * @see struct vpu_ipc_msg_payload_engine_reset + */ VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD, /** * Preempt engine. The NPU stops (preempts) all the jobs currently @@ -448,10 +477,24 @@ enum vpu_ipc_msg_type { * the target engine, but it stops processing them (until the queue doorbell * is rung again); the host is responsible to reset the job queue, either * after preemption or when resubmitting jobs to the queue. + * @see vpu_ipc_msg_payload_engine_preempt */ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101, + /** + * OS scheduling doorbell register command + * @see vpu_ipc_msg_payload_register_db + */ VPU_JSM_MSG_REGISTER_DB = 0x1102, + /** + * OS scheduling doorbell unregister command + * @see vpu_ipc_msg_payload_unregister_db + */ VPU_JSM_MSG_UNREGISTER_DB = 0x1103, + /** + * Query engine heartbeat. Heartbeat is expected to increase monotonically + * and increase while work is being progressed by NPU. + * @see vpu_ipc_msg_payload_query_engine_hb + */ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105, VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106, @@ -477,6 +520,7 @@ enum vpu_ipc_msg_type { * aborted and removed from internal scheduling queues. All doorbells assigned * to the host_ssid are unregistered and any internal FW resources belonging to * the host_ssid are released. 
+ * @see vpu_ipc_msg_payload_ssid_release */ VPU_JSM_MSG_SSID_RELEASE = 0x110e, /** @@ -504,43 +548,78 @@ enum vpu_ipc_msg_type { * @see vpu_jsm_metric_streamer_start */ VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112, - /** Control command: Priority band setup */ + /** + * Control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113, - /** Control command: Create command queue */ + /** + * Control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq + */ VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114, - /** Control command: Destroy command queue */ + /** + * Control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115, - /** Control command: Set context scheduling properties */ + /** + * Control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116, - /* + /** * Register a doorbell to notify VPU of new work. The doorbell may later be * deallocated or reassigned to another context. + * @see vpu_jsm_hws_register_db */ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, - /** Control command: Log buffer setting */ + /** + * Control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118, - /* Control command: Suspend command queue. */ + /** + * Control command: Suspend command queue. 
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119, - /* Control command: Resume command queue */ + /** + * Control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a, - /* Control command: Resume engine after reset */ + /** + * Control command: Resume engine after reset + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b, - /* Control command: Enable survivability/DCT mode */ + /** + * Control command: Enable survivability/DCT mode + * @see vpu_ipc_msg_payload_pwr_dct_control + */ VPU_JSM_MSG_DCT_ENABLE = 0x111c, - /* Control command: Disable survivability/DCT mode */ + /** + * Control command: Disable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_DISABLE = 0x111d, /** * Dump VPU state. To be used for debug purposes only. - * NOTE: Please introduce new ASYNC commands before this one. * + * This command has no payload. + * NOTE: Please introduce new ASYNC commands before this one. */ VPU_JSM_MSG_STATE_DUMP = 0x11FF, - /* IPC Host -> Device, General commands */ + /** IPC Host -> Device, base id for general commands */ VPU_JSM_MSG_GENERAL_CMD = 0x1200, + /** Unsupported command */ VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD, /** * Control dyndbg behavior by executing a dyndbg command; equivalent to - * Linux command: `echo '' > /dynamic_debug/control`. 
+ * Linux command: + * @verbatim echo '' > /dynamic_debug/control @endverbatim + * @see vpu_ipc_msg_payload_dyndbg_control */ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, /** @@ -548,17 +627,35 @@ enum vpu_ipc_msg_type { */ VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202, - /* IPC Device -> Host, Job completion */ + /** + * IPC Device -> Host, Job completion + * @see struct vpu_ipc_msg_payload_job_done + */ VPU_JSM_MSG_JOB_DONE = 0x2100, - /* IPC Device -> Host, Fence signalled */ + /** + * IPC Device -> Host, Fence signalled + * @see vpu_ipc_msg_payload_native_fence_signalled + */ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101, /* IPC Device -> Host, Async command completion */ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, + /** + * IPC Device -> Host, engine reset complete + * @see vpu_ipc_msg_payload_engine_reset_done + */ VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, + /** + * Preempt complete message + * @see vpu_ipc_msg_payload_engine_preempt_done + */ VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201, VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202, VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203, + /** + * Response to query engine heartbeat. + * @see vpu_ipc_msg_payload_query_engine_hb_done + */ VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205, VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206, @@ -575,7 +672,10 @@ enum vpu_ipc_msg_type { VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c, /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */ VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d, - /** Response to VPU_JSM_MSG_SSID_RELEASE. */ + /** + * Response to VPU_JSM_MSG_SSID_RELEASE. + * @see vpu_ipc_msg_payload_ssid_release + */ VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e, /** * Response to VPU_JSM_MSG_METRIC_STREAMER_START. 
@@ -605,37 +705,71 @@ enum vpu_ipc_msg_type { /** * Asynchronous event sent from the VPU to the host either when the current * metric buffer is full or when the VPU has collected a multiple of - * @notify_sample_count samples as indicated through the start command - * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected - * metric data. + * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated + * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns + * information about collected metric data. * @see vpu_jsm_metric_streamer_done */ VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213, - /** Response to control command: Priority band setup */ + /** + * Response to control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214, - /** Response to control command: Create command queue */ + /** + * Response to control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp + */ VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215, - /** Response to control command: Destroy command queue */ + /** + * Response to control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, - /** Response to control command: Set context scheduling properties */ + /** + * Response to control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, - /** Response to control command: Log buffer setting */ + /** + * Response to control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218, - /* IPC Device -> Host, HWS notify index entry of log buffer written */ + /** + * IPC Device -> Host, HWS notify index entry of log buffer written + * @see 
vpu_ipc_msg_payload_hws_scheduling_log_notification + */ VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219, - /* IPC Device -> Host, HWS completion of a context suspend request */ + /** + * IPC Device -> Host, HWS completion of a context suspend request + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a, - /* Response to control command: Resume command queue */ + /** + * Response to control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b, - /* Response to control command: Resume engine command response */ + /** + * Response to control command: Resume engine command response + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c, - /* Response to control command: Enable survivability/DCT mode */ + /** + * Response to control command: Enable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d, - /* Response to control command: Disable survivability/DCT mode */ + /** + * Response to control command: Disable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e, /** * Response to state dump control command. - * NOTE: Please introduce new ASYNC responses before this one. * + * This command has no payload. + * NOTE: Please introduce new ASYNC responses before this one. */ VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF, @@ -653,57 +787,66 @@ enum vpu_ipc_msg_type { enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED }; -/* - * Host <-> LRT IPC message payload definitions +/** + * Engine reset request payload + * @see VPU_JSM_MSG_ENGINE_RESET */ struct vpu_ipc_msg_payload_engine_reset { - /* Engine to be reset. */ + /** Engine to be reset. 
*/ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Engine preemption request struct + * @see VPU_JSM_MSG_ENGINE_PREEMPT + */ struct vpu_ipc_msg_payload_engine_preempt { - /* Engine to be preempted. */ + /** Engine to be preempted. */ u32 engine_idx; - /* ID of the preemption request. */ + /** ID of the preemption request. */ u32 preempt_id; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for only OS scheduling. * @see VPU_JSM_MSG_REGISTER_DB */ struct vpu_ipc_msg_payload_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Virtual address in Global GTT pointing to the start of job queue. */ + /** Virtual address in Global GTT pointing to the start of job queue. */ u64 jobq_base; - /* Size of the job queue in bytes. */ + /** Size of the job queue in bytes. */ u32 jobq_size; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; }; /** - * @brief Unregister doorbell command structure. + * Unregister doorbell command structure. * Request structure to unregister a doorbell for both HW and OS scheduling. * @see VPU_JSM_MSG_UNREGISTER_DB */ struct vpu_ipc_msg_payload_unregister_db { - /* Index of the doorbell to unregister. */ + /** Index of the doorbell to unregister. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Heartbeat request structure + * @see VPU_JSM_MSG_QUERY_ENGINE_HB + */ struct vpu_ipc_msg_payload_query_engine_hb { - /* Engine to return heartbeat value. */ + /** Engine to return heartbeat value. 
*/ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -723,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level { u32 reserved_0; }; +/** + * Structure for requesting ssid release + * @see VPU_JSM_MSG_SSID_RELEASE + */ struct vpu_ipc_msg_payload_ssid_release { - /* Host sub-stream ID for the context to be released. */ + /** Host sub-stream ID for the context to be released. */ u32 host_ssid; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -752,7 +899,7 @@ struct vpu_jsm_metric_streamer_start { u64 sampling_rate; /** * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message - * after every @notify_sample_count samples is collected or dropped by the VPU. + * after every @ref notify_sample_count samples is collected or dropped by the VPU. * If set to UINT_MAX the VPU will only generate a notification when the metric * buffer is full. If set to 0 the VPU will never generate a notification. */ @@ -762,9 +909,9 @@ struct vpu_jsm_metric_streamer_start { * Address and size of the buffer where the VPU will write metric data. The * VPU writes all counters from enabled metric groups one after another. If * there is no space left to write data at the next sample period the VPU - * will switch to the next buffer (@see next_buffer_addr) and will optionally - * send a notification to the host driver if @notify_sample_count is non-zero. - * If @next_buffer_addr is NULL the VPU will stop collecting metric data. + * will switch to the next buffer (@ref next_buffer_addr) and will optionally + * send a notification to the host driver if @ref notify_sample_count is non-zero. + * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data. */ u64 buffer_addr; u64 buffer_size; @@ -827,63 +974,80 @@ struct vpu_jsm_metric_streamer_update { u64 next_buffer_size; }; +/** + * Device -> host job completion message. + * @see VPU_JSM_MSG_JOB_DONE + */ struct vpu_ipc_msg_payload_job_done { - /* Engine to which the job was submitted. 
*/ + /** Engine to which the job was submitted. */ u32 engine_idx; - /* Index of the doorbell to which the job was submitted */ + /** Index of the doorbell to which the job was submitted */ u32 db_idx; - /* ID of the completed job */ + /** ID of the completed job */ u32 job_id; - /* Status of the completed job */ + /** Status of the completed job */ u32 job_status; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* +/** * Notification message upon native fence signalling. * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED */ struct vpu_ipc_msg_payload_native_fence_signalled { - /* Engine ID. */ + /** Engine ID. */ u32 engine_idx; - /* Host SSID. */ + /** Host SSID. */ u32 host_ssid; - /* CMDQ ID */ + /** CMDQ ID */ u64 cmdq_id; - /* Fence object handle. */ + /** Fence object handle. */ u64 fence_handle; }; +/** + * vpu_ipc_msg_payload_engine_reset_done will contain an array of this structure + * which contains which queues caused reset if FW was able to detect any error. + * @see vpu_ipc_msg_payload_engine_reset_done + */ struct vpu_jsm_engine_reset_context { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* See VPU_ENGINE_RESET_CONTEXT_* defines */ + /** See VPU_ENGINE_RESET_CONTEXT_* defines */ u64 flags; }; +/** + * Engine reset response. 
+ * @see VPU_JSM_MSG_ENGINE_RESET_DONE + */ struct vpu_ipc_msg_payload_engine_reset_done { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Number of impacted contexts */ + /** Number of impacted contexts */ u32 num_impacted_contexts; - /* Array of impacted command queue ids and their flags */ + /** Array of impacted command queue ids and their flags */ struct vpu_jsm_engine_reset_context impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS]; }; +/** + * Preemption response struct + * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE + */ struct vpu_ipc_msg_payload_engine_preempt_done { - /* Engine preempted. */ + /** Engine preempted. */ u32 engine_idx; - /* ID of the preemption request. */ + /** ID of the preemption request. */ u32 preempt_id; }; @@ -912,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done { u32 reserved_0; }; +/** + * Structure for heartbeat response + * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE + */ struct vpu_ipc_msg_payload_query_engine_hb_done { - /* Engine returning heartbeat value. */ + /** Engine returning heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Heartbeat value. */ + /** Heartbeat value. */ u64 heartbeat; }; @@ -937,7 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done { u8 power_limit[16]; }; -/* HWS priority band setup request / response */ +/** + * HWS priority band setup request / response + * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP + */ struct vpu_ipc_msg_payload_hws_priority_band_setup { /* * Grace period in 100ns units when preempting another priority band for @@ -964,15 +1135,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { * TDR timeout value in milliseconds. Default value of 0 meaning no timeout. */ u32 tdr_timeout; + /* Non-interactive queue timeout for no progress of heartbeat in milliseconds. + * Default value of 0 meaning no timeout. 
+ */ + u32 non_interactive_no_progress_timeout; + /* + * Non-interactive queue upper limit timeout value in milliseconds. Default + * value of 0 meaning no timeout. + */ + u32 non_interactive_timeout; }; -/* +/** * @brief HWS create command queue request. * Host will create a command queue via this command. * Note: Cmdq group is a handle of an object which * may contain one or more command queues. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq { /* Process id */ @@ -993,66 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq { u32 reserved_0; }; -/* - * @brief HWS create command queue response. - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE +/** + * HWS create command queue response. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp { - /* Process id */ + /** Process id */ u64 process_id; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Engine for which queue is being created */ + /** Engine for which queue is being created */ u32 engine_idx; - /* Command queue group */ + /** Command queue group */ u64 cmdq_group; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS destroy command queue request / response */ +/** + * HWS destroy command queue request / response + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP + */ struct vpu_ipc_msg_payload_hws_destroy_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS set context scheduling properties request / response */ +/** + * HWS set context scheduling properties request / response + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP + */ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero 
Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Priority band to assign to work of this context. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority */ + /** Inside realtime band assigns a further priority */ u32 realtime_priority_level; - /* Priority relative to other contexts in the same process */ + /** Priority relative to other contexts in the same process */ s32 in_process_priority; - /* Zero padding / Reserved */ + /** Zero padding / Reserved */ u32 reserved_1; - /* + /** * Context quantum relative to other contexts of same priority in the same process * Minimum value supported by NPU is 1ms (10000 in 100ns units). */ u64 context_quantum; - /* Grace period when preempting context of the same priority within the same process */ + /** Grace period when preempting context of the same priority within the same process */ u64 grace_period_same_priority; - /* Grace period when preempting context of a lower priority within the same process */ + /** Grace period when preempting context of a lower priority within the same process */ u64 grace_period_lower_priority; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for both HW and OS scheduling. * Note: Queue base and size are added here so that the same structure can be used for * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored @@ -1061,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { * @see VPU_JSM_MSG_HWS_REGISTER_DB */ struct vpu_jsm_hws_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_id; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. 
*/ u32 host_ssid; - /* ID of the command queue associated with the doorbell. */ + /** ID of the command queue associated with the doorbell. */ u64 cmdq_id; - /* Virtual address pointing to the start of command queue. */ + /** Virtual address pointing to the start of command queue. */ u64 cmdq_base; - /* Size of the command queue in bytes. */ + /** Size of the command queue in bytes. */ u64 cmdq_size; }; -/* - * @brief Structure to set another buffer to be used for scheduling-related logging. +/** + * Structure to set another buffer to be used for scheduling-related logging. * The size of the logging buffer and the number of entries is defined as part of the * buffer itself as described next. * The log buffer received from the host is made up of; - * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'. + * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header. * The header contains the number of log entries in the buffer. * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in - * 'struct vpu_hws_log_buffer_entry'. + * @ref vpu_hws_log_buffer_entry. * The entry contains the VPU timestamp, operation type and data. * The host should provide the notify index value of log buffer to VPU. This is a * value defined within the log buffer and when written to will generate the @@ -1095,30 +1281,30 @@ struct vpu_jsm_hws_register_db { * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION */ struct vpu_ipc_msg_payload_hws_set_scheduling_log { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* + /** * VPU log buffer virtual address. * Set to 0 to disable logging for this engine. */ u64 vpu_log_buffer_va; - /* + /** * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION * is generated when an event log is written to this index. 
*/ u64 notify_index; - /* + /** * Field is now deprecated, will be removed when KMD is updated to support removal */ u32 enable_extra_events; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief The scheduling log notification is generated by VPU when it writes +/** + * The scheduling log notification is generated by VPU when it writes * an event into the log buffer at the notify_index. VPU notifies host with * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous * message from VPU to host. @@ -1126,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log { * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG */ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief HWS suspend command queue request and done structure. +/** + * HWS suspend command queue request and done structure. * Host will request the suspend of contexts and VPU will; * - Suspend all work on this context * - Preempt any running work @@ -1152,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE */ struct vpu_ipc_msg_payload_hws_suspend_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Suspend fence value - reported by the VPU suspend context * completed once suspend is complete. */ u64 suspend_fence_value; }; -/* - * @brief HWS Resume command queue request / response structure. +/** + * HWS Resume command queue request / response structure. 
* Host will request the resume of a context; * - VPU will resume all work on this context * - Scheduler will allow this context to be scheduled @@ -1174,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq { * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP */ struct vpu_ipc_msg_payload_hws_resume_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* - * @brief HWS Resume engine request / response structure. - * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume. +/** + * HWS Resume engine request / response structure. + * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume. * Host shall send this command to resume scheduling of any valid queue. - * @see VPU_JSM_MSG_HWS_RESUME_ENGINE + * @see VPU_JSM_MSG_HWS_ENGINE_RESUME * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE */ struct vpu_ipc_msg_payload_hws_resume_engine { - /* Engine to be resumed */ + /** Engine to be resumed */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -1326,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done { /** * Metric group description placed in the metric buffer after successful completion * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more - * @vpu_jsm_metric_counter_descriptor records. + * @ref vpu_jsm_metric_counter_descriptor records. * @see VPU_JSM_MSG_METRIC_STREAMER_INFO */ struct vpu_jsm_metric_group_descriptor { @@ -1413,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor { }; /** - * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests. + * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests. * - * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug - * feature, which allows developers to selectively enable / disable MVLOG_DEBUG - * messages. 
This is equivalent to the Dynamic Debug functionality provided by - * Linux - * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html) - * The host can control Dynamic Debug behavior by sending dyndbg commands, which - * have the same syntax as Linux - * dyndbg commands. + * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug + * feature, which allows developers to selectively enable/disable code to obtain + * additional FW information. This is equivalent to the dynamic debug functionality + * provided by Linux. The host can control dynamic debug behavior by sending dyndbg + * commands, using the same syntax as for Linux dynamic debug commands. * - * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host - * still has to set the logging level to MVLOG_DEBUG, using the - * VPU_JSM_MSG_TRACE_SET_CONFIG command. + * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html. * - * The host can see the current dynamic debug configuration by executing a - * special 'show' command. The dyndbg configuration will be printed to the - * configured logging destination using MVLOG_INFO logging level. + * NOTE: + * As the dynamic debug feature uses MVLOG messages to provide information, the host + * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG + * command. */ struct vpu_ipc_msg_payload_dyndbg_control { /** - * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated - * string. + * Dyndbg command to be executed. */ char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN]; }; @@ -1456,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter { }; /** - * Payload for VPU_JSM_MSG_DCT_ENABLE message. + * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message. * * Default values for DCT active/inactive times are 5.3ms and 30ms respectively, * corresponding to a 85% duty cycle. 
This payload allows the host to tune these @@ -1513,28 +1694,28 @@ union vpu_ipc_msg_payload { struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control; }; -/* - * Host <-> LRT IPC message base structure. +/** + * Host <-> NPU IPC message base structure. * * NOTE: All instances of this object must be aligned on a 64B boundary * to allow proper handling of VPU cache operations. */ struct vpu_jsm_msg { - /* Reserved */ + /** Reserved */ u64 reserved_0; - /* Message type, see vpu_ipc_msg_type enum. */ + /** Message type, see @ref vpu_ipc_msg_type. */ u32 type; - /* Buffer status, see vpu_ipc_msg_status enum. */ + /** Buffer status, see @ref vpu_ipc_msg_status. */ u32 status; - /* + /** * Request ID, provided by the host in a request message and passed * back by VPU in the response message. */ u32 request_id; - /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */ + /** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */ u32 result; u64 reserved_1; - /* Message payload depending on message type, see vpu_ipc_msg_payload union. */ + /** Message payload depending on message type, see vpu_ipc_msg_payload union. */ union vpu_ipc_msg_payload payload; }; diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h index c31081e42cee..820d133236dd 100644 --- a/drivers/accel/qaic/qaic.h +++ b/drivers/accel/qaic/qaic.h @@ -97,6 +97,8 @@ struct dma_bridge_chan { * response queue's head and tail pointer of this DBC. 
*/ void __iomem *dbc_base; + /* Synchronizes access to Request queue's head and tail pointer */ + struct mutex req_lock; /* Head of list where each node is a memory handle queued in request queue */ struct list_head xfer_list; /* Synchronizes DBC readers during cleanup */ diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c index d8bdab69f800..49b6e75ef82a 100644 --- a/drivers/accel/qaic/qaic_control.c +++ b/drivers/accel/qaic/qaic_control.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -30,7 +31,7 @@ #define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */ #define QAIC_DBC_Q_GAP SZ_256 #define QAIC_DBC_Q_BUF_ALIGN SZ_4K -#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */ +#define QAIC_MANAGE_WIRE_MSG_LENGTH SZ_64K /* Max DMA message length */ #define QAIC_WRAPPER_MAX_SIZE SZ_4K #define QAIC_MHI_RETRY_WAIT_MS 100 #define QAIC_MHI_RETRY_MAX 20 @@ -367,7 +368,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap if (in_trans->hdr.len % 8 != 0) return -EINVAL; - if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH) + if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOSPC; trans_wrapper = add_wrapper(wrappers, @@ -407,7 +408,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev, return -EINVAL; remaining = in_trans->size - resources->xferred_dma_size; if (remaining == 0) - return 0; + return -EINVAL; if (check_add_overflow(xfer_start_addr, remaining, &end)) return -EINVAL; @@ -495,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr nents = sgt->nents; nents_dma = nents; - *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); + *size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); for_each_sgtable_dma_sg(sgt, sg, i) { *size -= sizeof(*asp); /* Save 1K for possible follow-up 
transactions. */ @@ -576,7 +577,7 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list /* There should be enough space to hold at least one ASP entry. */ if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) > - QAIC_MANAGE_EXT_MSG_LENGTH) + QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOMEM; xfer = kmalloc(sizeof(*xfer), GFP_KERNEL); @@ -645,7 +646,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper msg = &wrapper->msg; msg_hdr_len = le32_to_cpu(msg->hdr.len); - if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH) + if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOSPC; if (!in_trans->queue_size) @@ -655,8 +656,9 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper return -EINVAL; nelem = in_trans->queue_size; - size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem; - if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) + if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()), + nelem, + &size)) return -EINVAL; if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size) @@ -729,7 +731,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l msg = &wrapper->msg; msg_hdr_len = le32_to_cpu(msg->hdr.len); - if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH) + if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH) return -ENOSPC; trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper)); @@ -810,7 +812,7 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg, } if (ret) - break; + goto out; } if (user_len != user_msg->len) @@ -1052,7 +1054,7 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u init_completion(&elem.xfer_done); if (likely(!qdev->cntl_lost_buf)) { /* - * The max size of request to device is 
QAIC_MANAGE_EXT_MSG_LENGTH. + * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH. * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH. */ out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL); @@ -1079,7 +1081,6 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u list_for_each_entry(w, &wrappers->list, list) { kref_get(&w->ref_count); - retry_count = 0; ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len, list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN); if (ret) { diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c index 797289e9d780..fa723a2bdfa9 100644 --- a/drivers/accel/qaic/qaic_data.c +++ b/drivers/accel/qaic/qaic_data.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -165,7 +166,7 @@ static void free_slice(struct kref *kref) drm_gem_object_put(&slice->bo->base); sg_free_table(slice->sgt); kfree(slice->sgt); - kfree(slice->reqs); + kvfree(slice->reqs); kfree(slice); } @@ -404,7 +405,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, goto free_sgt; } - slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); + slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); if (!slice->reqs) { ret = -ENOMEM; goto free_slice; @@ -430,7 +431,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo, return 0; free_req: - kfree(slice->reqs); + kvfree(slice->reqs); free_slice: kfree(slice); free_sgt: @@ -643,8 +644,36 @@ static void qaic_free_object(struct drm_gem_object *obj) kfree(bo); } +static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj) +{ + struct qaic_bo *bo = to_qaic_bo(obj); + struct scatterlist *sg, *sg_in; + struct sg_table *sgt, *sgt_in; + int i; + + sgt_in = bo->sgt; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) 
{ + kfree(sgt); + return ERR_PTR(-ENOMEM); + } + + sg = sgt->sgl; + for_each_sgtable_sg(sgt_in, sg_in, i) { + memcpy(sg, sg_in, sizeof(*sg)); + sg = sg_next(sg); + } + + return sgt; +} + static const struct drm_gem_object_funcs qaic_gem_funcs = { .free = qaic_free_object, + .get_sg_table = qaic_get_sg_table, .print_info = qaic_gem_print_info, .mmap = qaic_gem_object_mmap, .vm_ops = &drm_vm_ops, @@ -953,8 +982,9 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi if (args->hdr.count == 0) return -EINVAL; - arg_size = args->hdr.count * sizeof(*slice_ent); - if (arg_size / args->hdr.count != sizeof(*slice_ent)) + if (check_mul_overflow((unsigned long)args->hdr.count, + (unsigned long)sizeof(*slice_ent), + &arg_size)) return -EINVAL; if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE)) @@ -984,18 +1014,12 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi user_data = u64_to_user_ptr(args->data); - slice_ent = kzalloc(arg_size, GFP_KERNEL); - if (!slice_ent) { - ret = -EINVAL; + slice_ent = memdup_user(user_data, arg_size); + if (IS_ERR(slice_ent)) { + ret = PTR_ERR(slice_ent); goto unlock_dev_srcu; } - ret = copy_from_user(slice_ent, user_data, arg_size); - if (ret) { - ret = -EFAULT; - goto free_slice_ent; - } - obj = drm_gem_object_lookup(file_priv, args->hdr.handle); if (!obj) { ret = -ENOENT; @@ -1300,8 +1324,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr int usr_rcu_id, qdev_rcu_id; struct qaic_device *qdev; struct qaic_user *usr; - u8 __user *user_data; - unsigned long n; u64 received_ts; u32 queue_level; u64 submit_ts; @@ -1314,20 +1336,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr received_ts = ktime_get_ns(); size = is_partial ? 
sizeof(struct qaic_partial_execute_entry) : sizeof(*exec); - n = (unsigned long)size * args->hdr.count; - if (args->hdr.count == 0 || n / args->hdr.count != size) + if (args->hdr.count == 0) return -EINVAL; - user_data = u64_to_user_ptr(args->data); - - exec = kcalloc(args->hdr.count, size, GFP_KERNEL); - if (!exec) - return -ENOMEM; - - if (copy_from_user(exec, user_data, n)) { - ret = -EFAULT; - goto free_exec; - } + exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size); + if (IS_ERR(exec)) + return PTR_ERR(exec); usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); @@ -1356,13 +1370,17 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr goto release_ch_rcu; } + ret = mutex_lock_interruptible(&dbc->req_lock); + if (ret) + goto release_ch_rcu; + head = readl(dbc->dbc_base + REQHP_OFF); tail = readl(dbc->dbc_base + REQTP_OFF); if (head == U32_MAX || tail == U32_MAX) { /* PCI link error */ ret = -ENODEV; - goto release_ch_rcu; + goto unlock_req_lock; } queue_level = head <= tail ? 
tail - head : dbc->nelem - (head - tail); @@ -1370,11 +1388,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc, head, &tail); if (ret) - goto release_ch_rcu; + goto unlock_req_lock; /* Finalize commit to hardware */ submit_ts = ktime_get_ns(); writel(tail, dbc->dbc_base + REQTP_OFF); + mutex_unlock(&dbc->req_lock); update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts, submit_ts, queue_level); @@ -1382,13 +1401,15 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr if (datapath_polling) schedule_work(&dbc->poll_work); +unlock_req_lock: + if (ret) + mutex_unlock(&dbc->req_lock); release_ch_rcu: srcu_read_unlock(&dbc->ch_lock, rcu_id); unlock_dev_srcu: srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id); unlock_usr_srcu: srcu_read_unlock(&usr->qddev_lock, usr_rcu_id); -free_exec: kfree(exec); return ret; } @@ -1741,7 +1762,8 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file struct qaic_device *qdev; struct qaic_user *usr; struct qaic_bo *bo; - int ret, i; + int ret = 0; + int i; usr = file_priv->driver_priv; usr_rcu_id = srcu_read_lock(&usr->qddev_lock); @@ -1762,18 +1784,12 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file goto unlock_dev_srcu; } - ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL); - if (!ent) { - ret = -EINVAL; + ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent)); + if (IS_ERR(ent)) { + ret = PTR_ERR(ent); goto unlock_dev_srcu; } - ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent)); - if (ret) { - ret = -EFAULT; - goto free_ent; - } - for (i = 0; i < args->hdr.count; i++) { obj = drm_gem_object_lookup(file_priv, ent[i].handle); if (!obj) { @@ -1781,6 +1797,16 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct 
drm_file goto free_ent; } bo = to_qaic_bo(obj); + if (!bo->sliced) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto free_ent; + } + if (bo->dbc->id != args->hdr.dbc_id) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto free_ent; + } /* * perf stats ioctl is called before wait ioctl is complete then * the latency information is invalid. @@ -1933,7 +1959,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) * enable_dbc - Enable the DBC. DBCs are disabled by removing the context of * user. Add user context back to DBC to enable it. This function trusts the * DBC ID passed and expects the DBC to be disabled. - * @qdev: Qranium device handle + * @qdev: qaic device handle * @dbc_id: ID of the DBC * @usr: User context */ diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c index a991b8198dc4..8dc4fe5bb560 100644 --- a/drivers/accel/qaic/qaic_debugfs.c +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -218,6 +218,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d if (ret) goto destroy_workqueue; + dev_set_drvdata(&mhi_dev->dev, qdev); + qdev->bootlog_ch = mhi_dev; + for (i = 0; i < BOOTLOG_POOL_SIZE; i++) { msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL); if (!msg) { @@ -233,8 +236,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d goto mhi_unprepare; } - dev_set_drvdata(&mhi_dev->dev, qdev); - qdev->bootlog_ch = mhi_dev; return 0; mhi_unprepare: diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c index e31bcb0ecfc9..e162f4b8a262 100644 --- a/drivers/accel/qaic/qaic_drv.c +++ b/drivers/accel/qaic/qaic_drv.c @@ -454,6 +454,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, return NULL; init_waitqueue_head(&qdev->dbc[i].dbc_release); INIT_LIST_HEAD(&qdev->dbc[i].bo_lists); + ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock); + if (ret) + return NULL; } return qdev; diff --git 
a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c index 914ffc4a9970..f1d52a710136 100644 --- a/drivers/accel/qaic/qaic_ras.c +++ b/drivers/accel/qaic/qaic_ras.c @@ -514,21 +514,21 @@ static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ce_count); + return sysfs_emit(buf, "%d\n", qdev->ce_count); } static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_count); + return sysfs_emit(buf, "%d\n", qdev->ue_count); } static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf) { struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev)); - return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_nf_count); + return sysfs_emit(buf, "%d\n", qdev->ue_nf_count); } static DEVICE_ATTR_RO(ce_count); diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c index 3ebcc1f7ff58..fd3c3b2d1fd3 100644 --- a/drivers/accel/qaic/sahara.c +++ b/drivers/accel/qaic/sahara.c @@ -159,6 +159,7 @@ struct sahara_context { struct sahara_packet *rx; struct work_struct fw_work; struct work_struct dump_work; + struct work_struct read_data_work; struct mhi_device *mhi_dev; const char * const *image_table; u32 table_size; @@ -174,7 +175,10 @@ struct sahara_context { u64 dump_image_offset; void *mem_dump_freespace; u64 dump_images_left; + u32 read_data_offset; + u32 read_data_length; bool is_mem_dump_mode; + bool non_streaming; }; static const char * const aic100_image_table[] = { @@ -194,6 +198,7 @@ static const char * const aic200_image_table[] = { [23] = "qcom/aic200/aop.mbn", [32] = "qcom/aic200/tz.mbn", [33] = "qcom/aic200/hypvm.mbn", + [38] = "qcom/aic200/xbl_config.elf", [39] = "qcom/aic200/aic200_abl.elf", [40] = "qcom/aic200/apdp.mbn", [41] = 
"qcom/aic200/devcfg.mbn", @@ -202,6 +207,7 @@ static const char * const aic200_image_table[] = { [49] = "qcom/aic200/shrm.elf", [50] = "qcom/aic200/cpucp.elf", [51] = "qcom/aic200/aop_devcfg.mbn", + [54] = "qcom/aic200/qupv3fw.elf", [57] = "qcom/aic200/cpucp_dtbs.elf", [62] = "qcom/aic200/uefi_dtbs.elf", [63] = "qcom/aic200/xbl_ac_config.mbn", @@ -213,9 +219,15 @@ static const char * const aic200_image_table[] = { [69] = "qcom/aic200/dcd.mbn", [73] = "qcom/aic200/gearvm.mbn", [74] = "qcom/aic200/sti.bin", - [75] = "qcom/aic200/pvs.bin", + [76] = "qcom/aic200/tz_qti_config.mbn", + [78] = "qcom/aic200/pvs.bin", }; +static bool is_streaming(struct sahara_context *context) +{ + return !context->non_streaming; +} + static int sahara_find_image(struct sahara_context *context, u32 image_id) { int ret; @@ -265,6 +277,8 @@ static void sahara_send_reset(struct sahara_context *context) int ret; context->is_mem_dump_mode = false; + context->read_data_offset = 0; + context->read_data_length = 0; context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD); context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH); @@ -319,9 +333,39 @@ static void sahara_hello(struct sahara_context *context) dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret); } +static int read_data_helper(struct sahara_context *context, int buf_index) +{ + enum mhi_flags mhi_flag; + u32 pkt_data_len; + int ret; + + pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE); + + memcpy(context->tx[buf_index], + &context->firmware->data[context->read_data_offset], + pkt_data_len); + + context->read_data_offset += pkt_data_len; + context->read_data_length -= pkt_data_len; + + if (is_streaming(context) || !context->read_data_length) + mhi_flag = MHI_EOT; + else + mhi_flag = MHI_CHAIN; + + ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, + context->tx[buf_index], pkt_data_len, mhi_flag); + if (ret) { + dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret); + 
return ret; + } + + return 0; +} + static void sahara_read_data(struct sahara_context *context) { - u32 image_id, data_offset, data_len, pkt_data_len; + u32 image_id, data_offset, data_len; int ret; int i; @@ -357,7 +401,7 @@ static void sahara_read_data(struct sahara_context *context) * and is not needed here on error. */ - if (data_len > SAHARA_TRANSFER_MAX_SIZE) { + if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) { dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n", data_len, SAHARA_TRANSFER_MAX_SIZE); sahara_send_reset(context); @@ -378,22 +422,18 @@ static void sahara_read_data(struct sahara_context *context) return; } - for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) { - pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE); + context->read_data_offset = data_offset; + context->read_data_length = data_len; - memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len); + if (is_streaming(context)) { + schedule_work(&context->read_data_work); + return; + } - data_offset += pkt_data_len; - data_len -= pkt_data_len; - - ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, - context->tx[i], pkt_data_len, - !data_len ? 
MHI_EOT : MHI_CHAIN); - if (ret) { - dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", - ret); - return; - } + for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) { + ret = read_data_helper(context, i); + if (ret) + break; } } @@ -538,6 +578,7 @@ static void sahara_parse_dump_table(struct sahara_context *context) struct sahara_memory_dump_meta_v1 *dump_meta; u64 table_nents; u64 dump_length; + u64 mul_bytes; int ret; u64 i; @@ -551,8 +592,9 @@ static void sahara_parse_dump_table(struct sahara_context *context) dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0; dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0; - dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length)); - if (dump_length == SIZE_MAX) { + if (check_add_overflow(dump_length, + le64_to_cpu(dev_table[i].length), + &dump_length)) { /* Discard the dump */ sahara_send_reset(context); return; @@ -568,14 +610,17 @@ static void sahara_parse_dump_table(struct sahara_context *context) dev_table[i].filename); } - dump_length = size_add(dump_length, sizeof(*dump_meta)); - if (dump_length == SIZE_MAX) { + if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) { /* Discard the dump */ sahara_send_reset(context); return; } - dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents)); - if (dump_length == SIZE_MAX) { + if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) { + /* Discard the dump */ + sahara_send_reset(context); + return; + } + if (check_add_overflow(dump_length, mul_bytes, &dump_length)) { /* Discard the dump */ sahara_send_reset(context); return; @@ -615,7 +660,7 @@ static void sahara_parse_dump_table(struct sahara_context *context) /* Request the first chunk of the first image */ context->dump_image = &image_out_table[0]; - dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE); + dump_length = min_t(u64, context->dump_image->length, 
SAHARA_READ_MAX_SIZE); /* Avoid requesting EOI sized data so that we can identify errors */ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH) dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2; @@ -663,7 +708,7 @@ static void sahara_parse_dump_image(struct sahara_context *context) /* Get next image chunk */ dump_length = context->dump_image->length - context->dump_image_offset; - dump_length = min(dump_length, SAHARA_READ_MAX_SIZE); + dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE); /* Avoid requesting EOI sized data so that we can identify errors */ if (dump_length == SAHARA_END_OF_IMAGE_LENGTH) dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2; @@ -742,6 +787,13 @@ error: sahara_send_reset(context); } +static void sahara_read_data_processing(struct work_struct *work) +{ + struct sahara_context *context = container_of(work, struct sahara_context, read_data_work); + + read_data_helper(context, 0); +} + static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { struct sahara_context *context; @@ -756,35 +808,57 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_ if (!context->rx) return -ENOMEM; - /* - * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it - * will request for READ_DATA. This is larger than - * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to - * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a - * READ_DATA, it requires a transfer of the exact size requested. We - * can use MHI_CHAIN to link multiple buffers into a single transfer - * but the remote side will not consume the buffers until it sees an - * EOT, thus we need to allocate enough buffers to put in the tx fifo - * to cover an entire READ_DATA request of the max size. 
- */ - for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) { - context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL); - if (!context->tx[i]) - return -ENOMEM; - } - - context->mhi_dev = mhi_dev; - INIT_WORK(&context->fw_work, sahara_processing); - INIT_WORK(&context->dump_work, sahara_dump_processing); - if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) { context->image_table = aic200_image_table; context->table_size = ARRAY_SIZE(aic200_image_table); } else { context->image_table = aic100_image_table; context->table_size = ARRAY_SIZE(aic100_image_table); + context->non_streaming = true; } + /* + * There are two firmware implementations for READ_DATA handling. + * The older "SBL" implementation defines a Sahara transfer size, and + * expects that the response is a single transport transfer. If the + * FW wants to transfer a file that is larger than the transfer size, + * the FW will issue multiple READ_DATA commands. For this + * implementation, we need to allocate enough buffers to contain the + * entire Sahara transfer size. + * + * The newer "XBL" implementation does not define a maximum transfer + * size and instead expects the data to be streamed over using the + * transport level MTU. The FW will issue a single READ_DATA command + * of whatever size, and consume multiple transport level transfers + * until the expected amount of data is consumed. For this + * implementation we only need a single buffer of the transport MTU + * but we'll need to be able to use it multiple times for a single + * READ_DATA request. + * + * AIC100 is the SBL implementation and defines SAHARA_TRANSFER_MAX_SIZE + * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. We can use + * MHI_CHAIN to link multiple buffers into a single transfer but the + * remote side will not consume the buffers until it sees an EOT, thus + * we need to allocate enough buffers to put in the tx fifo to cover an + * entire READ_DATA request of the max size. 
+ * + * AIC200 is the XBL implementation, and so a single buffer will work. + */ + for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) { + context->tx[i] = devm_kzalloc(&mhi_dev->dev, + SAHARA_PACKET_MAX_SIZE, + GFP_KERNEL); + if (!context->tx[i]) + return -ENOMEM; + if (is_streaming(context)) + break; + } + + context->mhi_dev = mhi_dev; + INIT_WORK(&context->fw_work, sahara_processing); + INIT_WORK(&context->dump_work, sahara_dump_processing); + INIT_WORK(&context->read_data_work, sahara_read_data_processing); + context->active_image_id = SAHARA_IMAGE_ID_NONE; dev_set_drvdata(&mhi_dev->dev, context); @@ -814,6 +888,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev) static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) { + struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev); + + if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context)) + schedule_work(&context->read_data_work); } static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c index 0551e11cc184..624c4ecf5a34 100644 --- a/drivers/accel/rocket/rocket_gem.c +++ b/drivers/accel/rocket/rocket_gem.c @@ -2,6 +2,7 @@ /* Copyright 2024-2025 Tomeu Vizoso */ #include +#include #include #include #include diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ff53f5f029b4..2a210719c4ce 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2174,13 +2174,10 @@ static int ata_read_log_directory(struct ata_device *dev) } version = get_unaligned_le16(&dev->gp_log_dir[0]); - if (version != 0x0001) { - ata_dev_err(dev, "Invalid log directory version 0x%04x\n", - version); - ata_clear_log_directory(dev); - dev->quirks |= ATA_QUIRK_NO_LOG_DIR; - return -EINVAL; - } + if (version != 0x0001) + ata_dev_warn_once(dev, + "Invalid log directory version 0x%04x\n", + version); return 0; } diff 
--git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index a0b67a35a5f0..3700ab4eba3e 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2301,8 +2301,11 @@ static int i_ipmi_request(struct ipmi_user *user, if (supplied_recv) { recv_msg = supplied_recv; recv_msg->user = user; - if (user) + if (user) { atomic_inc(&user->nr_msgs); + /* The put happens when the message is freed. */ + kref_get(&user->refcount); + } } else { recv_msg = ipmi_alloc_recv_msg(user); if (IS_ERR(recv_msg)) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index ed97344f2324..c75a531cfb98 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -133,8 +133,7 @@ static inline bool tpm_crb_has_idle(u32 start_method) { return !(start_method == ACPI_TPM2_START_METHOD || start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD || - start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC || - start_method == ACPI_TPM2_CRB_WITH_ARM_FFA); + start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC); } static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, @@ -191,7 +190,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete * * Return: 0 always */ -static int __crb_go_idle(struct device *dev, struct crb_priv *priv) +static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc) { int rc; @@ -200,6 +199,12 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv) iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc); + if (rc) + return rc; + } + rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; @@ -220,7 +225,7 @@ static int crb_go_idle(struct tpm_chip *chip) struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); - return __crb_go_idle(dev, priv); + return __crb_go_idle(dev, priv, 
chip->locality); } /** @@ -238,7 +243,7 @@ static int crb_go_idle(struct tpm_chip *chip) * * Return: 0 on success -ETIME on timeout; */ -static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) +static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc) { int rc; @@ -247,6 +252,12 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv) iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req); + if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) { + rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc); + if (rc) + return rc; + } + rc = crb_try_pluton_doorbell(priv, true); if (rc) return rc; @@ -267,7 +278,7 @@ static int crb_cmd_ready(struct tpm_chip *chip) struct device *dev = &chip->dev; struct crb_priv *priv = dev_get_drvdata(dev); - return __crb_cmd_ready(dev, priv); + return __crb_cmd_ready(dev, priv, chip->locality); } static int __crb_request_locality(struct device *dev, @@ -444,7 +455,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len) /* Seems to be necessary for every command */ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) - __crb_cmd_ready(&chip->dev, priv); + __crb_cmd_ready(&chip->dev, priv, chip->locality); memcpy_toio(priv->cmd, buf, len); @@ -672,7 +683,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * PTT HW bug w/a: wake up the device to access * possibly not retained registers. 
*/ - ret = __crb_cmd_ready(dev, priv); + ret = __crb_cmd_ready(dev, priv, 0); if (ret) goto out_relinquish_locality; @@ -744,7 +755,7 @@ out: if (!ret) priv->cmd_size = cmd_size; - __crb_go_idle(dev, priv); + __crb_go_idle(dev, priv, 0); out_relinquish_locality: diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index d7a5539d07d4..bd2e282ca93a 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -348,7 +348,7 @@ static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd) struct resource res; int nid, rc; - res = DEFINE_RES(start, size, 0); + res = DEFINE_RES_MEM(start, size); nid = phys_to_target_node(start); rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size); diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c index 7c750599ea69..4bc484b46f43 100644 --- a/drivers/cxl/core/features.c +++ b/drivers/cxl/core/features.c @@ -371,6 +371,9 @@ cxl_feature_info(struct cxl_features_state *cxlfs, { struct cxl_feat_entry *feat; + if (!cxlfs || !cxlfs->entries) + return ERR_PTR(-EOPNOTSUPP); + for (int i = 0; i < cxlfs->entries->num_features; i++) { feat = &cxlfs->entries->ent[i]; if (uuid_equal(uuid, &feat->uuid)) diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index d5f71eb1ade8..8128fd2b5b31 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1182,6 +1182,20 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, if (rc) return ERR_PTR(rc); + /* + * Setup port register if this is the first dport showed up. Having + * a dport also means that there is at least 1 active link. 
+ */ + if (port->nr_dports == 1 && + port->component_reg_phys != CXL_RESOURCE_NONE) { + rc = cxl_port_setup_regs(port, port->component_reg_phys); + if (rc) { + xa_erase(&port->dports, (unsigned long)dport->dport_dev); + return ERR_PTR(rc); + } + port->component_reg_phys = CXL_RESOURCE_NONE; + } + get_device(dport_dev); rc = devm_add_action_or_reset(host, cxl_dport_remove, dport); if (rc) @@ -1200,18 +1214,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, cxl_debugfs_create_dport_dir(dport); - /* - * Setup port register if this is the first dport showed up. Having - * a dport also means that there is at least 1 active link. - */ - if (port->nr_dports == 1 && - port->component_reg_phys != CXL_RESOURCE_NONE) { - rc = cxl_port_setup_regs(port, port->component_reg_phys); - if (rc) - return ERR_PTR(rc); - port->component_reg_phys = CXL_RESOURCE_NONE; - } - return dport; } diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index e14c1d305b22..b06fee1978ba 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -839,7 +839,7 @@ static int match_free_decoder(struct device *dev, const void *data) } static bool region_res_match_cxl_range(const struct cxl_region_params *p, - struct range *range) + const struct range *range) { if (!p->res) return false; @@ -3398,10 +3398,7 @@ static int match_region_by_range(struct device *dev, const void *data) p = &cxlr->params; guard(rwsem_read)(&cxl_rwsem.region); - if (p->res && p->res->start == r->start && p->res->end == r->end) - return 1; - - return 0; + return region_res_match_cxl_range(p, r); } static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr, @@ -3666,14 +3663,14 @@ static int validate_region_offset(struct cxl_region *cxlr, u64 offset) if (offset < p->cache_size) { dev_err(&cxlr->dev, - "Offset %#llx is within extended linear cache %pr\n", + "Offset %#llx is within extended linear cache %pa\n", offset, &p->cache_size); return -EINVAL; } region_size = 
resource_size(p->res); if (offset >= region_size) { - dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pr\n", + dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n", offset, ®ion_size); return -EINVAL; } diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index a53ec4798b12..a972e4ef1936 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -1068,7 +1068,7 @@ TRACE_EVENT(cxl_poison, __entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd, __entry->dpa); if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size) - __entry->hpa_alias0 = __entry->hpa + + __entry->hpa_alias0 = __entry->hpa - cxlr->params.cache_size; else __entry->hpa_alias0 = ULLONG_MAX; diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c index 5a2c9badcc64..5e6e7e900bda 100644 --- a/drivers/devfreq/event/rockchip-dfi.c +++ b/drivers/devfreq/event/rockchip-dfi.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -30,8 +31,6 @@ #define DMC_MAX_CHANNELS 4 -#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) - /* DDRMON_CTRL */ #define DDRMON_CTRL 0x04 #define DDRMON_CTRL_LPDDR5 BIT(6) @@ -41,10 +40,6 @@ #define DDRMON_CTRL_LPDDR23 BIT(2) #define DDRMON_CTRL_SOFTWARE_EN BIT(1) #define DDRMON_CTRL_TIMER_CNT_EN BIT(0) -#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_LPDDR5 | \ - DDRMON_CTRL_DDR4 | \ - DDRMON_CTRL_LPDDR4 | \ - DDRMON_CTRL_LPDDR23) #define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7) #define DDRMON_CH0_WR_NUM 0x20 @@ -124,27 +119,31 @@ struct rockchip_dfi { unsigned int count_multiplier; /* number of data clocks per count */ }; -static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl, - u32 *mask) +static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl) { u32 ddrmon_ver; - *mask = DDRMON_CTRL_DDR_TYPE_MASK; - switch (dfi->ddr_type) { case ROCKCHIP_DDRTYPE_LPDDR2: case ROCKCHIP_DDRTYPE_LPDDR3: - *ctrl = DDRMON_CTRL_LPDDR23; + *ctrl = 
FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 1) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0); break; case ROCKCHIP_DDRTYPE_LPDDR4: case ROCKCHIP_DDRTYPE_LPDDR4X: - *ctrl = DDRMON_CTRL_LPDDR4; + *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 1) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0); break; case ROCKCHIP_DDRTYPE_LPDDR5: ddrmon_ver = readl_relaxed(dfi->regs); if (ddrmon_ver < 0x40) { - *ctrl = DDRMON_CTRL_LPDDR5 | dfi->lp5_bank_mode; - *mask |= DDRMON_CTRL_LP5_BANK_MODE_MASK; + *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 1) | + FIELD_PREP_WM16(DDRMON_CTRL_LP5_BANK_MODE_MASK, + dfi->lp5_bank_mode); break; } @@ -172,7 +171,6 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) void __iomem *dfi_regs = dfi->regs; int i, ret = 0; u32 ctrl; - u32 ctrl_mask; mutex_lock(&dfi->mutex); @@ -186,7 +184,7 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) goto out; } - ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl, &ctrl_mask); + ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl); if (ret) goto out; @@ -196,15 +194,16 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi) continue; /* clear DDRMON_CTRL setting */ - writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN | - DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN), + writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_TIMER_CNT_EN, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) | + FIELD_PREP_WM16(DDRMON_CTRL_HARDWARE_EN, 0), dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); - writel_relaxed(HIWORD_UPDATE(ctrl, ctrl_mask), - dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride + + DDRMON_CTRL); /* enable count, use software mode */ - writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN), + writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1), dfi_regs + i * 
dfi->ddrmon_stride + DDRMON_CTRL); if (dfi->ddrmon_ctrl_single) @@ -234,8 +233,8 @@ static void rockchip_dfi_disable(struct rockchip_dfi *dfi) if (!(dfi->channel_mask & BIT(i))) continue; - writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN), - dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); + writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0), + dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL); if (dfi->ddrmon_ctrl_single) break; diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index bb369b38b001..a5eef06c4226 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -12,13 +12,3 @@ config DMABUF_HEAPS_CMA Choose this option to enable dma-buf CMA heap. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. - -config DMABUF_HEAPS_CMA_LEGACY - bool "Legacy DMA-BUF CMA Heap" - default y - depends on DMABUF_HEAPS_CMA - help - Add a duplicate CMA-backed dma-buf heap with legacy naming derived - from the CMA area's devicetree node, or "reserved" if the area is not - defined in the devicetree. This uses the same underlying allocator as - CONFIG_DMABUF_HEAPS_CMA. 
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 0df007111975..42f88193eab9 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -21,12 +22,27 @@ #include #include #include +#include +#include #include #include #include #define DEFAULT_CMA_NAME "default_cma_region" +static struct cma *dma_areas[MAX_CMA_AREAS] __initdata; +static unsigned int dma_areas_num __initdata; + +int __init dma_heap_cma_register_heap(struct cma *cma) +{ + if (dma_areas_num >= ARRAY_SIZE(dma_areas)) + return -EINVAL; + + dma_areas[dma_areas_num++] = cma; + + return 0; +} + struct cma_heap { struct dma_heap *heap; struct cma *cma; @@ -395,33 +411,30 @@ static int __init __add_cma_heap(struct cma *cma, const char *name) return 0; } -static int __init add_default_cma_heap(void) +static int __init add_cma_heaps(void) { struct cma *default_cma = dev_get_cma_area(NULL); - const char *legacy_cma_name; + unsigned int i; int ret; - if (!default_cma) - return 0; + if (default_cma) { + ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); + if (ret) + return ret; + } - ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); - if (ret) - return ret; + for (i = 0; i < dma_areas_num; i++) { + struct cma *cma = dma_areas[i]; - if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) { - legacy_cma_name = cma_get_name(default_cma); - if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) { - pr_warn("legacy name and default name are the same, skipping legacy heap\n"); - return 0; + ret = __add_cma_heap(cma, cma_get_name(cma)); + if (ret) { + pr_warn("Failed to add CMA heap %s", cma_get_name(cma)); + continue; } - ret = __add_cma_heap(default_cma, legacy_cma_name); - if (ret) - pr_warn("failed to add legacy heap: %pe\n", - ERR_PTR(ret)); } return 0; } -module_init(add_default_cma_heap); +module_init(add_cma_heaps); MODULE_DESCRIPTION("DMA-BUF CMA Heap"); diff --git 
a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c index 092e7027948a..e42e527813cf 100644 --- a/drivers/dpll/zl3073x/core.c +++ b/drivers/dpll/zl3073x/core.c @@ -1038,8 +1038,29 @@ zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev) int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full) { struct zl3073x_dpll *zldpll; + u8 info; int rc; + rc = zl3073x_read_u8(zldev, ZL_REG_INFO, &info); + if (rc) { + dev_err(zldev->dev, "Failed to read device status info\n"); + return rc; + } + + if (!FIELD_GET(ZL_INFO_READY, info)) { + /* The ready bit indicates that the firmware was successfully + * configured and is ready for normal operation. If it is + * cleared then the configuration stored in flash is wrong + * or missing. In this situation the driver will expose + * only devlink interface to give an opportunity to flash + * the correct config. + */ + dev_info(zldev->dev, + "FW not fully ready - missing or corrupted config\n"); + + return 0; + } + if (full) { /* Fetch device state */ rc = zl3073x_dev_state_fetch(zldev); diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c index d5418ff74886..def37fe8d9b0 100644 --- a/drivers/dpll/zl3073x/fw.c +++ b/drivers/dpll/zl3073x/fw.c @@ -37,7 +37,7 @@ struct zl3073x_fw_component_info { static const struct zl3073x_fw_component_info component_info[] = { [ZL_FW_COMPONENT_UTIL] = { .name = "utility", - .max_size = 0x2300, + .max_size = 0x4000, .load_addr = 0x20000000, .flash_type = ZL3073X_FLASH_TYPE_NONE, }, diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h index 19a25325bd9c..d837bee72b17 100644 --- a/drivers/dpll/zl3073x/regs.h +++ b/drivers/dpll/zl3073x/regs.h @@ -67,6 +67,9 @@ * Register Page 0, General **************************/ +#define ZL_REG_INFO ZL_REG(0, 0x00, 1) +#define ZL_INFO_READY BIT(7) + #define ZL_REG_ID ZL_REG(0, 0x01, 2) #define ZL_REG_REVISION ZL_REG(0, 0x03, 2) #define ZL_REG_FW_VER ZL_REG(0, 0x05, 2) diff --git a/drivers/gpu/drm/Makefile 
b/drivers/gpu/drm/Makefile index 4b2f7d794275..c2672f369aed 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -150,7 +150,8 @@ drm_kms_helper-y := \ drm_plane_helper.o \ drm_probe_helper.o \ drm_self_refresh_helper.o \ - drm_simple_kms_helper.o + drm_simple_kms_helper.o \ + drm_vblank_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c index 54cde090c3f4..4554cf75565e 100644 --- a/drivers/gpu/drm/adp/adp_drv.c +++ b/drivers/gpu/drm/adp/adp_drv.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 64e7acff8f18..ebe08947c5a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \ -I$(FULL_AMD_DISPLAY_PATH)/dc \ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \ - -I$(FULL_AMD_PATH)/amdkfd + -I$(FULL_AMD_PATH)/amdkfd \ + -I$(FULL_AMD_PATH)/ras/ras_mgr # Locally disable W=1 warnings enabled in drm subsystem Makefile subdir-ccflags-y += -Wno-override-init @@ -324,4 +325,9 @@ amdgpu-y += \ isp_v4_1_1.o endif +AMD_GPU_RAS_PATH := ../ras +AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras +include $(AMD_GPU_RAS_FULL_PATH)/Makefile +amdgpu-y += $(AMD_GPU_RAS_FILES) + obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index 9569dc16dd3d..daa7b23bc775 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -88,6 +88,10 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev) uint32_t ip_block; int r, i; + /* Skip suspend of SDMA IP versions >= 4.4.2. 
They are multi-aid */ + if (adev->aid_mask) + ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA); + amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 2a0df4cabb99..50079209c472 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -372,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, u64 *flags); int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type); int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block); int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block); -#define AMDGPU_MAX_IP_NUM 16 +#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM struct amdgpu_ip_block_status { bool valid; @@ -839,8 +841,6 @@ struct amd_powerplay { const struct amd_pm_funcs *pp_funcs; }; -struct ip_discovery_top; - /* polaris10 kickers */ #define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \ ((rid == 0xE3) || \ @@ -972,8 +972,7 @@ struct amdgpu_device { struct notifier_block acpi_nb; struct notifier_block pm_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; - struct debugfs_blob_wrapper debugfs_vbios_blob; - struct debugfs_blob_wrapper debugfs_discovery_blob; + struct debugfs_blob_wrapper debugfs_vbios_blob; struct mutex srbm_mutex; /* GRBM index mutex. Protects concurrent access to GRBM index */ struct mutex grbm_idx_mutex; @@ -1063,6 +1062,9 @@ struct amdgpu_device { u32 log2_max_MBps; } mm_stats; + /* discovery*/ + struct amdgpu_discovery_info discovery; + /* display */ bool enable_virtual_display; struct amdgpu_vkms_output *amdgpu_vkms_output; @@ -1174,6 +1176,12 @@ struct amdgpu_device { * queue fence. 
*/ struct xarray userq_xa; + /** + * @userq_doorbell_xa: Global user queue map (doorbell index → queue) + * Key: doorbell_index (unique global identifier for the queue) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_doorbell_xa; /* df */ struct amdgpu_df df; @@ -1265,8 +1273,6 @@ struct amdgpu_device { struct list_head ras_list; - struct ip_discovery_top *ip_top; - struct amdgpu_reset_domain *reset_domain; struct mutex benchmark_mutex; @@ -1290,6 +1296,7 @@ struct amdgpu_device { bool debug_disable_gpu_ring_reset; bool debug_vm_userptr; bool debug_disable_ce_logs; + bool debug_enable_ce_cs; /* Protection for the following isolation structure */ struct mutex enforce_isolation_mutex; @@ -1308,8 +1315,6 @@ struct amdgpu_device { */ bool apu_prefer_gtt; - struct list_head userq_mgr_list; - struct mutex userq_mutex; bool userq_halt_for_enforce_isolation; struct amdgpu_uid *uid_info; @@ -1637,7 +1642,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_release_kms(struct drm_device *dev); -int amdgpu_device_ip_suspend(struct amdgpu_device *adev); int amdgpu_device_prepare(struct drm_device *dev); void amdgpu_device_complete(struct drm_device *dev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 6c62e27b9800..d31460a9e958 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, pm_runtime_get_sync(adev_to_drm(adev)->dev); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(adev_to_drm(adev)); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 
a2879d2b7c8e..644f79f3c9af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -36,6 +36,7 @@ #include "amdgpu_ras.h" #include "amdgpu_umc.h" #include "amdgpu_reset.h" +#include "amdgpu_ras_mgr.h" /* Total memory size in system memory and all GPU VRAM. Used to * estimate worst case amount of memory to reserve for page tables @@ -746,6 +747,20 @@ void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *ad enum amdgpu_ras_block block, uint16_t pasid, pasid_notify pasid_fn, void *data, uint32_t reset) { + + if (amdgpu_uniras_enabled(adev)) { + struct ras_ih_info ih_info; + + memset(&ih_info, 0, sizeof(ih_info)); + ih_info.block = block; + ih_info.pasid = pasid; + ih_info.reset = reset; + ih_info.pasid_fn = pasid_fn; + ih_info.data = data; + amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info); + return; + } + amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 9e120c934cc1..8bdfcde2029b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -71,7 +71,7 @@ struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; struct dma_buf *dmabuf; - struct hmm_range *range; + struct amdgpu_hmm_range *range; struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct list_head validate_list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 83020963dfde..96ccd5ade031 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1057,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, struct amdkfd_process_info *process_info = mem->process_info; struct amdgpu_bo *bo = mem->bo; struct ttm_operation_ctx ctx = { true, false }; - struct hmm_range 
*range; + struct amdgpu_hmm_range *range; int ret = 0; mutex_lock(&process_info->lock); @@ -1089,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, &range); + range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!range)) { + ret = -ENOMEM; + goto unregister_out; + } + + ret = amdgpu_ttm_tt_get_user_pages(bo, range); if (ret) { + amdgpu_hmm_range_free(range); if (ret == -EAGAIN) pr_debug("Failed to get user pages, try again\n"); else @@ -1113,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, amdgpu_bo_unreserve(bo); release_out: - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); + amdgpu_hmm_range_free(range); unregister_out: if (ret) amdgpu_hmm_unregister(bo); @@ -1916,7 +1923,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { amdgpu_hmm_unregister(mem->bo); mutex_lock(&process_info->notifier_lock); - amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); + amdgpu_hmm_range_free(mem->range); mutex_unlock(&process_info->notifier_lock); } @@ -1954,9 +1961,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( */ if (size) { if (!is_imported && - (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM || - (adev->apu_prefer_gtt && - mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT))) + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) *size = bo_size; else *size = 0; @@ -2329,10 +2334,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem) int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, struct kfd_vm_fault_info *mem) { - if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { + if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) { *mem = *adev->gmc.vm_fault_info; - mb(); /* make sure read happened */ - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); } return 0; } @@ -2543,7 +2547,7 @@ 
static int update_invalid_user_pages(struct amdkfd_process_info *process_info, bo = mem->bo; - amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); + amdgpu_hmm_range_free(mem->range); mem->range = NULL; /* BO reservations and getting user pages (hmm_range_fault) @@ -2567,9 +2571,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, } } + mem->range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!mem->range)) + return -ENOMEM; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range); + ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range); if (ret) { + amdgpu_hmm_range_free(mem->range); + mem->range = NULL; pr_debug("Failed %d to get user pages\n", ret); /* Return -EFAULT bad address error as success. It will @@ -2742,8 +2751,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i continue; /* Only check mem with hmm range associated */ - valid = amdgpu_ttm_tt_get_user_pages_done( - mem->bo->tbo.ttm, mem->range); + valid = amdgpu_hmm_range_valid(mem->range); + amdgpu_hmm_range_free(mem->range); mem->range = NULL; if (!valid) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index c7d32fb216e4..636385c80f64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) u8 frev, crev; int usage_bytes = 0; - if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { - if (frev == 2 && crev == 1) { - fw_usage_v2_1 = - (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); - amdgpu_atomfirmware_allocate_fb_v2_1(adev, - fw_usage_v2_1, - &usage_bytes); - } else if (frev >= 2 && crev >= 2) { - fw_usage_v2_2 = - (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); - amdgpu_atomfirmware_allocate_fb_v2_2(adev, - fw_usage_v2_2, - 
&usage_bytes); + /* Skip atomfirmware allocation for SRIOV VFs when dynamic crit regn is enabled */ + if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) { + if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { + if (frev == 2 && crev == 1) { + fw_usage_v2_1 = + (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_1(adev, + fw_usage_v2_1, + &usage_bytes); + } else if (frev >= 2 && crev >= 2) { + fw_usage_v2_2 = + (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_2(adev, + fw_usage_v2_2, + &usage_bytes); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 00e96419fcda..35d04e69aec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev) * part of the system bios. On boot, the system bios puts a * copy of the igp rom at the start of vram if a discrete card is * present. - * For SR-IOV, the vbios image is also put in VRAM in the VF. + * For SR-IOV, if dynamic critical region is not enabled, + * the vbios image is also put at the start of VRAM in the VF. */ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios = NULL; resource_size_t vram_base; - resource_size_t size = 256 * 1024; /* ??? */ + u32 size = 256U * 1024U; /* ??? 
*/ if (!(adev->flags & AMD_IS_APU)) if (amdgpu_device_need_post(adev)) @@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev) adev->bios = NULL; vram_base = pci_resource_start(adev->pdev, 0); - bios = ioremap_wc(vram_base, size); - if (!bios) - return false; adev->bios = kmalloc(size, GFP_KERNEL); - if (!adev->bios) { - iounmap(bios); + if (!adev->bios) return false; + + /* For SRIOV with dynamic critical region is enabled, + * the vbios image is put at a dynamic offset of VRAM in the VF. + * If dynamic critical region is disabled, follow the existing logic as on baremetal. + */ + if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { + if (amdgpu_virt_get_dynamic_data_info(adev, + AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) { + amdgpu_bios_release(adev); + return false; + } + } else { + bios = ioremap_wc(vram_base, size); + if (!bios) { + amdgpu_bios_release(adev); + return false; + } + + memcpy_fromio(adev->bios, bios, size); + iounmap(bios); } + adev->bios_size = size; - memcpy_fromio(adev->bios, bios, size); - iounmap(bios); if (!check_atom_bios(adev, size)) { amdgpu_bios_release(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index a716c9886c74..2b5e7c46a39d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -38,7 +38,7 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo *bo; struct amdgpu_bo_va *bo_va; uint32_t priority; - struct hmm_range *range; + struct amdgpu_hmm_range *range; bool user_invalidated; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 47e9bfba0642..9f96d568acf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -734,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) 
amdgpu_connector_update_scratch_regs(connector, ret); - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -919,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1146,10 +1142,8 @@ out: amdgpu_connector_update_scratch_regs(connector, ret); exit: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } return ret; } @@ -1486,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - if (!drm_kms_helper_is_poll_worker()) { - pm_runtime_mark_last_busy(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) pm_runtime_put_autosuspend(connector->dev->dev); - } if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || connector->connector_type == DRM_MODE_CONNECTOR_eDP) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c index ef996493115f..425a3e564360 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT /* * Copyright 2025 Advanced Micro Devices, Inc. 
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h index bcb97d245673..353421807387 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2025 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9cd7741d2254..ecdfe6cb36cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -29,7 +29,6 @@ #include #include #include -#include #include #include @@ -41,6 +40,7 @@ #include "amdgpu_gmc.h" #include "amdgpu_gem.h" #include "amdgpu_ras.h" +#include "amdgpu_hmm.h" static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, struct amdgpu_device *adev, @@ -364,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p, if (p->uf_bo && ring->funcs->no_user_fence) return -EINVAL; + if (!p->adev->debug_enable_ce_cs && + chunk_ib->flags & AMDGPU_IB_FLAG_CE) { + dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n"); + return -EINVAL; + } + if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) { if (chunk_ib->flags & AMDGPU_IB_FLAG_CE) @@ -702,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev, */ const s64 us_upper_bound = 200000; - if (!adev->mm_stats.log2_max_MBps) { + if ((!adev->mm_stats.log2_max_MBps) || !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) { *max_bytes = 0; *max_vis_bytes = 0; return; @@ -885,12 +891,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, bool userpage_invalidated = false; struct amdgpu_bo *bo = e->bo; - r = amdgpu_ttm_tt_get_user_pages(bo, &e->range); + e->range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!e->range)) + return -ENOMEM; + + r = amdgpu_ttm_tt_get_user_pages(bo, e->range); if (r) goto 
out_free_user_pages; for (i = 0; i < bo->tbo.ttm->num_pages; i++) { - if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) { + if (bo->tbo.ttm->pages[i] != + hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) { userpage_invalidated = true; break; } @@ -984,9 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, out_free_user_pages: amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - struct amdgpu_bo *bo = e->bo; - - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); + amdgpu_hmm_range_free(e->range); e->range = NULL; } mutex_unlock(&p->bo_list->bo_list_mutex); @@ -1317,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, */ r = 0; amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { - r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm, - e->range); + r |= !amdgpu_hmm_range_valid(e->range); + amdgpu_hmm_range_free(e->range); e->range = NULL; } if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index a70651050acf..62d43b8cbe58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, if (use_bank) { if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return -EINVAL; @@ -179,7 +178,6 @@ end: if (pm_pg_lock) mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off if (rd->id.use_grbm) { if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= 
adev->gfx.config.max_sh_per_se) || (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); mutex_unlock(&rd->lock); @@ -310,7 +307,6 @@ end: mutex_unlock(&rd->lock); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); @@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file 
*f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); amdgpu_virt_disable_access_debugfs(adev); return r; @@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (r) { @@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); if (!x) { @@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0); mutex_unlock(&adev->grbm_idx_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); while (size) { @@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf r = result; out: - 
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu r = result; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused) up_write(&adev->reset_domain->sem); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val) *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val) *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; @@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val) r = amdgpu_benchmark(adev, val); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; @@ -1902,7 +1878,7 @@ no_preempt: continue; } job = to_amdgpu_job(s_job); - if (preempted && (&job->hw_fence.base) == fence) + if (preempted && (&job->hw_fence->base) == fence) /* mark the job as preempted */ job->preemption_status |= AMDGPU_IB_PREEMPTED; } @@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, 
u64 val) ret = -EINVAL; out: - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return ret; @@ -2123,10 +2098,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) debugfs_create_blob("amdgpu_vbios", 0444, root, &adev->debugfs_vbios_blob); - adev->debugfs_discovery_blob.data = adev->mman.discovery_bin; - adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size; - debugfs_create_blob("amdgpu_discovery", 0444, root, - &adev->debugfs_discovery_blob); + if (adev->discovery.debugfs_blob.size) + debugfs_create_blob("amdgpu_discovery", 0444, root, + &adev->discovery.debugfs_blob); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7a899fb4de29..654f4844b7ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1882,6 +1882,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev) { + /* Enabling ASPM causes randoms hangs on Tahiti and Oland on Zen4. + * It's unclear if this is a platform-specific or GPU-specific issue. + * Disable ASPM on SI for the time being. + */ + if (adev->family == AMDGPU_FAMILY_SI) + return true; + #if IS_ENABLED(CONFIG_X86) struct cpuinfo_x86 *c = &cpu_data(0); @@ -2380,7 +2387,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, } /** - * amdgpu_device_ip_is_valid - is the hardware IP enabled + * amdgpu_device_ip_is_hw - is the hardware IP enabled * * @adev: amdgpu_device pointer * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) @@ -2388,6 +2395,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, * Check if the hardware IP is enable or not. * Returns true if it the IP is enable, false if not. 
*/ +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type == block_type) + return adev->ip_blocks[i].status.hw; + } + return false; +} + +/** + * amdgpu_device_ip_is_valid - is the hardware IP valid + * + * @adev: amdgpu_device pointer + * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.) + * + * Check if the hardware IP is valid or not. + * Returns true if it the IP is valid, false if not. + */ bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type) { @@ -2626,7 +2654,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) chip_name = "arcturus"; break; case CHIP_NAVI12: - if (adev->mman.discovery_bin) + if (adev->discovery.bin) return 0; chip_name = "navi12"; break; @@ -2754,6 +2782,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) r = amdgpu_virt_request_full_gpu(adev, true); if (r) return r; + + r = amdgpu_virt_init_critical_region(adev); + if (r) + return r; } switch (adev->asic_type) { @@ -3773,7 +3805,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE) continue; - /* XXX handle errors */ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); if (r) return r; @@ -3856,9 +3887,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) continue; - /* XXX handle errors */ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]); - adev->ip_blocks[i].status.hw = false; + if (r) + return r; /* handle putting the SMC in the appropriate state */ if (!amdgpu_sriov_vf(adev)) { @@ -3888,7 +3919,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) * in each IP into a state suitable for suspend. * Returns 0 on success, negative error code on failure. 
*/ -int amdgpu_device_ip_suspend(struct amdgpu_device *adev) +static int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int r; @@ -4184,7 +4215,6 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, #else return false; #endif - case CHIP_BONAIRE: case CHIP_KAVERI: case CHIP_KABINI: case CHIP_MULLINS: @@ -4278,58 +4308,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) long timeout; int ret = 0; - /* - * By default timeout for jobs is 10 sec - */ - adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000); - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + /* By default timeout for all queues is 2 sec */ + adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = + adev->video_timeout = msecs_to_jiffies(2000); - if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { - while ((timeout_setting = strsep(&input, ",")) && - strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { - ret = kstrtol(timeout_setting, 0, &timeout); - if (ret) - return ret; + if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) + return 0; - if (timeout == 0) { - index++; - continue; - } else if (timeout < 0) { - timeout = MAX_SCHEDULE_TIMEOUT; - dev_warn(adev->dev, "lockup timeout disabled"); - add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); - } else { - timeout = msecs_to_jiffies(timeout); - } + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; - switch (index++) { - case 0: - adev->gfx_timeout = timeout; - break; - case 1: - adev->compute_timeout = timeout; - break; - case 2: - adev->sdma_timeout = timeout; - break; - case 3: - adev->video_timeout = timeout; - break; - default: - break; - } + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + dev_warn(adev->dev, "lockup timeout disabled"); + 
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); + } else { + timeout = msecs_to_jiffies(timeout); } - /* - * There is only one value specified and - * it should apply to all non-compute jobs. - */ - if (index == 1) { - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) - adev->compute_timeout = adev->gfx_timeout; + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; } } + /* When only one value specified apply it to all queues. */ + if (index == 1) + adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout = + adev->video_timeout = timeout; + return ret; } @@ -4384,6 +4409,55 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev) dev_info(adev->dev, "MCBP is enabled\n"); } +static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_atombios_sysfs_init(adev); + if (r) + drm_err(&adev->ddev, + "registering atombios sysfs failed (%d).\n", r); + + r = amdgpu_pm_sysfs_init(adev); + if (r) + dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); + + r = amdgpu_ucode_sysfs_init(adev); + if (r) { + adev->ucode_sysfs_en = false; + dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); + } else + adev->ucode_sysfs_en = true; + + r = amdgpu_device_attr_sysfs_init(adev); + if (r) + dev_err(adev->dev, "Could not create amdgpu device attr\n"); + + r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); + if (r) + dev_err(adev->dev, + "Could not create amdgpu board attributes\n"); + + amdgpu_fru_sysfs_init(adev); + amdgpu_reg_state_sysfs_init(adev); + amdgpu_xcp_sysfs_init(adev); + + return r; +} + +static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev) +{ + if (adev->pm.sysfs_initialized) + amdgpu_pm_sysfs_fini(adev); + 
if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); + amdgpu_device_attr_sysfs_fini(adev); + amdgpu_fru_sysfs_fini(adev); + + amdgpu_reg_state_sysfs_fini(adev); + amdgpu_xcp_sysfs_fini(adev); +} + /** * amdgpu_device_init - initialize the driver * @@ -4483,7 +4557,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->gfx.userq_sch_mutex); mutex_init(&adev->gfx.workload_profile_mutex); mutex_init(&adev->vcn.workload_profile_mutex); - mutex_init(&adev->userq_mutex); amdgpu_device_init_apu_flags(adev); @@ -4511,7 +4584,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->pm.od_kobj_list); - INIT_LIST_HEAD(&adev->userq_mgr_list); + xa_init(&adev->userq_doorbell_xa); INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); @@ -4807,39 +4880,14 @@ fence_driver_init: flush_delayed_work(&adev->delayed_init_work); } + if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) + amdgpu_xgmi_reset_on_init(adev); /* * Place those sysfs registering after `late_init`. As some of those * operations performed in `late_init` might affect the sysfs * interfaces creating. 
*/ - r = amdgpu_atombios_sysfs_init(adev); - if (r) - drm_err(&adev->ddev, - "registering atombios sysfs failed (%d).\n", r); - - r = amdgpu_pm_sysfs_init(adev); - if (r) - dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); - - r = amdgpu_ucode_sysfs_init(adev); - if (r) { - adev->ucode_sysfs_en = false; - dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); - } else - adev->ucode_sysfs_en = true; - - r = amdgpu_device_attr_sysfs_init(adev); - if (r) - dev_err(adev->dev, "Could not create amdgpu device attr\n"); - - r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group); - if (r) - dev_err(adev->dev, - "Could not create amdgpu board attributes\n"); - - amdgpu_fru_sysfs_init(adev); - amdgpu_reg_state_sysfs_init(adev); - amdgpu_xcp_sysfs_init(adev); + r = amdgpu_device_sys_interface_init(adev); if (IS_ENABLED(CONFIG_PERF_EVENTS)) r = amdgpu_pmu_init(adev); @@ -4867,9 +4915,6 @@ fence_driver_init: if (px) vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); - if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI) - amdgpu_xgmi_reset_on_init(adev); - amdgpu_device_check_iommu_direct_map(adev); adev->pm_nb.notifier_call = amdgpu_device_pm_notifier; @@ -4961,15 +5006,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) } amdgpu_fence_driver_hw_fini(adev); - if (adev->pm.sysfs_initialized) - amdgpu_pm_sysfs_fini(adev); - if (adev->ucode_sysfs_en) - amdgpu_ucode_sysfs_fini(adev); - amdgpu_device_attr_sysfs_fini(adev); - amdgpu_fru_sysfs_fini(adev); - - amdgpu_reg_state_sysfs_fini(adev); - amdgpu_xcp_sysfs_fini(adev); + amdgpu_device_sys_interface_fini(adev); /* disable ras feature must before hw fini */ amdgpu_ras_pre_fini(adev); @@ -5044,7 +5081,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); - if (adev->mman.discovery_bin) + if (adev->discovery.bin) amdgpu_discovery_fini(adev); amdgpu_reset_put_reset_domain(adev->reset_domain); @@ -5212,16 
+5249,20 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) dev_warn(adev->dev, "smart shift update failed\n"); if (notify_clients) - drm_client_dev_suspend(adev_to_drm(adev), false); + drm_client_dev_suspend(adev_to_drm(adev)); cancel_delayed_work_sync(&adev->delayed_init_work); amdgpu_ras_suspend(adev); - amdgpu_device_ip_suspend_phase1(adev); + r = amdgpu_device_ip_suspend_phase1(adev); + if (r) + return r; amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); - amdgpu_userq_suspend(adev); + r = amdgpu_userq_suspend(adev); + if (r) + return r; r = amdgpu_device_evict_resources(adev); if (r) @@ -5231,7 +5272,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients) amdgpu_fence_driver_hw_fini(adev); - amdgpu_device_ip_suspend_phase2(adev); + r = amdgpu_device_ip_suspend_phase2(adev); + if (r) + return r; if (amdgpu_sriov_vf(adev)) amdgpu_virt_release_full_gpu(adev, false); @@ -5346,7 +5389,7 @@ exit: flush_delayed_work(&adev->delayed_init_work); if (notify_clients) - drm_client_dev_resume(adev_to_drm(adev), false); + drm_client_dev_resume(adev_to_drm(adev)); amdgpu_ras_resume(adev); @@ -5802,11 +5845,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!amdgpu_ring_sched_ready(ring)) continue; - /* Clear job fence from fence drv to avoid force_completion - * leave NULL and vm flush fence in fence drv - */ - amdgpu_fence_driver_clear_job_fences(ring); - /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } @@ -5951,7 +5989,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context) if (r) goto out; - drm_client_dev_resume(adev_to_drm(tmp_adev), false); + drm_client_dev_resume(adev_to_drm(tmp_adev)); /* * The GPU enters bad state once faulty pages @@ -6286,7 +6324,7 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - 
drm_client_dev_suspend(adev_to_drm(tmp_adev), false); + drm_client_dev_suspend(adev_to_drm(tmp_adev)); /* disable ras on ALL IPs */ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) && @@ -6535,7 +6573,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * * job->base holds a reference to parent fence */ - if (job && dma_fence_is_signaled(&job->hw_fence.base)) { + if (job && dma_fence_is_signaled(&job->hw_fence->base)) { job_signaled = true; dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; @@ -7279,10 +7317,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev, if (adev->gmc.xgmi.connected_to_cpu) return; - if (ring && ring->funcs->emit_hdp_flush) + if (ring && ring->funcs->emit_hdp_flush) { amdgpu_ring_emit_hdp_flush(ring); - else - amdgpu_asic_flush_hdp(adev, ring); + return; + } + + if (!ring && amdgpu_sriov_runtime(adev)) { + if (!amdgpu_kiq_hdp_flush(adev)) + return; + } + + amdgpu_asic_flush_hdp(adev, ring); } void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 73401f0aeb34..fa2a22dfa048 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -107,6 +107,7 @@ #include "vcn_v5_0_1.h" #include "jpeg_v5_0_0.h" #include "jpeg_v5_0_1.h" +#include "amdgpu_ras_mgr.h" #include "amdgpu_vpe.h" #if defined(CONFIG_DRM_AMD_ISP) @@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET; /* This region is read-only and reserved from system use */ - discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC); + discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC); if (discv_regn) { - memcpy(binary, discv_regn, adev->mman.discovery_tmr_size); + memcpy(binary, discv_regn, adev->discovery.size); memunmap(discv_regn); 
return 0; } @@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, else vram_size <<= 20; + /* + * If in VRAM, discovery TMR is marked for reservation. If it is in system mem, + * then it is not required to be reserved. + */ if (sz_valid) { - uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; - amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, - adev->mman.discovery_tmr_size, false); + if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) { + /* For SRIOV VFs with dynamic critical region enabled, + * we will get the IPD binary via below call. + * If dynamic critical is disabled, fall through to normal seq. + */ + if (amdgpu_virt_get_dynamic_data_info(adev, + AMD_SRIOV_MSG_IPD_TABLE_ID, binary, + &adev->discovery.size)) { + dev_err(adev->dev, + "failed to read discovery info from dynamic critical region."); + ret = -EINVAL; + goto exit; + } + } else { + uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET; + + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, + adev->discovery.size, false); + adev->discovery.reserve_tmr = true; + } } else { ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary); } @@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, dev_err(adev->dev, "failed to read discovery info from memory, vram size read: %llx", vram_size); - +exit: return ret; } @@ -389,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, struct binary_header *bhdr) { + uint8_t *discovery_bin = adev->discovery.bin; struct table_info *info; uint16_t checksum; uint16_t offset; @@ -398,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, checksum = le16_to_cpu(info->checksum); struct nps_info_header *nhdr = - (struct nps_info_header *)(adev->mman.discovery_bin + offset); + (struct nps_info_header *)(discovery_bin + offset); if 
(le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) { dev_dbg(adev->dev, "invalid ip discovery nps info table id\n"); return -EINVAL; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, le32_to_cpu(nhdr->size_bytes), checksum)) { dev_dbg(adev->dev, "invalid nps info data table checksum\n"); @@ -417,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev, static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) { - if (amdgpu_discovery == 2) + if (amdgpu_discovery == 2) { + /* Assume there is valid discovery TMR in VRAM even if binary is sideloaded */ + adev->discovery.reserve_tmr = true; return "amdgpu/ip_discovery.bin"; + } switch (adev->asic_type) { case CHIP_VEGA10: @@ -447,49 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; struct binary_header *bhdr; + uint8_t *discovery_bin; const char *fw_name; uint16_t offset; uint16_t size; uint16_t checksum; int r; - adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE; - adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL); - if (!adev->mman.discovery_bin) + adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); + if (!adev->discovery.bin) return -ENOMEM; + adev->discovery.size = DISCOVERY_TMR_SIZE; + adev->discovery.debugfs_blob.data = adev->discovery.bin; + adev->discovery.debugfs_blob.size = adev->discovery.size; + discovery_bin = adev->discovery.bin; /* Read from file if it is the preferred option */ fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) { drm_dbg(&adev->ddev, "use ip discovery information from file"); - r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); + r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin, + fw_name); if (r) goto out; } else { drm_dbg(&adev->ddev, "use ip discovery information from memory"); - r = 
amdgpu_discovery_read_binary_from_mem( - adev, adev->mman.discovery_bin); + r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin); if (r) goto out; } /* check the ip discovery binary signature */ - if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) { + if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) { dev_err(adev->dev, "get invalid ip discovery binary signature\n"); r = -EINVAL; goto out; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); size = le16_to_cpu(bhdr->binary_size) - offset; checksum = le16_to_cpu(bhdr->binary_checksum); - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - size, checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size, + checksum)) { dev_err(adev->dev, "invalid ip discovery binary checksum\n"); r = -EINVAL; goto out; @@ -501,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct ip_discovery_header *ihdr = - (struct ip_discovery_header *)(adev->mman.discovery_bin + offset); + (struct ip_discovery_header *)(discovery_bin + offset); if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) { dev_err(adev->dev, "invalid ip discovery data table signature\n"); r = -EINVAL; goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le16_to_cpu(ihdr->size), checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, + le16_to_cpu(ihdr->size), + checksum)) { dev_err(adev->dev, "invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -522,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct gpu_info_header *ghdr = - (struct gpu_info_header *)(adev->mman.discovery_bin + offset); + (struct gpu_info_header *)(discovery_bin + offset); if 
(le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery gc table id\n"); @@ -530,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(ghdr->size), checksum)) { + if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, + le32_to_cpu(ghdr->size), + checksum)) { dev_err(adev->dev, "invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -544,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct harvest_info_header *hhdr = - (struct harvest_info_header *)(adev->mman.discovery_bin + offset); + (struct harvest_info_header *)(discovery_bin + offset); if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) { dev_err(adev->dev, "invalid ip discovery harvest table signature\n"); @@ -552,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - sizeof(struct harvest_table), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + sizeof(struct harvest_table), checksum)) { dev_err(adev->dev, "invalid harvest data table checksum\n"); r = -EINVAL; goto out; @@ -566,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (offset) { struct vcn_info_header *vhdr = - (struct vcn_info_header *)(adev->mman.discovery_bin + offset); + (struct vcn_info_header *)(discovery_bin + offset); if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery vcn table id\n"); @@ -574,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(vhdr->size_bytes), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + le32_to_cpu(vhdr->size_bytes), checksum)) { 
dev_err(adev->dev, "invalid vcn data table checksum\n"); r = -EINVAL; goto out; @@ -588,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) if (0 && offset) { struct mall_info_header *mhdr = - (struct mall_info_header *)(adev->mman.discovery_bin + offset); + (struct mall_info_header *)(discovery_bin + offset); if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) { dev_err(adev->dev, "invalid ip discovery mall table id\n"); @@ -596,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) goto out; } - if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - le32_to_cpu(mhdr->size_bytes), checksum)) { + if (!amdgpu_discovery_verify_checksum( + discovery_bin + offset, + le32_to_cpu(mhdr->size_bytes), checksum)) { dev_err(adev->dev, "invalid mall data table checksum\n"); r = -EINVAL; goto out; @@ -607,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) return 0; out: - kfree(adev->mman.discovery_bin); - adev->mman.discovery_bin = NULL; + kfree(adev->discovery.bin); + adev->discovery.bin = NULL; if ((amdgpu_discovery != 2) && (RREG32(mmIP_DISCOVERY_VERSION) == 4)) amdgpu_ras_query_boot_status(adev, 4); @@ -620,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev) { amdgpu_discovery_sysfs_fini(adev); - kfree(adev->mman.discovery_bin); - adev->mman.discovery_bin = NULL; + kfree(adev->discovery.bin); + adev->discovery.bin = NULL; } static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, @@ -646,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev, static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev, uint32_t *vcn_harvest_count) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; @@ -655,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct 
amdgpu_device *adev, uint8_t inst; int i, j; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); /* scan harvest bit of all IP data structures */ for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); for (j = 0; j < num_ips; j++) { - ip = (struct ip *)(adev->mman.discovery_bin + - ip_offset); + ip = (struct ip *)(discovery_bin + ip_offset); inst = ip->number_instance; hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id)) @@ -711,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, uint32_t *vcn_harvest_count, uint32_t *umc_harvest_count) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct harvest_table *harvest_info; u16 offset; int i; uint32_t umc_harvest_config = 0; - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset); if (!offset) { @@ -725,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev, return; } - harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset); + harvest_info = (struct harvest_table *)(discovery_bin + offset); for (i = 0; i < 32; i++) { if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) @@ -1021,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj) kobj); struct amdgpu_device *adev = ip_top->adev; - 
adev->ip_top = NULL; kfree(ip_top); + adev->discovery.ip_top = NULL; } static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, @@ -1033,7 +1070,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev, /* Until a uniform way is figured, get mask based on hwid */ switch (hw_id) { case VCN_HWID: - harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; + /* VCN vs UVD+VCE */ + if (!amdgpu_ip_version(adev, VCE_HWIP, 0)) + harvest = ((1 << inst) & adev->vcn.inst_mask) == 0; break; case DMU_HWID: if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK) @@ -1060,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, const size_t _ip_offset, const int num_ips, bool reg_base_64) { + uint8_t *discovery_bin = adev->discovery.bin; int ii, jj, kk, res; uint16_t hw_id; uint8_t inst; @@ -1077,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev, struct ip_v4 *ip; struct ip_hw_instance *ip_hw_instance; - ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(discovery_bin + ip_offset); inst = ip->instance_number; hw_id = le16_to_cpu(ip->hw_id); if (amdgpu_discovery_validate_ip(adev, inst, hw_id) || @@ -1164,17 +1204,20 @@ next_ip: static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) { + struct ip_discovery_top *ip_top = adev->discovery.ip_top; + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; - struct kset *die_kset = &adev->ip_top->die_kset; + struct kset *die_kset = &ip_top->die_kset; u16 num_dies, die_offset, num_ips; size_t ip_offset; int ii, res; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + 
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); DRM_DEBUG("number of dies: %d\n", num_dies); @@ -1183,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) struct ip_die_entry *ip_die_entry; die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -1217,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev) static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; + struct ip_discovery_top *ip_top; struct kset *die_kset; int res, ii; - if (!adev->mman.discovery_bin) + if (!discovery_bin) return -EINVAL; - adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL); - if (!adev->ip_top) + ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL); + if (!ip_top) return -ENOMEM; - adev->ip_top->adev = adev; - - res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype, + ip_top->adev = adev; + adev->discovery.ip_top = ip_top; + res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype, &adev->dev->kobj, "ip_discovery"); if (res) { DRM_ERROR("Couldn't init and add ip_discovery/"); goto Err; } - die_kset = &adev->ip_top->die_kset; + die_kset = &ip_top->die_kset; kobject_set_name(&die_kset->kobj, "%s", "die"); - die_kset->kobj.parent = &adev->ip_top->kobj; + die_kset->kobj.parent = &ip_top->kobj; die_kset->kobj.ktype = &die_kobj_ktype; - res = kset_register(&adev->ip_top->die_kset); + res = kset_register(&ip_top->die_kset); if (res) { DRM_ERROR("Couldn't register die_kset"); goto Err; @@ -1254,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev) return res; Err: - kobject_put(&adev->ip_top->kobj); + kobject_put(&ip_top->kobj); return res; } @@ -1299,10 +1344,11 @@ static void 
amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry) static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) { + struct ip_discovery_top *ip_top = adev->discovery.ip_top; struct list_head *el, *tmp; struct kset *die_kset; - die_kset = &adev->ip_top->die_kset; + die_kset = &ip_top->die_kset; spin_lock(&die_kset->list_lock); list_for_each_prev_safe(el, tmp, &die_kset->list) { list_del_init(el); @@ -1311,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev) spin_lock(&die_kset->list_lock); } spin_unlock(&die_kset->list_lock); - kobject_put(&adev->ip_top->die_kset.kobj); - kobject_put(&adev->ip_top->kobj); + kobject_put(&ip_top->die_kset.kobj); + kobject_put(&ip_top->kobj); } /* ================================================== */ @@ -1323,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) struct binary_header *bhdr; struct ip_discovery_header *ihdr; struct die_header *dhdr; + uint8_t *discovery_bin; struct ip_v4 *ip; uint16_t die_offset; uint16_t ip_offset; @@ -1338,22 +1385,23 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) r = amdgpu_discovery_init(adev); if (r) return r; - + discovery_bin = adev->discovery.bin; wafl_ver = 0; adev->gfx.xcc_mask = 0; adev->sdma.sdma_mask = 0; adev->vcn.inst_mask = 0; adev->jpeg.inst_mask = 0; - bhdr = (struct binary_header *)adev->mman.discovery_bin; - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); + bhdr = (struct binary_header *)discovery_bin; + ihdr = (struct ip_discovery_header + *)(discovery_bin + + le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset)); num_dies = le16_to_cpu(ihdr->num_dies); DRM_DEBUG("number of dies: %d\n", num_dies); for (i = 0; i < num_dies; i++) { die_offset = le16_to_cpu(ihdr->die_info[i].die_offset); - dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset); + dhdr = (struct die_header *)(discovery_bin + 
die_offset); num_ips = le16_to_cpu(dhdr->num_ips); ip_offset = die_offset + sizeof(*dhdr); @@ -1367,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) le16_to_cpu(dhdr->die_id), num_ips); for (j = 0; j < num_ips; j++) { - ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset); + ip = (struct ip_v4 *)(discovery_bin + ip_offset); inst = ip->instance_number; hw_id = le16_to_cpu(ip->hw_id); @@ -1517,16 +1565,16 @@ next_ip: static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct ip_discovery_header *ihdr; struct binary_header *bhdr; int vcn_harvest_count = 0; int umc_harvest_count = 0; uint16_t offset, ihdr_ver; - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset); - ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + - offset); + ihdr = (struct ip_discovery_header *)(discovery_bin + offset); ihdr_ver = le16_to_cpu(ihdr->version); /* * Harvest table does not fit Navi1x and legacy GPUs, @@ -1573,22 +1621,23 @@ union gc_info { static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union gc_info *gc_info; u16 offset; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[GC].offset); if (!offset) return 0; - gc_info = (union gc_info *)(adev->mman.discovery_bin + offset); + gc_info = (union gc_info *)(discovery_bin + offset); switch (le16_to_cpu(gc_info->v1.header.version_major)) { case 1: @@ -1681,24 +1730,25 @@ union mall_info { static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; 
struct binary_header *bhdr; union mall_info *mall_info; u32 u, mall_size_per_umc, m_s_present, half_use; u64 mall_size; u16 offset; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset); if (!offset) return 0; - mall_info = (union mall_info *)(adev->mman.discovery_bin + offset); + mall_info = (union mall_info *)(discovery_bin + offset); switch (le16_to_cpu(mall_info->v1.header.version_major)) { case 1: @@ -1737,12 +1787,13 @@ union vcn_info { static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) { + uint8_t *discovery_bin = adev->discovery.bin; struct binary_header *bhdr; union vcn_info *vcn_info; u16 offset; int v; - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { DRM_ERROR("ip discovery uninitialized\n"); return -EINVAL; } @@ -1757,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev) return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset); if (!offset) return 0; - vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset); + vcn_info = (union vcn_info *)(discovery_bin + offset); switch (le16_to_cpu(vcn_info->v1.header.version_major)) { case 1: @@ -1823,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, struct amdgpu_gmc_memrange **ranges, int *range_cnt, bool refresh) { + uint8_t *discovery_bin = adev->discovery.bin; struct amdgpu_gmc_memrange *mem_ranges; struct binary_header *bhdr; union nps_info *nps_info; @@ -1839,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, return r; nps_info = &nps_data; } else { - if (!adev->mman.discovery_bin) { + if (!discovery_bin) { dev_err(adev->dev, "fetch mem range 
failed, ip discovery uninitialized\n"); return -EINVAL; } - bhdr = (struct binary_header *)adev->mman.discovery_bin; + bhdr = (struct binary_header *)discovery_bin; offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset); if (!offset) @@ -1855,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev, if (amdgpu_discovery_verify_npsinfo(adev, bhdr)) return -ENOENT; - nps_info = - (union nps_info *)(adev->mman.discovery_bin + offset); + nps_info = (union nps_info *)(discovery_bin + offset); } switch (le16_to_cpu(nps_info->v1.header.version_major)) { @@ -2359,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_ip_version(adev, SDMA0_HWIP, 0)); return -EINVAL; } + + return 0; +} + +static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 12): + case IP_VERSION(13, 0, 14): + amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block); + break; + default: + break; + } return 0; } @@ -2565,7 +2631,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0); @@ -2592,7 +2660,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1); @@ -2619,8 +2689,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) 
amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 1; + adev->sdma.sdma_mask = 1; adev->vcn.num_vcn_inst = 1; adev->gmc.num_umc = 2; + adev->gfx.xcc_mask = 1; if (adev->apu_flags & AMD_APU_IS_RAVEN2) { adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0); @@ -2665,7 +2737,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); vega20_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; adev->gmc.num_umc = 8; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0); @@ -2693,8 +2767,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); arct_reg_base_init(adev); adev->sdma.num_instances = 8; + adev->sdma.sdma_mask = 0xff; adev->vcn.num_vcn_inst = 2; adev->gmc.num_umc = 8; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1); @@ -2726,8 +2802,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) amdgpu_discovery_init(adev); aldebaran_reg_base_init(adev); adev->sdma.num_instances = 5; + adev->sdma.sdma_mask = 0x1f; adev->vcn.num_vcn_inst = 2; adev->gmc.num_umc = 4; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2); adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0); @@ -2762,6 +2840,8 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) } else { cyan_skillfish_reg_base_init(adev); adev->sdma.num_instances = 2; + adev->sdma.sdma_mask = 3; + adev->gfx.xcc_mask = 1; adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3); adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3); 
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1); @@ -3125,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; + r = amdgpu_discovery_set_ras_ip_blocks(adev); + if (r) + return r; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && !amdgpu_sriov_vf(adev)) || (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index b44d56465c5b..4ce04486cc31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,9 +24,21 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ +#include + #define DISCOVERY_TMR_SIZE (10 << 10) #define DISCOVERY_TMR_OFFSET (64 << 10) +struct ip_discovery_top; + +struct amdgpu_discovery_info { + struct debugfs_blob_wrapper debugfs_blob; + struct ip_discovery_top *ip_top; + uint32_t size; + uint8_t *bin; + bool reserve_tmr; +}; + void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 51bab32fd8c6..b5d34797d606 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, if (crtc->enabled) active = true; - pm_runtime_mark_last_busy(dev->dev); - adev = drm_to_adev(dev); /* if we have active crtcs and we don't have a power ref, * take the current one @@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = { { AMDGPU_FMT_DITHER_ENABLE, "on" }, }; +/** + * DOC: property for adaptive backlight modulation + * + * The 'adaptive backlight modulation' property is used for the compositor to + * directly control the adaptive backlight modulation power savings feature + * that 
is part of DCN hardware. + * + * The property will be attached specifically to eDP panels that support it. + * + * The property is by default set to 'sysfs' to allow the sysfs file 'panel_power_savings' + * to be able to control it. + * If set to 'off' the compositor will ensure it stays off. + * The other values 'min', 'bias min', 'bias max', and 'max' will control the + * intensity of the power savings. + * + * Modifying this value can have implications on color accuracy, so tread + * carefully. + */ +static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev) +{ + const struct drm_prop_enum_list props[] = { + { ABM_SYSFS_CONTROL, "sysfs" }, + { ABM_LEVEL_OFF, "off" }, + { ABM_LEVEL_MIN, "min" }, + { ABM_LEVEL_BIAS_MIN, "bias min" }, + { ABM_LEVEL_BIAS_MAX, "bias max" }, + { ABM_LEVEL_MAX, "max" }, + }; + struct drm_property *prop; + int i; + + if (!adev->dc_enabled) + return 0; + + prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM, + "adaptive backlight modulation", + 6); + if (!prop) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + int ret; + + ret = drm_property_add_enum(prop, props[i].type, + props[i].name); + + if (ret) { + drm_property_destroy(adev_to_drm(adev), prop); + + return ret; + } + } + + adev->mode_info.abm_level_property = prop; + + return 0; +} + int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) { int sz; @@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) "dither", amdgpu_dither_enum_list, sz); - return 0; + return amdgpu_display_setup_abm_prop(adev); } void amdgpu_display_update_priority(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index 930c171473b4..49a29bf47a37 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev); int 
amdgpu_display_get_scanout_buffer(struct drm_plane *plane, struct drm_scanout_buffer *sb); +#define ABM_SYSFS_CONTROL -1 +#define ABM_LEVEL_OFF 0 +#define ABM_LEVEL_MIN 1 +#define ABM_LEVEL_BIAS_MIN 2 +#define ABM_LEVEL_BIAS_MAX 3 +#define ABM_LEVEL_MAX 4 + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index bff25ef3e2d0..3776901bbb1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6), AMDGPU_DEBUG_SMU_POOL = BIT(7), AMDGPU_DEBUG_VM_USERPTR = BIT(8), - AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) + AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9), + AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10) }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ -353,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint * DOC: lockup_timeout (string) * Set GPU scheduler timeout value in ms. * - * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or - * multiple values specified. 0 and negative values are invalidated. They will be adjusted - * to the default timeout. + * The format can be [single value] for setting all timeouts at once or + * [GFX,Compute,SDMA,Video] to set individual timeouts. + * Negative values mean infinity. * - * - With one value specified, the setting will apply to all non-compute jobs. - * - With multiple values specified, the first one will be for GFX. - * The second one is for Compute. The third and fourth ones are - * for SDMA and Video. - * - * By default(with no lockup_timeout settings), the timeout for all jobs is 10000. + * By default(with no lockup_timeout settings), the timeout for all queues is 2000. */ MODULE_PARM_DESC(lockup_timeout, - "GPU lockup timeout in ms (default: 10000 for all jobs. " - "0: keep default value. 
negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " - "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); -module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); + "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video]."); +module_param_string(lockup_timeout, amdgpu_lockup_timeout, + sizeof(amdgpu_lockup_timeout), 0444); /** * DOC: dpm (int) @@ -2233,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) adev->pdev->bus->number, i); if (p) { pm_runtime_get_sync(&p->dev); - pm_runtime_mark_last_busy(&p->dev); pm_runtime_put_autosuspend(&p->dev); pci_dev_put(p); } @@ -2289,6 +2283,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: disable kernel logs of correctable errors\n"); adev->debug_disable_ce_logs = true; } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) { + pr_info("debug: allowing command submission to CE engine\n"); + adev->debug_enable_ce_cs = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) @@ -2474,7 +2473,6 @@ retry_init: pm_runtime_allow(ddev->dev); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); pci_wake_from_d3(pdev, TRUE); @@ -2558,7 +2556,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) */ if (!amdgpu_passthrough(adev)) adev->mp1_state = PP_MP1_STATE_UNLOAD; - amdgpu_device_ip_suspend(adev); + amdgpu_device_prepare(dev); + amdgpu_device_suspend(dev, true); adev->mp1_state = PP_MP1_STATE_NONE; } @@ -2771,22 +2770,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(drm_dev); - struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int 
queue_id; - int ret = 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - ret = -EBUSY; - goto done; - } - } -done: - mutex_unlock(&adev->userq_mutex); - - return ret; + return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY; } static int amdgpu_pmops_runtime_suspend(struct device *dev) @@ -2933,7 +2918,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) ret = amdgpu_runtime_idle_check_userq(dev); done: - pm_runtime_mark_last_busy(dev); pm_runtime_autosuspend(dev); return ret; } @@ -2969,7 +2953,6 @@ long amdgpu_drm_ioctl(struct file *filp, ret = drm_ioctl(filp, cmd, arg); - pm_runtime_mark_last_busy(dev->dev); out: pm_runtime_put_autosuspend(dev->dev); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index fd8cca241da6..c7843e336310 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -45,16 +45,11 @@ * Cast helper */ static const struct dma_fence_ops amdgpu_fence_ops; -static const struct dma_fence_ops amdgpu_job_fence_ops; static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f) { struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); - if (__f->base.ops == &amdgpu_fence_ops || - __f->base.ops == &amdgpu_job_fence_ops) - return __f; - - return NULL; + return __f; } /** @@ -98,51 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * amdgpu_fence_emit - emit a fence on the requested ring * * @ring: ring the fence is associated with - * @f: resulting fence object * @af: amdgpu fence input * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. 
*/ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, - struct amdgpu_fence *af, unsigned int flags) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, + unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; - struct amdgpu_fence *am_fence; struct dma_fence __rcu **ptr; uint32_t seq; int r; - if (!af) { - /* create a separate hw fence */ - am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL); - if (!am_fence) - return -ENOMEM; - } else { - am_fence = af; - } - fence = &am_fence->base; - am_fence->ring = ring; + fence = &af->base; + af->ring = ring; seq = ++ring->fence_drv.sync_seq; - am_fence->seq = seq; - if (af) { - dma_fence_init(fence, &amdgpu_job_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - /* Against remove in amdgpu_job_{free, free_cb} */ - dma_fence_get(fence); - } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - } + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); - amdgpu_fence_save_wptr(fence); + amdgpu_fence_save_wptr(af); pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { @@ -167,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, */ rcu_assign_pointer(*ptr, dma_fence_get(fence)); - *f = fence; - return 0; } @@ -276,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) drv->signalled_wptr = am_fence->wptr; dma_fence_signal(fence); dma_fence_put(fence); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } while (last_seq != seq); @@ -669,36 +642,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev) } } -/** - * 
amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring - * - * @ring: fence of the ring to be cleared - * - */ -void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) -{ - int i; - struct dma_fence *old, **ptr; - - for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) { - ptr = &ring->fence_drv.fences[i]; - old = rcu_dereference_protected(*ptr, 1); - if (old && old->ops == &amdgpu_job_fence_ops) { - struct amdgpu_job *job; - - /* For non-scheduler bad job, i.e. failed ib test, we need to signal - * it right here or we won't be able to track them in fence_drv - * and they will remain unsignaled during sa_bo free. - */ - job = container_of(old, struct amdgpu_job, hw_fence.base); - if (!job->base.s_fence && !dma_fence_is_signaled(old)) - dma_fence_signal(old); - RCU_INIT_POINTER(*ptr, NULL); - dma_fence_put(old); - } - } -} - /** * amdgpu_fence_driver_set_error - set error code on fences * @ring: the ring which contains the fences @@ -755,21 +698,50 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) /** * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence * - * @fence: fence of the ring to signal + * @af: fence of the ring to signal * */ -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af) { - dma_fence_set_error(&fence->base, -ETIME); - amdgpu_fence_write(fence->ring, fence->seq); - amdgpu_fence_process(fence->ring); + struct dma_fence *unprocessed; + struct dma_fence __rcu **ptr; + struct amdgpu_fence *fence; + struct amdgpu_ring *ring = af->ring; + unsigned long flags; + u32 seq, last_seq; + + last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; + seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; + + /* mark all fences from the guilty context with an error */ + spin_lock_irqsave(&ring->fence_drv.lock, flags); + do { + last_seq++; + last_seq &= 
ring->fence_drv.num_fences_mask; + + ptr = &ring->fence_drv.fences[last_seq]; + rcu_read_lock(); + unprocessed = rcu_dereference(*ptr); + + if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) { + fence = container_of(unprocessed, struct amdgpu_fence, base); + + if (fence == af) + dma_fence_set_error(&fence->base, -ETIME); + else if (fence->context == af->context) + dma_fence_set_error(&fence->base, -ECANCELED); + } + rcu_read_unlock(); + } while (last_seq != seq); + spin_unlock_irqrestore(&ring->fence_drv.lock, flags); + /* signal the guilty fence */ + amdgpu_fence_write(ring, (u32)af->base.seqno); + amdgpu_fence_process(ring); } -void amdgpu_fence_save_wptr(struct dma_fence *fence) +void amdgpu_fence_save_wptr(struct amdgpu_fence *af) { - struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base); - - am_fence->wptr = am_fence->ring->wptr; + af->wptr = af->ring->wptr; } static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, @@ -790,14 +762,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, struct dma_fence *unprocessed; struct dma_fence __rcu **ptr; struct amdgpu_fence *fence; - u64 wptr, i, seqno; + u64 wptr; + u32 seq, last_seq; - seqno = amdgpu_fence_read(ring); + last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask; + seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask; wptr = ring->fence_drv.signalled_wptr; ring->ring_backup_entries_to_copy = 0; - for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { - ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; + do { + last_seq++; + last_seq &= ring->fence_drv.num_fences_mask; + + ptr = &ring->fence_drv.fences[last_seq]; rcu_read_lock(); unprocessed = rcu_dereference(*ptr); @@ -813,7 +790,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, wptr = fence->wptr; } rcu_read_unlock(); - } + } while (last_seq != seq); } /* @@ -830,13 +807,6 @@ static const char 
*amdgpu_fence_get_timeline_name(struct dma_fence *f) return (const char *)to_amdgpu_fence(f)->ring->name; } -static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) -{ - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); - - return (const char *)to_amdgpu_ring(job->base.sched)->name; -} - /** * amdgpu_fence_enable_signaling - enable signalling on fence * @f: fence @@ -853,23 +823,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) return true; } -/** - * amdgpu_job_fence_enable_signaling - enable signalling on job fence - * @f: fence - * - * This is the simliar function with amdgpu_fence_enable_signaling above, it - * only handles the job embedded fence. - */ -static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) -{ - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); - - if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) - amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); - - return true; -} - /** * amdgpu_fence_free - free up the fence memory * @@ -885,21 +838,6 @@ static void amdgpu_fence_free(struct rcu_head *rcu) kfree(to_amdgpu_fence(f)); } -/** - * amdgpu_job_fence_free - free up the job with embedded fence - * - * @rcu: RCU callback head - * - * Free up the job with embedded fence after the RCU grace period. 
- */ -static void amdgpu_job_fence_free(struct rcu_head *rcu) -{ - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - - /* free job if fence has a parent job */ - kfree(container_of(f, struct amdgpu_job, hw_fence.base)); -} - /** * amdgpu_fence_release - callback that fence can be freed * @@ -913,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f) call_rcu(&f->rcu, amdgpu_fence_free); } -/** - * amdgpu_job_fence_release - callback that job embedded fence can be freed - * - * @f: fence - * - * This is the simliar function with amdgpu_fence_release above, it - * only handles the job embedded fence. - */ -static void amdgpu_job_fence_release(struct dma_fence *f) -{ - call_rcu(&f->rcu, amdgpu_job_fence_free); -} - static const struct dma_fence_ops amdgpu_fence_ops = { .get_driver_name = amdgpu_fence_get_driver_name, .get_timeline_name = amdgpu_fence_get_timeline_name, @@ -933,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = { .release = amdgpu_fence_release, }; -static const struct dma_fence_ops amdgpu_job_fence_ops = { - .get_driver_name = amdgpu_fence_get_driver_name, - .get_timeline_name = amdgpu_job_fence_get_timeline_name, - .enable_signaling = amdgpu_job_fence_enable_signaling, - .release = amdgpu_job_fence_release, -}; - /* * Fence debugfs */ @@ -1009,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val) *val = atomic_read(&adev->reset_domain->reset_res); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index b2033f8352f5..83f3b94ed975 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages) { unsigned t; - unsigned p; int i, j; u64 page_base; /* Starting from VEGA10, system bit must be 0 to mean invalid. 
*/ @@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, return; t = offset / AMDGPU_GPU_PAGE_SIZE; - p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - for (i = 0; i < pages; i++, p++) { + for (i = 0; i < pages; i++) { page_base = adev->dummy_page_addr; if (!adev->gart.ptr) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b7ebae289bea..3e38c5db2987 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj); amdgpu_hmm_unregister(aobj); - ttm_bo_put(&aobj->tbo); + ttm_bo_fini(&aobj->tbo); } int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, @@ -531,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_amdgpu_gem_userptr *args = data; struct amdgpu_fpriv *fpriv = filp->driver_priv; struct drm_gem_object *gobj; - struct hmm_range *range; + struct amdgpu_hmm_range *range; struct amdgpu_bo *bo; uint32_t handle; int r; @@ -572,10 +572,14 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, &range); - if (r) + range = amdgpu_hmm_range_alloc(NULL); + if (unlikely(!range)) + return -ENOMEM; + r = amdgpu_ttm_tt_get_user_pages(bo, range); + if (r) { + amdgpu_hmm_range_free(range); goto release_object; - + } r = amdgpu_bo_reserve(bo, true); if (r) goto user_pages_done; @@ -597,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, user_pages_done: if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); - + amdgpu_hmm_range_free(range); release_object: drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 
index ebe2b4c68b0f..8b118c53f351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -33,6 +33,7 @@ #include "amdgpu_reset.h" #include "amdgpu_xcp.h" #include "amdgpu_xgmi.h" +#include "amdgpu_mes.h" #include "nvd.h" /* delay 0.1 second to enable gfx off feature */ @@ -1194,6 +1195,75 @@ failed_kiq_write: dev_err(adev->dev, "failed to write reg:%x\n", reg); } +int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev) +{ + signed long r, cnt = 0; + unsigned long flags; + uint32_t seq; + struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; + struct amdgpu_ring *ring = &kiq->ring; + + if (amdgpu_device_skip_hw_access(adev)) + return 0; + + if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready) + return amdgpu_mes_hdp_flush(adev); + + if (!ring->funcs->emit_hdp_flush) { + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&kiq->ring_lock, flags); + r = amdgpu_ring_alloc(ring, 32); + if (r) + goto failed_unlock; + + amdgpu_ring_emit_hdp_flush(ring); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) + goto failed_undo; + + amdgpu_ring_commit(ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + + /* don't wait anymore for gpu reset case because this way may + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will + * never return if we keep waiting in virt_kiq_rreg, which cause + * gpu_recover() hang there. 
+ * + * also don't wait anymore for IRQ context + * */ + if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt())) + goto failed_kiq_hdp_flush; + + might_sleep(); + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { + if (amdgpu_in_reset(adev)) + goto failed_kiq_hdp_flush; + + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); + } + + if (cnt > MAX_KIQ_REG_TRY) { + dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n"); + return -ETIMEDOUT; + } + + return 0; + +failed_undo: + amdgpu_ring_undo(ring); +failed_unlock: + spin_unlock_irqrestore(&kiq->ring_lock, flags); +failed_kiq_hdp_flush: + dev_err(adev->dev, "failed to flush HDP via KIQ\n"); + return r < 0 ? r : -EIO; +} + int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) { if (amdgpu_num_kcq == -1) { @@ -1600,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev, ret = amdgpu_gfx_run_cleaner_shader(adev, value); - pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); if (ret) @@ -2485,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev) &amdgpu_debugfs_compute_sched_mask_fops); #endif } + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index fb5f7a0ee029..efd61a1ccc66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); +int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 
2c6a6b858112..518ca3f4db2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -168,17 +168,13 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range) + struct amdgpu_hmm_range *range) { - struct hmm_range *hmm_range; unsigned long end; unsigned long timeout; unsigned long *pfns; int r = 0; - - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); - if (unlikely(!hmm_range)) - return -ENOMEM; + struct hmm_range *hmm_range = &range->hmm_range; pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); if (unlikely(!pfns)) { @@ -221,28 +217,79 @@ retry: hmm_range->start = start; hmm_range->hmm_pfns = pfns; - *phmm_range = hmm_range; - return 0; out_free_pfns: kvfree(pfns); + hmm_range->hmm_pfns = NULL; out_free_range: - kfree(hmm_range); - if (r == -EBUSY) r = -EAGAIN; return r; } -bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +/** + * amdgpu_hmm_range_valid - check if an HMM range is still valid + * @range: pointer to the &struct amdgpu_hmm_range to validate + * + * Determines whether the given HMM range @range is still valid by + * checking for invalidations via the MMU notifier sequence. This is + * typically used to verify that the range has not been invalidated + * by concurrent address space updates before it is accessed. 
+ * + * Return: + * * true if @range is valid and can be used safely + * * false if @range is NULL or has been invalidated + */ +bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) { - bool r; + if (!range) + return false; - r = mmu_interval_read_retry(hmm_range->notifier, - hmm_range->notifier_seq); - kvfree(hmm_range->hmm_pfns); - kfree(hmm_range); - - return r; + return !mmu_interval_read_retry(range->hmm_range.notifier, + range->hmm_range.notifier_seq); +} + +/** + * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range + * @bo: optional buffer object to associate with this HMM range + * + * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed. + * The reference count of the @bo is incremented. + * + * Return: + * Pointer to a newly allocated struct amdgpu_hmm_range on success, + * or NULL if memory allocation fails. + */ +struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) +{ + struct amdgpu_hmm_range *range; + + range = kzalloc(sizeof(*range), GFP_KERNEL); + if (!range) + return NULL; + + range->bo = amdgpu_bo_ref(bo); + return range; +} + +/** + * amdgpu_hmm_range_free - release an AMDGPU HMM range + * @range: pointer to the range object to free + * + * Releases all resources held by @range, including the associated + * hmm_pfns and the dropping reference of associated bo if any. 
+ * + * Return: void + */ +void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) +{ + if (!range) + return; + + if (range->hmm_range.hmm_pfns) + kvfree(range->hmm_range.hmm_pfns); + + amdgpu_bo_unref(&range->bo); + kfree(range); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index 953e1d06de20..140bc9cd57b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -31,13 +31,20 @@ #include #include +struct amdgpu_hmm_range { + struct hmm_range hmm_range; + struct amdgpu_bo *bo; +}; + int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, - struct hmm_range **phmm_range); -bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); + struct amdgpu_hmm_range *range); #if defined(CONFIG_HMM_MIRROR) +bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range); +struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo); +void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range); int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_hmm_unregister(struct amdgpu_bo *bo); #else @@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) "add CONFIG_ZONE_DEVICE=y in config file to fix this\n"); return -ENODEV; } + static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {} + +static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) +{ + return false; +} + +static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) +{ + return NULL; +} + +static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {} #endif #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 7d9bcb72e8dd..39229ece83f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -149,17 +149,19 @@ int 
amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, if (job) { vm = job->vm; fence_ctx = job->base.s_fence ? - job->base.s_fence->scheduled.context : 0; + job->base.s_fence->finished.context : 0; shadow_va = job->shadow_va; csa_va = job->csa_va; gds_va = job->gds_va; init_shadow = job->init_shadow; - af = &job->hw_fence; + af = job->hw_fence; /* Save the context of the job for reset handling. * The driver needs this so it can skip the ring * contents for guilty contexts. */ - af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; + af->context = fence_ctx; + /* the vm fence is also part of the job's context */ + job->hw_vm_fence->context = fence_ctx; } else { vm = NULL; fence_ctx = 0; @@ -167,7 +169,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = 0; gds_va = 0; init_shadow = false; - af = NULL; + af = kzalloc(sizeof(*af), GFP_ATOMIC); + if (!af) + return -ENOMEM; } if (!ring->sched.ready) { @@ -289,7 +293,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } - r = amdgpu_fence_emit(ring, f, af, fence_flags); + r = amdgpu_fence_emit(ring, af, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -297,6 +301,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_undo(ring); return r; } + *f = &af->base; if (ring->funcs->insert_end) ring->funcs->insert_end(ring); @@ -317,7 +322,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, * fence so we know what rings contents to backup * after we reset the queue. 
*/ - amdgpu_fence_save_wptr(*f); + amdgpu_fence_save_wptr(af); amdgpu_ring_ib_end(ring); amdgpu_ring_commit(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index d020a890a0ea..e08d837668f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -137,7 +137,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) ring->funcs->reset) { dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); - r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); + r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence); if (!r) { atomic_inc(&ring->adev->gpu_reset_counter); dev_err(adev->dev, "Ring %s reset succeeded\n", @@ -186,6 +186,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int num_ibs, struct amdgpu_job **job, u64 drm_client_id) { + struct amdgpu_fence *af; + int r; + if (num_ibs == 0) return -EINVAL; @@ -193,6 +196,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (!*job) return -ENOMEM; + af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if (!af) { + r = -ENOMEM; + goto err_job; + } + (*job)->hw_fence = af; + + af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if (!af) { + r = -ENOMEM; + goto err_fence; + } + (*job)->hw_vm_fence = af; + (*job)->vm = vm; amdgpu_sync_create(&(*job)->explicit_sync); @@ -204,6 +221,13 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, return drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id); + +err_fence: + kfree((*job)->hw_fence); +err_job: + kfree(*job); + + return r; } int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, @@ -251,11 +275,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) struct dma_fence *f; unsigned i; - /* Check if any fences where initialized */ + /* Check if any fences were initialized */ if (job->base.s_fence && job->base.s_fence->finished.ops) f = 
&job->base.s_fence->finished; - else if (job->hw_fence.base.ops) - f = &job->hw_fence.base; + else if (job->hw_fence && job->hw_fence->base.ops) + f = &job->hw_fence->base; else f = NULL; @@ -271,11 +295,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); - /* only put the hw fence if has embedded fence */ - if (!job->hw_fence.base.ops) - kfree(job); - else - dma_fence_put(&job->hw_fence.base); + kfree(job); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -304,10 +324,7 @@ void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) dma_fence_put(job->gang_submit); - if (!job->hw_fence.base.ops) - kfree(job); - else - dma_fence_put(&job->hw_fence.base); + kfree(job); } struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 4a6487eb6cb5..7abf069d17d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -64,7 +64,8 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; struct amdgpu_sync explicit_sync; - struct amdgpu_fence hw_fence; + struct amdgpu_fence *hw_fence; + struct amdgpu_fence *hw_vm_fence; struct dma_fence *gang_submit; uint32_t preamble_status; uint32_t preemption_status; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 6b7d66b6d4cc..63ee6ba6a931 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -371,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val) for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) { ring = &adev->jpeg.inst[i].ring_dec[j]; - if (val & (BIT_ULL(1) << ((i * adev->jpeg.num_jpeg_rings) + j))) + if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))) ring->sched.ready = true; else 
ring->sched.ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a9327472c651..6ee77f431d56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) ui64 = atomic64_read(&adev->num_vram_cpu_page_faults); return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VRAM_USAGE: - ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager); + ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? + ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0; return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; case AMDGPU_INFO_VIS_VRAM_USAGE: ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr); @@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.vram.usable_heap_size = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size) - AMDGPU_VM_RESERVED_VRAM; - mem.vram.heap_usage = - ttm_resource_manager_usage(vram_man); + mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? 
+ ttm_resource_manager_usage(vram_man) : 0; mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.cpu_accessible_vram.total_heap_size = @@ -1470,7 +1471,6 @@ error_pasid: kfree(fpriv); out_suspend: - pm_runtime_mark_last_busy(dev->dev); pm_put: pm_runtime_put_autosuspend(dev->dev); @@ -1538,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, kfree(fpriv); file_priv->driver_priv = NULL; - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 5bf9be073cdd..9c182ce501af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev) spin_lock_init(&adev->mes.ring_lock[i]); adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK; - adev->mes.vmid_mask_mmhub = 0xffffff00; - adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00; + adev->mes.vmid_mask_mmhub = 0xFF00; + adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 
0xFFFE : 0xFF00; num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me; if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES) @@ -409,7 +409,7 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev, return -EINVAL; /* Clear the doorbell array before detection */ - memset(adev->mes.hung_queue_db_array_cpu_addr, 0, + memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET, adev->mes.hung_queue_db_array_size * sizeof(u32)); input.queue_type = queue_type; input.detect_only = detect_only; @@ -420,12 +420,17 @@ int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev, dev_err(adev->dev, "failed to detect and reset\n"); } else { *hung_db_num = 0; - for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) { + for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) { if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) { hung_db_array[i] = db_array[i]; *hung_db_num += 1; } } + + /* + * TODO: return HQD info for MES scheduled user compute queue reset cases + * stored in hung_db_array hqd info offset to full array size + */ } return r; @@ -523,6 +528,18 @@ error: return r; } +int amdgpu_mes_hdp_flush(struct amdgpu_device *adev) +{ + uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask; + + hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev); + hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev); + ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0; + + return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset, + ref_and_mask, ref_and_mask); +} + int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, @@ -686,14 +703,11 @@ out: bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) { uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; - bool is_supported = false; - if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && - 
amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && - mes_rev >= 0x63) - is_supported = true; - - return is_supported; + return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) && + amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) && + mes_rev >= 0x63) || + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0)); } /* Fix me -- node_id is used to identify the correct MES instances in the future */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 6b506fc72f58..e989225b354b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -149,6 +149,7 @@ struct amdgpu_mes { void *resource_1_addr[AMDGPU_MAX_MES_PIPES]; int hung_queue_db_array_size; + int hung_queue_hqd_info_offset; struct amdgpu_bo *hung_queue_db_array_gpu_obj; uint64_t hung_queue_db_array_gpu_addr; void *hung_queue_db_array_cpu_addr; @@ -238,6 +239,7 @@ struct mes_add_queue_input { struct mes_remove_queue_input { uint32_t doorbell_offset; uint64_t gang_context_addr; + bool remove_queue_after_reset; }; struct mes_map_legacy_queue_input { @@ -427,6 +429,7 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev, int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, uint32_t reg0, uint32_t reg1, uint32_t ref, uint32_t mask); +int amdgpu_mes_hdp_flush(struct amdgpu_device *adev); int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 20460cfd09bc..dc8d2f52c7d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -326,6 +326,8 @@ struct amdgpu_mode_info { struct drm_property *audio_property; /* FMT dithering */ struct drm_property *dither_property; + /* Adaptive Backlight Modulation (power feature) */ + struct drm_property *abm_level_property; /* hardcoded 
DFP edid from BIOS */ const struct drm_edid *bios_hardcoded_edid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 656b8a931dae..52c2d1731aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -96,6 +96,7 @@ struct amdgpu_bo_va { * if non-zero, cannot unmap from GPU because user queues may still access it */ unsigned int queue_refcount; + atomic_t userq_va_mapped; }; struct amdgpu_bo { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c index 123bcf5c2bb1..bacf888735db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c @@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf, } amdgpu_gfx_off_ctrl(adev, true); - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index e0ee21150860..c8b4dd3ea5c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -41,6 +41,7 @@ #include "atom.h" #include "amdgpu_reset.h" #include "amdgpu_psp.h" +#include "amdgpu_ras_mgr.h" #ifdef CONFIG_X86_MCE_AMD #include @@ -611,6 +612,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, return size; } +static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev); + /** * DOC: AMDGPU RAS debugfs EEPROM table reset interface * @@ -635,6 +638,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, (struct amdgpu_device *)file_inode(f)->i_private; int ret; + if (amdgpu_uniras_enabled(adev)) { + ret = amdgpu_uniras_clear_badpages_info(adev); + return ret ? 
ret : size; + } + ret = amdgpu_ras_eeprom_reset_table( &(amdgpu_ras_get_context(adev)->eeprom_control)); @@ -1542,9 +1550,51 @@ out_fini_err_data: return ret; } +static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev) +{ + struct ras_cmd_dev_handle req = {0}; + int ret; + + ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO, + &req, sizeof(req), NULL, 0); + if (ret) { + dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret); + return ret; + } + + return 0; +} + +static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev, + struct ras_query_if *info) +{ + struct ras_cmd_block_ecc_info_req req = {0}; + struct ras_cmd_block_ecc_info_rsp rsp = {0}; + int ret; + + if (!info) + return -EINVAL; + + req.block_id = info->head.block; + req.subblock_id = info->head.sub_block_index; + + ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS, + &req, sizeof(req), &rsp, sizeof(rsp)); + if (!ret) { + info->ce_count = rsp.ce_count; + info->ue_count = rsp.ue_count; + info->de_count = rsp.de_count; + } + + return ret; +} + int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { - return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_uniras_query_block_ecc(adev, info); + else + return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID); } int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, @@ -1596,6 +1646,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, return 0; } +static int amdgpu_uniras_error_inject(struct amdgpu_device *adev, + struct ras_inject_if *info) +{ + struct ras_cmd_inject_error_req inject_req; + struct ras_cmd_inject_error_rsp rsp; + + if (!info) + return -EINVAL; + + memset(&inject_req, 0, sizeof(inject_req)); + inject_req.block_id = info->head.block; + inject_req.subblock_id = info->head.sub_block_index; + 
inject_req.address = info->address; + inject_req.error_type = info->head.type; + inject_req.instance_mask = info->instance_mask; + inject_req.value = info->value; + + return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR, + &inject_req, sizeof(inject_req), &rsp, sizeof(rsp)); +} + /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -1613,6 +1684,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, info->head.block, info->head.sub_block_index); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_uniras_error_inject(adev, info); + /* inject on guest isn't allowed, return success directly */ if (amdgpu_sriov_vf(adev)) return 0; @@ -1757,7 +1831,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, /* sysfs begin */ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, - struct ras_badpage **bps, unsigned int *count); + struct ras_badpage *bps, uint32_t count, uint32_t start); +static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev, + struct ras_badpage *bps, uint32_t count, uint32_t start); static char *amdgpu_ras_badpage_flags_str(unsigned int flags) { @@ -1815,19 +1891,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, unsigned int end = div64_ul(ppos + count - 1, element_size); ssize_t s = 0; struct ras_badpage *bps = NULL; - unsigned int bps_count = 0; + int bps_count = 0, i, status; + uint64_t address; memset(buf, 0, count); - if (amdgpu_ras_badpages_read(adev, &bps, &bps_count)) + bps_count = end - start; + bps = kmalloc_array(bps_count, sizeof(*bps), GFP_KERNEL); + if (!bps) return 0; - for (; start < end && start < bps_count; start++) + memset(bps, 0, sizeof(*bps) * bps_count); + + if (amdgpu_uniras_enabled(adev)) + bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start); + else + bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start); + + if (bps_count <= 0) { + kfree(bps); + return 0; + } + + 
for (i = 0; i < bps_count; i++) { + address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT; + if (amdgpu_ras_check_critical_address(adev, address)) + continue; + + bps[i].size = AMDGPU_GPU_PAGE_SIZE; + + status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, + address); + if (status == -EBUSY) + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; + else if (status == -ENOENT) + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; + else + bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED; + s += scnprintf(&buf[s], element_size + 1, "0x%08x : 0x%08x : %1s\n", - bps[start].bp, - bps[start].size, - amdgpu_ras_badpage_flags_str(bps[start].flags)); + bps[i].bp, + bps[i].size, + amdgpu_ras_badpage_flags_str(bps[i].flags)); + } kfree(bps); @@ -1843,12 +1950,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, return sysfs_emit(buf, "feature mask: 0x%x\n", con->features); } +static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major, + u32 *minor, u32 *rev) +{ + int i; + + if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev)) + return false; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) { + *major = adev->ip_blocks[i].version->major; + *minor = adev->ip_blocks[i].version->minor; + *rev = adev->ip_blocks[i].version->rev; + return true; + } + } + + return false; +} + static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct amdgpu_ras *con = container_of(attr, struct amdgpu_ras, version_attr); - return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version); + u32 major, minor, rev; + ssize_t size = 0; + + size += sysfs_emit_at(buf, size, "table version: 0x%x\n", + con->eeprom_control.tbl_hdr.version); + + if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev)) + size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n", + major, minor, rev); + + return size; } static 
ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, @@ -2241,6 +2378,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY)) return; + if (amdgpu_uniras_enabled(adev)) { + amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL); + return; + } + if (adev->nbio.ras && adev->nbio.ras->handle_ras_controller_intr_no_bifring) adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); @@ -2411,6 +2553,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct ras_manager *obj; struct ras_ih_data *data; + if (amdgpu_uniras_enabled(adev)) { + struct ras_ih_info ih_info; + + memset(&ih_info, 0, sizeof(ih_info)); + ih_info.block = info->head.block; + memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry)); + + return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info); + } + obj = amdgpu_ras_find_obj(adev, &info->head); if (!obj) return -EINVAL; @@ -2605,62 +2757,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) } } -/* recovery begin */ - -/* return 0 on success. - * caller need free bps. 
- */ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, - struct ras_badpage **bps, unsigned int *count) + struct ras_badpage *bps, uint32_t count, uint32_t start) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; - int i = 0; - int ret = 0, status; + int r = 0; + uint32_t i; if (!con || !con->eh_data || !bps || !count) return -EINVAL; mutex_lock(&con->recovery_lock); data = con->eh_data; - if (!data || data->count == 0) { - *bps = NULL; - ret = -EINVAL; - goto out; + if (start < data->count) { + for (i = start; i < data->count; i++) { + if (!data->bps[i].ts) + continue; + + bps[r].bp = data->bps[i].retired_page; + r++; + if (r >= count) + break; + } } - - *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL); - if (!*bps) { - ret = -ENOMEM; - goto out; - } - - for (; i < data->count; i++) { - if (!data->bps[i].ts) - continue; - - (*bps)[i] = (struct ras_badpage){ - .bp = data->bps[i].retired_page, - .size = AMDGPU_GPU_PAGE_SIZE, - .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, - }; - - if (amdgpu_ras_check_critical_address(adev, - data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT)) - continue; - - status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr, - data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT); - if (status == -EBUSY) - (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; - else if (status == -ENOENT) - (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; - } - - *count = con->bad_page_num; -out: mutex_unlock(&con->recovery_lock); - return ret; + + return r; +} + +static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev, + struct ras_badpage *bps, uint32_t count, uint32_t start) +{ + struct ras_cmd_bad_pages_info_req cmd_input; + struct ras_cmd_bad_pages_info_rsp *output; + uint32_t group, start_group, end_group; + uint32_t pos, pos_in_group; + int r = 0, i; + + if (!bps || !count) + return -EINVAL; + + output = kmalloc(sizeof(*output), GFP_KERNEL); + if (!output) + return 
-ENOMEM; + + memset(&cmd_input, 0, sizeof(cmd_input)); + + start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) / + RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + + pos = start; + for (group = start_group; group < end_group; group++) { + memset(output, 0, sizeof(*output)); + cmd_input.group_index = group; + if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES, + &cmd_input, sizeof(cmd_input), output, sizeof(*output))) + goto out; + + if (pos >= output->bp_total_cnt) + goto out; + + pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + for (i = pos_in_group; i < output->bp_in_group; i++, pos++) { + if (!output->records[i].ts) + continue; + + bps[r].bp = output->records[i].retired_page; + r++; + if (r >= count) + goto out; + } + } + +out: + kfree(output); + return r; } static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, @@ -3126,7 +3299,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, *new_cnt = unit_num; /* only new entries are saved */ - if (unit_num > 0) { + if (unit_num && save_count) { /*old asics only save pa to eeprom like before*/ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) { if (amdgpu_ras_eeprom_append(control, @@ -3590,6 +3763,9 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) if (!con || amdgpu_sriov_vf(adev)) return 0; + if (amdgpu_uniras_enabled(adev)) + return 0; + control = &con->eeprom_control; ret = amdgpu_ras_eeprom_init(control); control->is_eeprom_valid = !ret; @@ -3975,7 +4151,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work) atomic_set(&con->ras_ue_count, ue_count); } - pm_runtime_mark_last_busy(dev->dev); Out: pm_runtime_put_autosuspend(dev->dev); } @@ -4584,6 +4759,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_ struct ras_event_state *event_state; int ret = 0; + if (amdgpu_uniras_enabled(adev)) + return 0; + if (type >= RAS_EVENT_TYPE_COUNT) { ret = 
-EINVAL; goto out; @@ -4634,20 +4812,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type return id; } -void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) +int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum ras_event_type type = RAS_EVENT_TYPE_FATAL; - u64 event_id; + u64 event_id = RAS_EVENT_INVALID_ID; - if (amdgpu_ras_mark_ras_event(adev, type)) { - dev_err(adev->dev, - "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n"); - return; - } + if (amdgpu_uniras_enabled(adev)) + return 0; - event_id = amdgpu_ras_acquire_event_id(adev, type); + if (!amdgpu_ras_mark_ras_event(adev, type)) + event_id = amdgpu_ras_acquire_event_id(adev, type); RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); @@ -4656,6 +4832,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); } + + return -EBUSY; } bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) @@ -5408,6 +5586,9 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_ras_mgr_is_rma(adev); + if (!con) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6cf0dfd38be8..556cf4d7b5ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -504,6 +504,7 @@ struct ras_critical_region { }; struct amdgpu_ras { + void *ras_mgr; /* ras infrastructure */ /* for ras itself. 
*/ uint32_t features; @@ -909,7 +910,7 @@ static inline void amdgpu_ras_intr_cleared(void) atomic_set(&amdgpu_ras_in_intr, 0); } -void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); +int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 3eb3fb55ccb0..5a7bf0661dbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -32,6 +32,7 @@ #include #include "amdgpu_reset.h" +#include "amdgpu_ras_mgr.h" /* These are memory addresses as would be seen by one or more EEPROM * chips strung on the I2C bus, usually by manipulating pins 1-3 of a @@ -556,6 +557,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + if (amdgpu_uniras_enabled(adev)) + return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev); + if (!__is_ras_eeprom_supported(adev) || !amdgpu_bad_page_threshold) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 8f6ce948c684..43f769fed810 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -159,8 +159,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) */ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { - while (ib->length_dw & ring->funcs->align_mask) - ib->ptr[ib->length_dw++] = ring->funcs->nop; + u32 align_mask = ring->funcs->align_mask; + u32 count = ib->length_dw & align_mask; + + if (count) { + count = align_mask + 1 - count; + + memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count); + + ib->length_dw += count; + } } /** @@ -811,7 +819,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, if (r) return r; - /* signal the fence of the 
bad job */ + /* signal the guilty fence and set an error on all fences from the context */ if (guilty_fence) amdgpu_fence_driver_guilty_force_completion(guilty_fence); /* Re-emit the non-guilty commands */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index b6b649179776..87b962df5460 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -147,16 +147,14 @@ struct amdgpu_fence { u64 wptr; /* fence context for resets */ u64 context; - uint32_t seq; }; extern const struct drm_sched_backend_ops amdgpu_sched_ops; -void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); -void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); -void amdgpu_fence_save_wptr(struct dma_fence *fence); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af); +void amdgpu_fence_save_wptr(struct amdgpu_fence *af); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, @@ -166,8 +164,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, - struct amdgpu_fence *af, unsigned int flags); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af, + unsigned int flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c index 41ebe690eeff..3739be1b71e0 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u dev_err(adev->dev, "Invalid input: %s\n", str); } - pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index aa9ee5dffa45..9777c5c9cb26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -286,12 +286,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, * move and different for a BO to BO copy. * */ -int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - const struct amdgpu_copy_mem *src, - const struct amdgpu_copy_mem *dst, - uint64_t size, bool tmz, - struct dma_resv *resv, - struct dma_fence **f) +__attribute__((nonnull)) +static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, + const struct amdgpu_copy_mem *src, + const struct amdgpu_copy_mem *dst, + uint64_t size, bool tmz, + struct dma_resv *resv, + struct dma_fence **f) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor src_mm, dst_mm; @@ -365,9 +366,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, } error: mutex_unlock(&adev->mman.gtt_window_lock); - if (f) - *f = dma_fence_get(fence); - dma_fence_put(fence); + *f = fence; return r; } @@ -706,10 +705,11 @@ struct amdgpu_ttm_tt { * memory and start HMM tracking CPU page table update * * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only - * once afterwards to stop HMM tracking + * once afterwards to stop HMM tracking. Its the caller responsibility to ensure + * that range is a valid memory and it is freed too. 
*/ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct amdgpu_hmm_range *range) { struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); @@ -719,9 +719,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, bool readonly; int r = 0; - /* Make sure get_user_pages_done() can cleanup gracefully */ - *range = NULL; - mm = bo->notifier.mm; if (unlikely(!mm)) { DRM_DEBUG_DRIVER("BO is not registered?\n"); @@ -756,38 +753,6 @@ out_unlock: return r; } -/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations - */ -void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range) -{ - struct amdgpu_ttm_tt *gtt = (void *)ttm; - - if (gtt && gtt->userptr && range) - amdgpu_hmm_range_get_pages_done(range); -} - -/* - * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change - * Check if the pages backing this ttm range have been invalidated - * - * Returns: true if pages are still valid - */ -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range) -{ - struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); - - if (!gtt || !gtt->userptr || !range) - return false; - - DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", - gtt->userptr, ttm->num_pages); - - WARN_ONCE(!range->hmm_pfns, "No user pages to check\n"); - - return !amdgpu_hmm_range_get_pages_done(range); -} #endif /* @@ -797,12 +762,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, * that backs user memory and will ultimately be mapped into the device * address space. */ -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range) +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range) { unsigned long i; for (i = 0; i < ttm->num_pages; ++i) - ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL; + ttm->pages[i] = range ? 
hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL; } /* @@ -1804,18 +1769,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; } - if (!adev->gmc.is_app_apu) { - ret = amdgpu_bo_create_kernel_at( - adev, adev->gmc.real_vram_size - reserve_size, - reserve_size, &adev->mman.fw_reserved_memory, NULL); - if (ret) { - dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); - amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, - NULL, NULL); - return ret; - } - } else { - DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n"); + ret = amdgpu_bo_create_kernel_at( + adev, adev->gmc.real_vram_size - reserve_size, reserve_size, + &adev->mman.fw_reserved_memory, NULL); + if (ret) { + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, + NULL); + return ret; } return 0; @@ -1837,7 +1798,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev) for (i = 0; i < adev->gmc.num_mem_partitions; i++) { ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev, adev->gmc.mem_partitions[i].numa.node, - false, false); + TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M))); } return 0; } @@ -1930,8 +1891,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, adev_to_drm(adev)->vma_offset_manager, - adev->need_swiotlb, - dma_addressing_limited(adev->dev)); + (adev->need_swiotlb ? + TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) | + (dma_addressing_limited(adev->dev) ? + TTM_ALLOCATION_POOL_USE_DMA32 : 0) | + TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M))); if (r) { dev_err(adev->dev, "failed initializing buffer object driver(%d).\n", r); @@ -1980,19 +1944,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; /* - *The reserved vram for driver must be pinned to the specified - *place on the VRAM, so reserve it early. 
+ * The reserved VRAM for the driver must be pinned to a specific + * location in VRAM, so reserve it early. */ r = amdgpu_ttm_drv_reserve_vram_init(adev); if (r) return r; /* - * only NAVI10 and onwards ASIC support for IP discovery. - * If IP discovery enabled, a block of memory should be - * reserved for IP discovey. + * only NAVI10 and later ASICs support IP discovery. + * If IP discovery is enabled, a block of memory should be + * reserved for it. */ - if (adev->mman.discovery_bin) { + if (adev->discovery.reserve_tmr) { r = amdgpu_ttm_reserve_tmr(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 0be2728aa872..577ee04ce0bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -28,6 +28,7 @@ #include #include #include "amdgpu_vram_mgr.h" +#include "amdgpu_hmm.h" #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) @@ -82,9 +83,6 @@ struct amdgpu_mman { uint64_t stolen_reserved_offset; uint64_t stolen_reserved_size; - /* discovery */ - uint8_t *discovery_bin; - uint32_t discovery_tmr_size; /* fw reserved memory */ struct amdgpu_bo *fw_reserved_memory; struct amdgpu_bo *fw_reserved_memory_extend; @@ -170,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct dma_resv *resv, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush, uint32_t copy_flags); -int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, - const struct amdgpu_copy_mem *src, - const struct amdgpu_copy_mem *dst, - uint64_t size, bool tmz, - struct dma_resv *resv, - struct dma_fence **f); int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, struct dma_resv *resv, struct dma_fence **fence); @@ -192,29 +184,16 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range 
**range); -void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range); -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range); + struct amdgpu_hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct hmm_range **range) + struct amdgpu_hmm_range *range) { return -EPERM; } -static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, - struct hmm_range *range) -{ -} -static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, - struct hmm_range *range) -{ - return false; -} #endif -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range); +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range); int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, uint64_t *user_addr); int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 1add21160d21..13cc5a686dfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -29,6 +29,8 @@ #include "amdgpu.h" #include "amdgpu_vm.h" #include "amdgpu_userq.h" +#include "amdgpu_hmm.h" +#include "amdgpu_reset.h" #include "amdgpu_userq_fence.h" u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) @@ -44,10 +46,29 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev) return userq_ip_mask; } -int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, - u64 expected_size) +static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue, + struct amdgpu_bo_va_mapping *va_map, u64 addr) +{ + struct amdgpu_userq_va_cursor *va_cursor; + struct userq_va_list; + + va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL); + if (!va_cursor) + return -ENOMEM; + + INIT_LIST_HEAD(&va_cursor->list); + va_cursor->gpu_addr = addr; + 
atomic_set(&va_map->bo_va->userq_va_mapped, 1); + list_add(&va_cursor->list, &queue->userq_va_list); + + return 0; +} + +int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, + u64 addr, u64 expected_size) { struct amdgpu_bo_va_mapping *va_map; + struct amdgpu_vm *vm = queue->vm; u64 user_addr; u64 size; int r = 0; @@ -67,6 +88,7 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, /* Only validate the userq whether resident in the VM mapping range */ if (user_addr >= va_map->start && va_map->last - user_addr + 1 >= size) { + amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr); amdgpu_bo_unreserve(vm->root.bo); return 0; } @@ -77,6 +99,76 @@ out_err: return r; } +static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr) +{ + struct amdgpu_bo_va_mapping *mapping; + bool r; + + if (amdgpu_bo_reserve(vm->root.bo, false)) + return false; + + mapping = amdgpu_vm_bo_lookup_mapping(vm, addr); + if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped)) + r = true; + else + r = false; + amdgpu_bo_unreserve(vm->root.bo); + + return r; +} + +static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_userq_va_cursor *va_cursor, *tmp; + int r = 0; + + list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { + r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr); + dev_dbg(queue->userq_mgr->adev->dev, + "validate the userq mapping:%p va:%llx r:%d\n", + queue, va_cursor->gpu_addr, r); + } + + if (r != 0) + return true; + + return false; +} + +static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping, + struct amdgpu_userq_va_cursor *va_cursor) +{ + atomic_set(&mapping->bo_va->userq_va_mapped, 0); + list_del(&va_cursor->list); + kfree(va_cursor); +} + +static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev, + struct amdgpu_usermode_queue *queue) +{ + struct amdgpu_userq_va_cursor *va_cursor, 
*tmp; + struct amdgpu_bo_va_mapping *mapping; + int r; + + r = amdgpu_bo_reserve(queue->vm->root.bo, false); + if (r) + return r; + + list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { + mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr); + if (!mapping) { + r = -EINVAL; + goto err; + } + dev_dbg(adev->dev, "delete the userq:%p va:%llx\n", + queue, va_cursor->gpu_addr); + amdgpu_userq_buffer_va_list_del(mapping, va_cursor); + } +err: + amdgpu_bo_unreserve(queue->vm->root.bo); + return r; +} + static int amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) @@ -159,19 +251,24 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr, return r; } -static void +static int amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { struct dma_fence *f = queue->last_fence; - int ret; + int ret = 0; if (f && !dma_fence_is_signaled(f)) { - ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); - if (ret <= 0) + ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT); + if (ret <= 0) { drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n", f->context, f->seqno); + queue->state = AMDGPU_USERQ_STATE_HUNG; + return -ETIME; + } } + + return ret; } static void @@ -182,16 +279,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_device *adev = uq_mgr->adev; const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type]; + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + + /* Drop the userq reference. 
*/ + amdgpu_userq_buffer_vas_list_cleanup(adev, queue); uq_funcs->mqd_destroy(uq_mgr, queue); amdgpu_userq_fence_driver_free(queue); - idr_remove(&uq_mgr->userq_idr, queue_id); + /* Use interrupt-safe locking since IRQ handlers may access these XArrays */ + xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id); + xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index); + queue->userq_mgr = NULL; + list_del(&queue->userq_va_list); kfree(queue); + + up_read(&adev->reset_domain->sem); } static struct amdgpu_usermode_queue * amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid) { - return idr_find(&uq_mgr->userq_idr, qid); + return xa_load(&uq_mgr->userq_mgr_xa, qid); } void @@ -319,17 +427,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr, case AMDGPU_HW_IP_DMA: db_size = sizeof(u64); break; - - case AMDGPU_HW_IP_VCN_ENC: - db_size = sizeof(u32); - db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1; - break; - - case AMDGPU_HW_IP_VPE: - db_size = sizeof(u32); - db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1; - break; - default: drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n", db_info->queue_type); @@ -391,7 +488,6 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -463,8 +559,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) struct amdgpu_db_info db_info; char *queue_name; bool skip_map_queue; + u32 qid; uint64_t index; - int qid, r = 0; + int r = 0; int priority = (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; @@ -487,7 +584,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) * * This will also make sure we have a valid eviction fence ready to be used. 
*/ - mutex_lock(&adev->userq_mutex); amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr); uq_funcs = adev->userq_funcs[args->in.ip_type]; @@ -505,14 +601,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } - /* Validate the userq virtual address.*/ - if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || - amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || - amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { - r = -EINVAL; - kfree(queue); - goto unlock; - } + INIT_LIST_HEAD(&queue->userq_va_list); queue->doorbell_handle = args->in.doorbell_handle; queue->queue_type = args->in.ip_type; queue->vm = &fpriv->vm; @@ -523,6 +612,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) db_info.db_obj = &queue->db_obj; db_info.doorbell_offset = args->in.doorbell_offset; + /* Validate the userq virtual address.*/ + if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) || + amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || + amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { + r = -EINVAL; + kfree(queue); + goto unlock; + } + /* Convert relative doorbell offset into absolute doorbell index */ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp); if (index == (uint64_t)-EINVAL) { @@ -548,16 +646,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) goto unlock; } + /* Wait for mode-1 reset to complete */ + down_read(&adev->reset_domain->sem); + r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL)); + if (r) { + kfree(queue); + up_read(&adev->reset_domain->sem); + goto unlock; + } - qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL); - if (qid < 0) { + r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, 
AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL); + if (r) { drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n"); amdgpu_userq_fence_driver_free(queue); uq_funcs->mqd_destroy(uq_mgr, queue); kfree(queue); r = -ENOMEM; + up_read(&adev->reset_domain->sem); goto unlock; } + up_read(&adev->reset_domain->sem); + queue->userq_mgr = uq_mgr; /* don't map the queue if scheduling is halted */ if (adev->userq_halt_for_enforce_isolation && @@ -570,7 +679,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) r = amdgpu_userq_map_helper(uq_mgr, queue); if (r) { drm_file_err(uq_mgr->file, "Failed to map Queue\n"); - idr_remove(&uq_mgr->userq_idr, qid); + xa_erase(&uq_mgr->userq_mgr_xa, qid); amdgpu_userq_fence_driver_free(queue); uq_funcs->mqd_destroy(uq_mgr, queue); kfree(queue); @@ -595,7 +704,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) unlock: mutex_unlock(&uq_mgr->userq_mutex); - mutex_unlock(&adev->userq_mutex); return r; } @@ -693,11 +801,19 @@ static int amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id; + unsigned long queue_id; int ret = 0, r; /* Resume all the queues for this process */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { + + if (!amdgpu_userq_buffer_vas_mapped(queue)) { + drm_file_err(uq_mgr->file, + "trying restore queue without va mapping\n"); + queue->state = AMDGPU_USERQ_STATE_INVALID_VA; + continue; + } + r = amdgpu_userq_restore_helper(uq_mgr, queue); if (r) ret = r; @@ -760,12 +876,21 @@ static int amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + bool invalidated = false, new_addition = false; + struct ttm_operation_ctx ctx = { true, false }; struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_hmm_range *range; struct amdgpu_vm *vm = &fpriv->vm; + unsigned long key, tmp_key; struct amdgpu_bo_va 
*bo_va; + struct amdgpu_bo *bo; struct drm_exec exec; + struct xarray xa; int ret; + xa_init(&xa); + +retry_lock: drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0); drm_exec_until_all_locked(&exec) { ret = amdgpu_vm_lock_pd(vm, &exec, 1); @@ -792,10 +917,72 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) goto unlock_all; } + if (invalidated) { + xa_for_each(&xa, tmp_key, range) { + bo = range->bo; + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unlock_all; + + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unlock_all; + } + invalidated = false; + } + ret = amdgpu_vm_handle_moved(adev, vm, NULL); if (ret) goto unlock_all; + key = 0; + /* Validate User Ptr BOs */ + list_for_each_entry(bo_va, &vm->done, base.vm_status) { + bo = bo_va->base.bo; + + if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm)) + continue; + + range = xa_load(&xa, key); + if (range && range->bo != bo) { + xa_erase(&xa, key); + amdgpu_hmm_range_free(range); + range = NULL; + } + + if (!range) { + range = amdgpu_hmm_range_alloc(bo); + if (!range) { + ret = -ENOMEM; + goto unlock_all; + } + + xa_store(&xa, key, range, GFP_KERNEL); + new_addition = true; + } + key++; + } + + if (new_addition) { + drm_exec_fini(&exec); + xa_for_each(&xa, tmp_key, range) { + if (!range) + continue; + bo = range->bo; + ret = amdgpu_ttm_tt_get_user_pages(bo, range); + if (ret) + goto unlock_all; + } + + invalidated = true; + new_addition = false; + goto retry_lock; + } + ret = amdgpu_vm_update_pdes(adev, vm, false); if (ret) goto unlock_all; @@ -815,6 +1002,13 @@ amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) unlock_all: drm_exec_fini(&exec); + xa_for_each(&xa, tmp_key, range) { + if (!range) + continue; + bo = range->bo; + amdgpu_hmm_range_free(range); + } + xa_destroy(&xa); 
return ret; } @@ -848,11 +1042,11 @@ static int amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id; + unsigned long queue_id; int ret = 0, r; /* Try to unmap all the queues in this process ctx */ - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { r = amdgpu_userq_preempt_helper(uq_mgr, queue); if (r) ret = r; @@ -867,9 +1061,10 @@ static int amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr) { struct amdgpu_usermode_queue *queue; - int queue_id, ret; + unsigned long queue_id; + int ret; - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) { struct dma_fence *f = queue->last_fence; if (!f || dma_fence_is_signaled(f)) @@ -922,44 +1117,30 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f struct amdgpu_device *adev) { mutex_init(&userq_mgr->userq_mutex); - idr_init_base(&userq_mgr->userq_idr, 1); + xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC); userq_mgr->adev = adev; userq_mgr->file = file_priv; - mutex_lock(&adev->userq_mutex); - list_add(&userq_mgr->list, &adev->userq_mgr_list); - mutex_unlock(&adev->userq_mutex); - INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker); return 0; } void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr) { - struct amdgpu_device *adev = userq_mgr->adev; struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - uint32_t queue_id; + unsigned long queue_id; cancel_delayed_work_sync(&userq_mgr->resume_work); - mutex_lock(&adev->userq_mutex); mutex_lock(&userq_mgr->userq_mutex); - idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { + xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) { amdgpu_userq_wait_for_last_fence(userq_mgr, queue); amdgpu_userq_unmap_helper(userq_mgr, queue); amdgpu_userq_cleanup(userq_mgr, queue, queue_id); } - 
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - if (uqm == userq_mgr) { - list_del(&uqm->list); - break; - } - } - idr_destroy(&userq_mgr->userq_idr); + xa_destroy(&userq_mgr->userq_mgr_xa); mutex_unlock(&userq_mgr->userq_mutex); - mutex_unlock(&adev->userq_mutex); mutex_destroy(&userq_mgr->userq_mutex); } @@ -967,57 +1148,50 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0, r; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + int r; if (!ip_mask) return 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; cancel_delayed_work_sync(&uqm->resume_work); - mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (adev->in_s0ix) - r = amdgpu_userq_preempt_helper(uqm, queue); - else - r = amdgpu_userq_unmap_helper(uqm, queue); - if (r) - ret = r; - } - mutex_unlock(&uqm->userq_mutex); + guard(mutex)(&uqm->userq_mutex); + if (adev->in_s0ix) + r = amdgpu_userq_preempt_helper(uqm, queue); + else + r = amdgpu_userq_unmap_helper(uqm, queue); + if (r) + return r; } - mutex_unlock(&adev->userq_mutex); - return ret; + return 0; } int amdgpu_userq_resume(struct amdgpu_device *adev) { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; - int ret = 0, r; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; + int r; if (!ip_mask) return 0; - mutex_lock(&adev->userq_mutex); - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (adev->in_s0ix) - r = amdgpu_userq_restore_helper(uqm, queue); - else - r = 
amdgpu_userq_map_helper(uqm, queue); - if (r) - ret = r; - } - mutex_unlock(&uqm->userq_mutex); + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; + guard(mutex)(&uqm->userq_mutex); + if (adev->in_s0ix) + r = amdgpu_userq_restore_helper(uqm, queue); + else + r = amdgpu_userq_map_helper(uqm, queue); + if (r) + return r; } - mutex_unlock(&adev->userq_mutex); - return ret; + + return 0; } int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, @@ -1025,33 +1199,31 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) return 0; - mutex_lock(&adev->userq_mutex); if (adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already stopped!\n"); adev->userq_halt_for_enforce_isolation = true; - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; cancel_delayed_work_sync(&uqm->resume_work); mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (((queue->queue_type == AMDGPU_HW_IP_GFX) || - (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && - (queue->xcp_id == idx)) { - r = amdgpu_userq_preempt_helper(uqm, queue); - if (r) - ret = r; - } + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && + (queue->xcp_id == idx)) { + r = amdgpu_userq_preempt_helper(uqm, queue); + if (r) + ret = r; } mutex_unlock(&uqm->userq_mutex); } - mutex_unlock(&adev->userq_mutex); + return ret; } @@ -1060,21 +1232,20 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device 
*adev, { u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; - int queue_id; + struct amdgpu_userq_mgr *uqm; + unsigned long queue_id; int ret = 0, r; /* only need to stop gfx/compute */ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE)))) return 0; - mutex_lock(&adev->userq_mutex); if (!adev->userq_halt_for_enforce_isolation) dev_warn(adev->dev, "userq scheduling already started!\n"); adev->userq_halt_for_enforce_isolation = false; - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + uqm = queue->userq_mgr; mutex_lock(&uqm->userq_mutex); - idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { if (((queue->queue_type == AMDGPU_HW_IP_GFX) || (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && (queue->xcp_id == idx)) { @@ -1082,9 +1253,39 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, if (r) ret = r; } - } mutex_unlock(&uqm->userq_mutex); } - mutex_unlock(&adev->userq_mutex); + return ret; } + +int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t saddr) +{ + u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev); + struct amdgpu_bo_va *bo_va = mapping->bo_va; + struct dma_resv *resv = bo_va->base.bo->tbo.base.resv; + int ret = 0; + + if (!ip_mask) + return 0; + + dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr); + /** + * The userq VA mapping reservation should include the eviction fence, + * if the eviction fence can't signal successfully during unmapping, + * then driver will warn to flag this improper unmap of the userq VA. + * Note: The eviction fence may be attached to different BOs, and this + * unmap is only for one kind of userq VAs, so at this point suppose + * the eviction fence is always unsignaled. 
+ */ + if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) { + ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true, + MAX_SCHEDULE_TIMEOUT); + if (ret <= 0) + return -EBUSY; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index c027dd916672..09da0617bfa2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -37,6 +37,7 @@ enum amdgpu_userq_state { AMDGPU_USERQ_STATE_MAPPED, AMDGPU_USERQ_STATE_PREEMPTED, AMDGPU_USERQ_STATE_HUNG, + AMDGPU_USERQ_STATE_INVALID_VA, }; struct amdgpu_mqd_prop; @@ -47,6 +48,11 @@ struct amdgpu_userq_obj { struct amdgpu_bo *obj; }; +struct amdgpu_userq_va_cursor { + u64 gpu_addr; + struct list_head list; +}; + struct amdgpu_usermode_queue { int queue_type; enum amdgpu_userq_state state; @@ -66,6 +72,8 @@ struct amdgpu_usermode_queue { u32 xcp_id; int priority; struct dentry *debugfs_queue; + + struct list_head userq_va_list; }; struct amdgpu_userq_funcs { @@ -88,11 +96,15 @@ struct amdgpu_userq_funcs { /* Usermode queues for gfx */ struct amdgpu_userq_mgr { - struct idr userq_idr; + /** + * @userq_mgr_xa: Per-process user queue map (queue ID → queue) + * Key: queue_id (unique ID within the process's userq manager) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_mgr_xa; struct mutex userq_mutex; struct amdgpu_device *adev; struct delayed_work resume_work; - struct list_head list; struct drm_file *file; }; @@ -136,7 +148,9 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, u32 idx); - -int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, - u64 expected_size); +int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue, + u64 addr, u64 expected_size); +int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev, + struct 
amdgpu_bo_va_mapping *mapping, + uint64_t saddr); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c index 761bad98da3e..2aeeaa954882 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c @@ -537,7 +537,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data, } /* Retrieve the user queue */ - queue = idr_find(&userq_mgr->userq_idr, args->queue_id); + queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id); if (!queue) { r = -ENOENT; goto put_gobj_write; @@ -899,7 +899,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data, */ num_fences = dma_fence_dedup_array(fences, num_fences); - waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id); + waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id); if (!waitq) { r = -EINVAL; goto free_fences; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index dc8a17bcc3c8..82624b44e661 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -100,7 +100,8 @@ #define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ - uint32_t internal_reg_offset, addr; \ + /* To avoid a -Wunused-but-set-variable warning. */ \ + uint32_t internal_reg_offset __maybe_unused, addr; \ bool video_range, video1_range, aon_range, aon1_range; \ \ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ @@ -161,7 +162,8 @@ #define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \ ({ \ - uint32_t internal_reg_offset, addr; \ + /* To avoid a -Wunused-but-set-variable warning. 
*/ \ + uint32_t internal_reg_offset __maybe_unused, addr; \ bool video_range, video1_range, aon_range, aon1_range; \ \ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3328ab63376b..f2ce8f506aa8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -44,6 +44,18 @@ vf2pf_info->ucode_info[ucode].version = ver; \ } while (0) +#define mmRCC_CONFIG_MEMSIZE 0xde3 + +const char *amdgpu_virt_dynamic_crit_table_name[] = { + "IP DISCOVERY", + "VBIOS IMG", + "RAS TELEMETRY", + "DATA EXCHANGE", + "BAD PAGE INFO", + "INIT HEADER", + "LAST", +}; + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) { /* By now all MMIO pages except mailbox are blocked */ @@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev) virt->ops->req_init_data(adev); if (adev->virt.req_init_data_ver > 0) - DRM_INFO("host supports REQ_INIT_DATA handshake\n"); + dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n", + adev->virt.req_init_data_ver); else - DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n"); + dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n"); } /** @@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev) &adev->virt.mm_table.gpu_addr, (void *)&adev->virt.mm_table.cpu_addr); if (r) { - DRM_ERROR("failed to alloc mm table and error = %d.\n", r); + dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r); return r; } memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE); - DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n", + dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n", adev->virt.mm_table.gpu_addr, adev->virt.mm_table.cpu_addr); return 0; @@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) if 
(amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GPU_PAGE_SIZE, &bo, NULL)) - DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); + dev_dbg(adev->dev, + "RAS WARN: reserve vram for retired page %llx fail\n", + bp); data->bps_bo[i] = bo; } data->last_reserved = i + 1; @@ -598,8 +613,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->driver_cert = 0; vf2pf_info->os_info.all = 0; - vf2pf_info->fb_usage = - ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20; + vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ? + ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0; vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20; vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20; @@ -658,10 +673,34 @@ out: schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); } +static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data) +{ + uint32_t dataexchange_offset = + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset; + uint32_t dataexchange_size = + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10; + uint64_t pos = 0; + + dev_info(adev->dev, + "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n", + dataexchange_offset, dataexchange_size); + + if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) { + dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n"); + return -EINVAL; + } + + pos = (uint64_t)dataexchange_offset; + amdgpu_device_vram_access(adev, pos, pfvf_data, + dataexchange_size, false); + + return 0; +} + void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) { if (adev->virt.vf2pf_update_interval_ms != 0) { - DRM_INFO("clean up the vf2pf work item\n"); + dev_info(adev->dev, "clean up the vf2pf work item\n"); 
cancel_delayed_work_sync(&adev->virt.vf2pf_work); adev->virt.vf2pf_update_interval_ms = 0; } @@ -669,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) { + uint32_t *pfvf_data = NULL; + adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; adev->virt.vf2pf_update_retry_cnt = 0; if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { - DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); + dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!"); } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { /* go through this logic in ip_init and reset to init workqueue*/ amdgpu_virt_exchange_data(adev); @@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); } else if (adev->bios != NULL) { /* got through this logic in early init stage to get necessary flags, e.g. 
rlcg_acc related*/ - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) { + pfvf_data = + kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10, + GFP_KERNEL); + if (!pfvf_data) { + dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n"); + return; + } - amdgpu_virt_read_pf2vf_data(adev); + if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data)) + goto free_pfvf_data; + + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data; + + amdgpu_virt_read_pf2vf_data(adev); + +free_pfvf_data: + kfree(pfvf_data); + pfvf_data = NULL; + adev->virt.fw_reserve.p_pf2vf = NULL; + } else { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); + + amdgpu_virt_read_pf2vf_data(adev); + } } } @@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { if (adev->mman.fw_vram_usage_va) { - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - adev->virt.fw_reserve.p_vf2pf = - (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); - adev->virt.fw_reserve.ras_telemetry = - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); + if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + + 
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset + + (AMD_SRIOV_MSG_SIZE_KB << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset); + } else { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10)); + adev->virt.fw_reserve.ras_telemetry = + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10)); + } } else if (adev->mman.drv_vram_usage_va) { adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10)); adev->virt.fw_reserve.p_vf2pf = (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10)); adev->virt.fw_reserve.ras_telemetry = - (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10)); + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10)); } amdgpu_virt_read_pf2vf_data(adev); @@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg) break; default: /* other chip doesn't support SRIOV */ is_sriov = false; - DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type); + dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type); break; } } @@ -842,6 +921,215 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev) adev->virt.ras.cper_rptr = 0; } +static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end) +{ + uint32_t sum = 0; + + if (buf_start >= 
buf_end) + return 0; + + for (; buf_start < buf_end; buf_start++) + sum += buf_start[0]; + + return 0xffffffff - sum; +} + +int amdgpu_virt_init_critical_region(struct amdgpu_device *adev) +{ + struct amd_sriov_msg_init_data_header *init_data_hdr = NULL; + u64 init_hdr_offset = adev->virt.init_data_header.offset; + u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB → bytes */ + u64 vram_size; + u64 end; + int r = 0; + uint8_t checksum = 0; + + /* Skip below init if critical region version != v2 */ + if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2) + return 0; + + if (init_hdr_offset < 0) { + dev_err(adev->dev, "Invalid init header offset\n"); + return -EINVAL; + } + + vram_size = RREG32(mmRCC_CONFIG_MEMSIZE); + if (!vram_size || vram_size == U32_MAX) + return -EINVAL; + vram_size <<= 20; + + if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) { + dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n"); + return -EINVAL; + } + + /* Allocate for init_data_hdr */ + init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL); + if (!init_data_hdr) + return -ENOMEM; + + amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr, + sizeof(struct amd_sriov_msg_init_data_header), false); + + /* Table validation */ + if (strncmp(init_data_hdr->signature, + AMDGPU_SRIOV_CRIT_DATA_SIGNATURE, + AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) { + dev_err(adev->dev, "Invalid init data signature: %.4s\n", + init_data_hdr->signature); + r = -EINVAL; + goto out; + } + + checksum = amdgpu_virt_crit_region_calc_checksum( + (uint8_t *)&init_data_hdr->initdata_offset, + (uint8_t *)init_data_hdr + + sizeof(struct amd_sriov_msg_init_data_header)); + if (checksum != init_data_hdr->checksum) { + dev_err(adev->dev, "Found unmatching checksum from calculation 0x%x and init_data 0x%x\n", + checksum, init_data_hdr->checksum); + r = -EINVAL; + goto out; + } + + 
memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn)); + memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl)); + + adev->virt.crit_regn.offset = init_data_hdr->initdata_offset; + adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb; + + /* Validation and initialization for each table entry */ + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) { + if (!init_data_hdr->ip_discovery_size_in_kb || + init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID], + init_data_hdr->ip_discovery_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset = + init_data_hdr->ip_discovery_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb = + init_data_hdr->ip_discovery_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) { + if (!init_data_hdr->vbios_img_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID], + init_data_hdr->vbios_img_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset = + init_data_hdr->vbios_img_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb = + init_data_hdr->vbios_img_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) { + if (!init_data_hdr->ras_tele_info_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID], + init_data_hdr->ras_tele_info_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset = + init_data_hdr->ras_tele_info_offset; + 
adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb = + init_data_hdr->ras_tele_info_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) { + if (!init_data_hdr->dataexchange_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID], + init_data_hdr->dataexchange_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset = + init_data_hdr->dataexchange_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb = + init_data_hdr->dataexchange_size_in_kb; + } + + if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) { + if (!init_data_hdr->bad_page_size_in_kb) { + dev_err(adev->dev, "Invalid %s size: 0x%x\n", + amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID], + init_data_hdr->bad_page_size_in_kb); + r = -EINVAL; + goto out; + } + + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset = + init_data_hdr->bad_page_info_offset; + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb = + init_data_hdr->bad_page_size_in_kb; + } + + /* Validation for critical region info */ + if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) { + dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n", + adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb); + r = -EINVAL; + goto out; + } + + /* reserved memory starts from crit region base offset with the size of 5MB */ + adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset; + adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10; + dev_info(adev->dev, + "critical region v%d requested to reserve memory start at %08llx with %llu KB.\n", + init_data_hdr->version, + adev->mman.fw_vram_usage_start_offset, + adev->mman.fw_vram_usage_size >> 10); + + 
adev->virt.is_dynamic_crit_regn_enabled = true; + +out: + kfree(init_data_hdr); + init_data_hdr = NULL; + + return r; +} + +int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev, + int data_id, uint8_t *binary, u32 *size) +{ + uint32_t data_offset = 0; + uint32_t data_size = 0; + enum amd_sriov_msg_table_id_enum data_table_id = data_id; + + if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID) + return -EINVAL; + + data_offset = adev->virt.crit_regn_tbl[data_table_id].offset; + data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10; + + /* Validate on input params */ + if (!binary || !size || *size < (uint64_t)data_size) + return -EINVAL; + + /* Proceed to copy the dynamic content */ + amdgpu_device_vram_access(adev, + (uint64_t)data_offset, (uint32_t *)binary, data_size, false); + *size = (uint64_t)data_size; + + dev_dbg(adev->dev, + "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n", + amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size); + + return 0; +} + void amdgpu_virt_init(struct amdgpu_device *adev) { bool is_sriov = false; @@ -1289,7 +1577,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc case AMDGPU_RAS_BLOCK__MPIO: return RAS_TELEMETRY_GPU_BLOCK_MPIO; default: - DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n", + dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block); return RAS_TELEMETRY_GPU_BLOCK_COUNT; } @@ -1304,7 +1592,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return 0; tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL); @@ -1383,7 +1671,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; 
used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return -EINVAL; cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL); @@ -1515,7 +1803,7 @@ static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev, checksum = host_telemetry->header.checksum; used_size = host_telemetry->header.used_size; - if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) + if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10)) return 0; tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d1172c8e58c4..14d864be5800 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -54,6 +54,12 @@ #define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2 +/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */ +#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA" +#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4 + +#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id))) + enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, SRIOV_VF_MODE_ONE_VF, @@ -262,6 +268,11 @@ struct amdgpu_virt_ras { DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST); +struct amdgpu_virt_region { + uint32_t offset; + uint32_t size_kb; +}; + /* GPU virtualization */ struct amdgpu_virt { uint32_t caps; @@ -289,6 +300,12 @@ struct amdgpu_virt { bool ras_init_done; uint32_t reg_access; + /* dynamic(v2) critical regions */ + struct amdgpu_virt_region init_data_header; + struct amdgpu_virt_region crit_regn; + struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID]; + bool is_dynamic_crit_regn_enabled; + /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; @@ -424,6 +441,10 @@ void 
amdgpu_virt_exchange_data(struct amdgpu_device *adev); void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev); void amdgpu_virt_init(struct amdgpu_device *adev); +int amdgpu_virt_init_critical_region(struct amdgpu_device *adev); +int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev, + int data_id, uint8_t *binary, u32 *size); + bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev); int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index c1a801203949..9309830821b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -779,7 +779,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool cleaner_shader_needed = false; bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; - struct amdgpu_fence *af; unsigned int patch; int r; @@ -842,12 +841,10 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, } if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) { - r = amdgpu_fence_emit(ring, &fence, NULL, 0); + r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0); if (r) return r; - /* this is part of the job's context */ - af = container_of(fence, struct amdgpu_fence, base); - af->context = job->base.s_fence ? 
job->base.s_fence->finished.context : 0; + fence = &job->hw_vm_fence->base; } if (vm_flush_needed) { @@ -1952,6 +1949,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; + int r; saddr /= AMDGPU_GPU_PAGE_SIZE; @@ -1972,6 +1970,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, return -ENOENT; } + /* It's unlikely to happen that the mapping userq hasn't been idled + * during user requests GEM unmap IOCTL except for forcing the unmap + * from user space. + */ + if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) { + r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr); + if (unlikely(r == -EBUSY)) + dev_warn_once(adev->dev, + "Attempt to unmap an active userq buffer\n"); + } + list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); mapping->bo_va = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 474bfe36c0c2..aa78c2ee9e21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block) return 0; } +static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev) +{ + switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) { + case IP_VERSION(6, 1, 1): + return adev->pm.fw_version < 0x0a640500; + default: + return false; + } +} + +static int vpe_get_dpm_level(struct amdgpu_device *adev) +{ + struct amdgpu_vpe *vpe = &adev->vpe; + + if (!adev->pm.dpm_enabled) + return 0; + + return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv)); +} + static void vpe_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = @@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work) unsigned int fences = 0; fences += amdgpu_fence_count_emitted(&adev->vpe.ring); + if (fences) + goto reschedule; - if (fences == 0) - 
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); - else - schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); + if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0) + goto reschedule; + + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE); + return; + +reschedule: + schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT); } static int vpe_common_init(struct amdgpu_vpe *vpe) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a5adb2ed9b3c..9d934c07fa6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj, !adev->gmc.vram_vendor) return 0; + if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) + return 0; + return attr->mode; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 3a79ed7d8031..1cee083fb6bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -23,26 +23,84 @@ #ifndef AMDGV_SRIOV_MSG__H_ #define AMDGV_SRIOV_MSG__H_ -/* unit in kilobytes */ -#define AMD_SRIOV_MSG_VBIOS_OFFSET 0 -#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64 -#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB -#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4 -#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 -#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2 -#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64 +#define AMD_SRIOV_MSG_SIZE_KB 1 + /* - * layout + * layout v1 * 0 64KB 65KB 66KB 68KB 132KB * | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... * | 64KB | 1KB | 1KB | 2KB | 64KB | ... 
*/ -#define AMD_SRIOV_MSG_SIZE_KB 1 -#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB -#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) -#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB) -#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB) +/* + * layout v2 (offsets are dynamically allocated and the offsets below are examples) + * 0 1KB 64KB 65KB 66KB 68KB 132KB + * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ... + * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ... + * + * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously) + */ + +/* v1 layout sizes */ +#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64 +#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1 +#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1 +#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2 +#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64 +#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \ + (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \ + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1) + +/* v1 offsets */ +#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0 +#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 +#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048 +#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 +#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB) +#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \ + (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1) +#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \ + (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \ + AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1) + +enum amd_sriov_crit_region_version { + 
GPU_CRIT_REGION_V1 = 1, + GPU_CRIT_REGION_V2 = 2, +}; + +/* v2 layout offset enum (in order of allocation) */ +enum amd_sriov_msg_table_id_enum { + AMD_SRIOV_MSG_IPD_TABLE_ID = 0, + AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, + AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID, + AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID, + AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID, + AMD_SRIOV_MSG_INITD_H_TABLE_ID, + AMD_SRIOV_MSG_MAX_TABLE_ID, +}; + +struct amd_sriov_msg_init_data_header { + char signature[4]; /* "INDA" */ + uint32_t version; + uint32_t checksum; + uint32_t initdata_offset; /* 0 */ + uint32_t initdata_size_in_kb; /* 5MB */ + uint32_t valid_tables; + uint32_t vbios_img_offset; + uint32_t vbios_img_size_in_kb; + uint32_t dataexchange_offset; + uint32_t dataexchange_size_in_kb; + uint32_t ras_tele_info_offset; + uint32_t ras_tele_info_size_in_kb; + uint32_t ip_discovery_offset; + uint32_t ip_discovery_size_in_kb; + uint32_t bad_page_info_offset; + uint32_t bad_page_size_in_kb; + uint32_t reserved[8]; +}; /* * PF2VF history log: diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c index 96616a865aac..ed1e25661706 100644 --- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT /* * Copyright 2018 Advanced Micro Devices, Inc. 
* diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 8841d7213de4..751732f3e883 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -9951,6 +9951,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush, }; static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 66c47c466532..252517ce5d5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -2438,7 +2438,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev) if (version_minor == 3) gfx_v11_0_load_rlcp_rlcv_microcode(adev); } - + return 0; } @@ -3886,7 +3886,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev) } memcpy(fw, fw_data, fw_size); - + amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); @@ -5862,8 +5862,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; - BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); control |= ib->length_dw | (vmid << 24); @@ -7320,6 +7318,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush, }; static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 710ec9c34e43..35d5a7e99a7c 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -4419,8 +4419,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; - BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE); - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); control |= ib->length_dw | (vmid << 24); @@ -5597,6 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush, }; static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0856ff65288c..d3d0a4b0380c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6939,6 +6939,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_rreg = gfx_v8_0_ring_emit_rreg, .emit_wreg = gfx_v8_0_ring_emit_wreg, + .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dd19a97436db..f1a2efc2a8d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -7586,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, + .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, }; static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index 77f9d5b9a556..e0b50c690f8c 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -2152,7 +2152,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) return 0; } -static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore) +static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, + bool restore) { struct amdgpu_device *adev = ring->adev; struct v9_mqd *mqd = ring->mqd_ptr; @@ -2186,8 +2187,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0); amdgpu_ring_clear_ring(ring); } - - return 0; } static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) @@ -2220,7 +2219,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) { struct amdgpu_ring *ring; - int i, r; + int i; gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); @@ -2228,9 +2227,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); - if (r) - return r; + gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); } return amdgpu_gfx_enable_kcq(adev, xcc_id); @@ -3605,11 +3602,8 @@ pipe_reset: return r; } - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); - if (r) { - dev_err(adev->dev, "fail to init kcq\n"); - return r; - } + gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); + spin_lock_irqsave(&kiq->ring_lock, flags); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size); if (r) { @@ -4798,6 +4792,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { .emit_wreg = gfx_v9_4_3_ring_emit_wreg, .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, 
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, }; static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index f4a19357ccbc..cad2d19105c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -312,9 +312,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; } - mutex_lock(&adev->mman.gtt_window_lock); gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0); - mutex_unlock(&adev->mman.gtt_window_lock); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 93d7ccb7d013..0e5e54d0a9a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1068,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block) GFP_KERNEL); if (!adev->gmc.vm_fault_info) return -ENOMEM; - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); return 0; } @@ -1290,7 +1290,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) - && !atomic_read(&adev->gmc.vm_fault_info_updated)) { + && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1306,8 +1306,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, info->prot_read = protections & 0x8 ? true : false; info->prot_write = protections & 0x10 ? true : false; info->prot_exec = protections & 0x20 ? 
true : false; - mb(); - atomic_set(&adev->gmc.vm_fault_info_updated, 1); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c5e2a2c41e06..e1509480dfc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1183,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block) GFP_KERNEL); if (!adev->gmc.vm_fault_info) return -ENOMEM; - atomic_set(&adev->gmc.vm_fault_info_updated, 0); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 0); return 0; } @@ -1478,7 +1478,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) - && !atomic_read(&adev->gmc.vm_fault_info_updated)) { + && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) { struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, @@ -1494,8 +1494,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, info->prot_read = protections & 0x8 ? true : false; info->prot_write = protections & 0x10 ? true : false; info->prot_exec = protections & 0x20 ? 
true : false; - mb(); - atomic_set(&adev->gmc.vm_fault_info_updated, 1); + atomic_set_release(&adev->gmc.vm_fault_info_updated, 1); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 0d1dd587db5f..e716097dfde4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1843,6 +1843,10 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && + adev->rev_id == 0x3) + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { vram_info = RREG32(regBIF_BIOS_SCRATCH_4); adev->gmc.vram_vendor = vram_info & 0xF; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c index 2db9b2c63693..b1ee9473d628 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c @@ -205,13 +205,13 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev); struct mes_detect_and_reset_queue_input input; struct amdgpu_usermode_queue *queue; - struct amdgpu_userq_mgr *uqm, *tmp; unsigned int hung_db_num = 0; - int queue_id, r, i; - u32 db_array[4]; + unsigned long queue_id; + u32 db_array[8]; + int r, i; - if (db_array_size > 4) { - dev_err(adev->dev, "DB array size (%d vs 4) too small\n", + if (db_array_size > 8) { + dev_err(adev->dev, "DB array size (%d vs 8) too small\n", db_array_size); return -EINVAL; } @@ -227,16 +227,14 @@ static int mes_userq_detect_and_reset(struct amdgpu_device *adev, if (r) { dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r); } else if (hung_db_num) { - list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { - 
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { - if (queue->queue_type == queue_type) { - for (i = 0; i < hung_db_num; i++) { - if (queue->doorbell_index == db_array[i]) { - queue->state = AMDGPU_USERQ_STATE_HUNG; - atomic_inc(&adev->gpu_reset_counter); - amdgpu_userq_fence_driver_force_completion(queue); - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); - } + xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { + if (queue->queue_type == queue_type) { + for (i = 0; i < hung_db_num; i++) { + if (queue->doorbell_index == db_array[i]) { + queue->state = AMDGPU_USERQ_STATE_HUNG; + atomic_inc(&adev->gpu_reset_counter); + amdgpu_userq_fence_driver_force_completion(queue); + drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL); } } } @@ -254,7 +252,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type]; struct drm_amdgpu_userq_in *mqd_user = args_in; struct amdgpu_mqd_prop *userq_props; - struct amdgpu_gfx_shadow_info shadow_info; int r; /* Structure to initialize MQD for userqueue using generic MQD init function */ @@ -280,8 +277,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->doorbell_index = queue->doorbell_index; userq_props->fence_address = queue->fence_drv->gpu_addr; - if (adev->gfx.funcs->get_gfx_shadow_info) - adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) { struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd; @@ -298,8 +293,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, goto free_mqd; } - if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va, - max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) + r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va, + 2048); + if (r) goto free_mqd; userq_props->eop_gpu_addr = compute_mqd->eop_va; @@ -311,6 +307,14 @@ static int mes_userq_mqd_create(struct 
amdgpu_userq_mgr *uq_mgr, kfree(compute_mqd); } else if (queue->queue_type == AMDGPU_HW_IP_GFX) { struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11; + struct amdgpu_gfx_shadow_info shadow_info; + + if (adev->gfx.funcs->get_gfx_shadow_info) { + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); + } else { + r = -EINVAL; + goto free_mqd; + } if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) { DRM_ERROR("Invalid GFX MQD\n"); @@ -330,8 +334,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, userq_props->tmz_queue = mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE; - if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va, - shadow_info.shadow_size)) + r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va, + shadow_info.shadow_size); + if (r) + goto free_mqd; + r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va, + shadow_info.csa_size); + if (r) goto free_mqd; kfree(mqd_gfx_v11); @@ -350,9 +359,9 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr, r = -ENOMEM; goto free_mqd; } - - if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va, - shadow_info.csa_size)) + r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va, + 32); + if (r) goto free_mqd; userq_props->csa_addr = mqd_sdma_v11->csa_va; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e82188431f79..3a52754b5cad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -66,7 +66,8 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev); #define GFX_MES_DRAM_SIZE 0x80000 #define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE) -#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */ +#define MES11_HUNG_HQD_INFO_OFFSET 4 static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring) { @@ -368,6 +369,7 @@ static int 
mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); @@ -378,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes, mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + if (mes_rev >= 0x60) + mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; + return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), offsetof(union MESAPI__REMOVE_QUEUE, api_status)); @@ -1720,8 +1725,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int pipe, r; - adev->mes.hung_queue_db_array_size = - MES11_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET; + for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index aff06f06aeee..744e95d3984a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -47,7 +47,8 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev); #define MES_EOP_SIZE 2048 -#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 4 +#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */ +#define MES12_HUNG_HQD_INFO_OFFSET 4 static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring) { @@ -228,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, pipe, x_pkt->header.opcode); r = amdgpu_fence_wait_polling(ring, seq, timeout); - if (r < 1 || !*status_ptr) { + + 
/* + * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success). + * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information. + */ + if (r < 1 || !(lower_32_bits(*status_ptr))) { if (misc_op_str) dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n", @@ -355,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, struct mes_remove_queue_input *input) { union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt; + uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK; memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt)); @@ -365,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes, mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset; mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr; + if (mes_rev >= 0x5a) + mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset; + return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE, &mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt), @@ -1899,8 +1909,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block) struct amdgpu_device *adev = ip_block->adev; int pipe, r; - adev->mes.hung_queue_db_array_size = - MES12_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE; + adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET; + for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) { r = amdgpu_mes_init_microcode(adev, pipe); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index e5282a5d05d9..cd5b2f07edb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -222,12 +222,20 @@ send_request: adev->virt.req_init_data_ver = 0; } else { if (req == IDH_REQ_GPU_INIT_DATA) { - adev->virt.req_init_data_ver = - RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1); - - /* assume V1 in case host doesn't set version number */ - if 
(adev->virt.req_init_data_ver < 1) - adev->virt.req_init_data_ver = 1; + switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) { + case GPU_CRIT_REGION_V2: + adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2; + adev->virt.init_data_header.offset = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2); + adev->virt.init_data_header.size_kb = + RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3); + break; + default: + adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1; + adev->virt.init_data_header.offset = -1; + adev->virt.init_data_header.size_kb = 0; + break; + } } } @@ -285,7 +293,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, static int xgpu_nv_request_init_data(struct amdgpu_device *adev) { - return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA); + return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA, + 0, GPU_CRIT_REGION_V2, 0); } static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index 1c22bc11c1f8..bdfd2917e3ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev) { - u32 tmp; + u32 rev_id; - tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); - /* If it is VF or subrevision holds a non-zero value, that should be used */ - if (tmp || amdgpu_sriov_vf(adev)) - return tmp; + /* + * fetch the sub-revision field from the IP-discovery table + * (returns zero if the table entry is not populated). 
+ */ + if (amdgpu_sriov_vf(adev)) { + rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0)); + } else { + rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, + STRAP_ATI_REV_ID_DEV0_F0); + } - /* If discovery subrev is not updated, use register version */ - tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); - tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0, - STRAP_ATI_REV_ID_DEV0_F0); - - return tmp; + return rev_id; } static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6c5c7c1bf5ed..4fbe865ff279 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, pr_debug_ratelimited("Evicting process pid %d queues\n", pdd->process->lead_thread->pid); + if (dqm->dev->kfd->shared_resources.enable_mes) { + pdd->last_evict_timestamp = get_jiffies_64(); + retval = suspend_all_queues_mes(dqm); + if (retval) { + dev_err(dev, "Suspending all queues failed"); + goto out; + } + } + /* Mark all queues as evicted. Deactivate all active queues on * the qpd. 
*/ @@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, decrement_queue_count(dqm, qpd, q); if (dqm->dev->kfd->shared_resources.enable_mes) { - int err; - - err = remove_queue_mes(dqm, q, qpd); - if (err) { + retval = remove_queue_mes(dqm, q, qpd); + if (retval) { dev_err(dev, "Failed to evict queue %d\n", q->properties.queue_id); - retval = err; + goto out; } } } - pdd->last_evict_timestamp = get_jiffies_64(); - if (!dqm->dev->kfd->shared_resources.enable_mes) + + if (!dqm->dev->kfd->shared_resources.enable_mes) { + pdd->last_evict_timestamp = get_jiffies_64(); retval = execute_queues_cpsch(dqm, qpd->is_debug ? KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); + } else { + retval = resume_all_queues_mes(dqm); + if (retval) + dev_err(dev, "Resuming all queues failed"); + } out: dqm_unlock(dqm); @@ -1884,6 +1897,8 @@ fail_packet_manager_init: static int stop_cpsch(struct device_queue_manager *dqm) { + int ret = 0; + dqm_lock(dqm); if (!dqm->sched_running) { dqm_unlock(dqm); @@ -1891,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->dev->kfd->shared_resources.enable_mes) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); + ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD, false); else - remove_all_kfd_queues_mes(dqm); + ret = remove_all_kfd_queues_mes(dqm); dqm->sched_running = false; @@ -1907,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) dqm->detect_hang_info = NULL; dqm_unlock(dqm); - return 0; + return ret; } static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, @@ -3098,61 +3114,17 @@ out: return ret; } -static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm, - struct qcm_process_device *qpd) -{ - struct device *dev = dqm->dev->adev->dev; - int ret = 0; - - /* Check if process is already 
evicted */ - dqm_lock(dqm); - if (qpd->evicted) { - /* Increment the evicted count to make sure the - * process stays evicted before its terminated. - */ - qpd->evicted++; - dqm_unlock(dqm); - goto out; - } - dqm_unlock(dqm); - - ret = suspend_all_queues_mes(dqm); - if (ret) { - dev_err(dev, "Suspending all queues failed"); - goto out; - } - - ret = dqm->ops.evict_process_queues(dqm, qpd); - if (ret) { - dev_err(dev, "Evicting process queues failed"); - goto out; - } - - ret = resume_all_queues_mes(dqm); - if (ret) - dev_err(dev, "Resuming all queues failed"); - -out: - return ret; -} - int kfd_evict_process_device(struct kfd_process_device *pdd) { struct device_queue_manager *dqm; struct kfd_process *p; - int ret = 0; p = pdd->process; dqm = pdd->dev->dqm; WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid); - if (dqm->dev->kfd->shared_resources.enable_mes) - ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd); - else - ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); - - return ret; + return dqm->ops.evict_process_queues(dqm, &pdd->qpd); } int reserve_debug_trap_vmid(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 4ceb251312a6..d76fb61869c7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -28,6 +28,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" #include "amdgpu_ras.h" +#include "amdgpu_ras_mgr.h" /* * GFX9 SQ Interrupts @@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, kfd_signal_poison_consumed_event(dev, pasid); - event_id = amdgpu_ras_acquire_event_id(dev->adev, type); + if (amdgpu_uniras_enabled(dev->adev)) + event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev, + RAS_SEQNO_TYPE_POISON_CONSUMPTION); + else + event_id = amdgpu_ras_acquire_event_id(dev->adev, type); RAS_EVENT_LOG(dev->adev, event_id, "poison is 
consumed by client %d, kick off gpu reset flow\n", client_id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 59a5a3fea65d..46c84fc60af1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -21,7 +21,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ #include -#include #include #include #include diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index 2eebf67f9c2c..2b7fd442d29c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -31,7 +31,6 @@ #include #include #include -#include #include "kfd_priv.h" #include "kfd_svm.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ddfe30c13e9d..a085faac9fe1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1083,7 +1083,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) * for auto suspend */ if (pdd->runtime_inuse) { - pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev); pdd->runtime_inuse = false; } @@ -1162,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work) release_work); struct dma_fence *ef; - kfd_process_dequeue_from_all_devices(p); - pqm_uninit(&p->pqm); - /* * If GPU in reset, user queues may still running, wait for reset complete. */ @@ -1226,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p) cancel_delayed_work_sync(&p->eviction_work); cancel_delayed_work_sync(&p->restore_work); + /* + * Dequeue and destroy user queues, it is not safe for GPU to access + * system memory after mmu release notifier callback returns because + * exit_mmap free process memory afterwards. 
+ */ + kfd_process_dequeue_from_all_devices(p); + pqm_uninit(&p->pqm); + for (i = 0; i < p->n_pdds; i++) { struct kfd_process_device *pdd = p->pdds[i]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9d72411c3379..ffb7b36e577c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1698,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, start = map_start << PAGE_SHIFT; end = (map_last + 1) << PAGE_SHIFT; for (addr = start; !r && addr < end; ) { - struct hmm_range *hmm_range = NULL; + struct amdgpu_hmm_range *range = NULL; unsigned long map_start_vma; unsigned long map_last_vma; struct vm_area_struct *vma; @@ -1737,12 +1737,18 @@ static int svm_range_validate_and_map(struct mm_struct *mm, } WRITE_ONCE(p->svms.faulting_task, current); - r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, - readonly, owner, - &hmm_range); + range = amdgpu_hmm_range_alloc(NULL); + if (likely(range)) + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, + readonly, owner, range); + else + r = -ENOMEM; WRITE_ONCE(p->svms.faulting_task, NULL); - if (r) + if (r) { + amdgpu_hmm_range_free(range); + range = NULL; pr_debug("failed %d to get svm range pages\n", r); + } } else { r = -EFAULT; } @@ -1750,7 +1756,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm, if (!r) { offset = (addr >> PAGE_SHIFT) - prange->start; r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, - hmm_range->hmm_pfns); + range->hmm_range.hmm_pfns); if (r) pr_debug("failed %d to dma map range\n", r); } @@ -1758,13 +1764,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm, svm_range_lock(prange); /* Free backing memory of hmm_range if it was initialized - * Overrride return value to TRY AGAIN only if prior returns + * Override return value to TRY AGAIN only if prior returns * were successful */ - if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && 
!r) { + if (range && !amdgpu_hmm_range_valid(range) && !r) { pr_debug("hmm update the range, need validate again\n"); r = -EAGAIN; } + /* Free the hmm range */ + if (range) + amdgpu_hmm_range_free(range); + if (!r && !list_empty(&prange->child_list)) { pr_debug("range split by unmap in parallel, validate again\n"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 01c7a4877904..a63dfc95b602 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -31,7 +31,6 @@ #include #include #include -#include #include "amdgpu.h" #include "kfd_priv.h" diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0d03e324d5b9..bb0fe91a1601 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) { - struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); + struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT); if (!offload_work) { drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n"); return; } - struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL); + struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT); if (!adjust_copy) { drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n"); kfree(offload_work); @@ -2085,8 +2085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_hardware_init(adev->dm.dc); - adev->dm.restore_backlight = true; - adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); if (!adev->dm.hpd_rx_offload_wq) { drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); @@ -3394,6 +3392,67 @@ static void 
apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev, } } +/** + * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks + * @adev: amdgpu device pointer + * + * Iterates through all DC links and dumps information about local and remote + * (MST) sinks. Should be called after connector detection is complete to see + * the final state of all links. + */ +static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct drm_device *dev = adev_to_drm(adev); + int li; + + if (!dc) + return; + + for (li = 0; li < dc->link_count; li++) { + struct dc_link *l = dc->links[li]; + const char *name = NULL; + int rs; + + if (!l) + continue; + if (l->local_sink && l->local_sink->edid_caps.display_name[0]) + name = l->local_sink->edid_caps.display_name; + else + name = "n/a"; + + drm_dbg_kms(dev, + "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n", + li, + l->local_sink, + l->type, + l->local_sink ? 
l->local_sink->sink_signal : SIGNAL_TYPE_NONE, + l->sink_count, + name, + l->dpcd_caps.is_mst_capable, + l->mst_stream_alloc_table.stream_count); + + /* Dump remote (MST) sinks if any */ + for (rs = 0; rs < l->sink_count; rs++) { + struct dc_sink *rsink = l->remote_sinks[rs]; + const char *rname = NULL; + + if (!rsink) + continue; + if (rsink->edid_caps.display_name[0]) + rname = rsink->edid_caps.display_name; + else + rname = "n/a"; + drm_dbg_kms(dev, + " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n", + li, rs, + rsink, + rsink->sink_signal, + rname); + } + } +} + static int dm_resume(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3442,7 +3501,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_resume(dm->dc); - adev->dm.restore_backlight = true; amdgpu_dm_irq_resume_early(adev); @@ -3579,6 +3637,12 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) } drm_connector_list_iter_end(&iter); + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); @@ -3789,7 +3853,9 @@ void amdgpu_dm_update_connector_after_detect( drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", aconnector->connector_id, aconnector->dc_sink, sink); - guard(mutex)(&dev->mode_config.mutex); + /* When polling, DRM has already locked the mutex for us. */ + if (!drm_kms_helper_is_poll_worker()) + mutex_lock(&dev->mode_config.mutex); /* * 1. Update status of the drm connector @@ -3852,6 +3918,10 @@ void amdgpu_dm_update_connector_after_detect( } update_subconnector_property(aconnector); + + /* When polling, the mutex will be unlocked for us by DRM. 
*/ + if (!drm_kms_helper_is_poll_worker()) + mutex_unlock(&dev->mode_config.mutex); } static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) @@ -5136,6 +5206,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm, static void setup_backlight_device(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector) { + struct amdgpu_dm_backlight_caps *caps; struct dc_link *link = aconnector->dc_link; int bl_idx = dm->num_of_edps; @@ -5155,6 +5226,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm, dm->num_of_edps++; update_connector_ext_caps(aconnector); + caps = &dm->backlight_caps[aconnector->bl_idx]; + + /* Only offer ABM property when non-OLED and user didn't turn off by module parameter */ + if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0) + drm_object_attach_property(&aconnector->base.base, + dm->adev->mode_info.abm_level_property, + ABM_SYSFS_CONTROL); } static void amdgpu_set_panel_orientation(struct drm_connector *connector); @@ -5410,6 +5488,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) amdgpu_set_panel_orientation(&aconnector->base); } + /* Debug dump: list all DC links and their associated sinks after detection + * is complete for all connectors. This provides a comprehensive view of the + * final state without repeating the dump for each connector. + */ + amdgpu_dm_dump_links_and_sinks(adev); + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -7148,29 +7232,101 @@ finish: return stream; } +/** + * amdgpu_dm_connector_poll() - Poll a connector to see if it's connected to a display + * + * Used for connectors that don't support HPD (hotplug detection) + * to periodically checked whether the connector is connected to a display. 
+ */ +static enum drm_connector_status +amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force) +{ + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc_link *link = aconnector->dc_link; + enum dc_connection_type conn_type = dc_connection_none; + enum drm_connector_status status = connector_status_disconnected; + + /* When we determined the connection using DAC load detection, + * do NOT poll the connector do detect disconnect because + * that would run DAC load detection again which can cause + * visible visual glitches. + * + * Only allow to poll such a connector again when forcing. + */ + if (!force && link->local_sink && link->type == dc_connection_dac_load) + return connector->status; + + mutex_lock(&aconnector->hpd_lock); + + if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) && + conn_type != dc_connection_none) { + mutex_lock(&adev->dm.dc_lock); + + /* Only call full link detection when a sink isn't created yet, + * ie. just when the display is plugged in, otherwise we risk flickering. + */ + if (link->local_sink || + dc_link_detect(link, DETECT_REASON_HPD)) + status = connector_status_connected; + + mutex_unlock(&adev->dm.dc_lock); + } + + if (connector->status != status) { + if (status == connector_status_disconnected) { + if (link->local_sink) + dc_sink_release(link->local_sink); + + link->local_sink = NULL; + link->dpcd_sink_count = 0; + link->type = dc_connection_none; + } + + amdgpu_dm_update_connector_after_detect(aconnector); + } + + mutex_unlock(&aconnector->hpd_lock); + return status; +} + +/** + * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display + * + * A connector is considered connected when it has a sink that is not NULL. + * For connectors that support HPD (hotplug detection), the connection is + * handled in the HPD interrupt. 
+ * For connectors that may not support HPD, such as analog connectors, + * DRM will call this function repeatedly to poll them. + * + * Notes: + * 1. This interface is NOT called in context of HPD irq. + * 2. This interface *is called* in context of user-mode ioctl. Which + * makes it a bad place for *any* MST-related activity. + */ static enum drm_connector_status amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) { - bool connected; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - /* - * Notes: - * 1. This interface is NOT called in context of HPD irq. - * 2. This interface *is called* in context of user-mode ioctl. Which - * makes it a bad place for *any* MST-related activity. - */ - - if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && - !aconnector->fake_enable) - connected = (aconnector->dc_sink != NULL); - else - connected = (aconnector->base.force == DRM_FORCE_ON || - aconnector->base.force == DRM_FORCE_ON_DIGITAL); - update_subconnector_property(aconnector); - return (connected ? connector_status_connected : + if (aconnector->base.force == DRM_FORCE_ON || + aconnector->base.force == DRM_FORCE_ON_DIGITAL) + return connector_status_connected; + else if (aconnector->base.force == DRM_FORCE_OFF) + return connector_status_disconnected; + + /* Poll analog connectors and only when either + * disconnected or connected to an analog display. + */ + if (drm_kms_helper_is_poll_worker() && + dc_connector_supports_analog(aconnector->dc_link->link_id.id) && + (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog)) + return amdgpu_dm_connector_poll(aconnector, force); + + return (aconnector->dc_sink ? 
connector_status_connected : connector_status_disconnected); } @@ -7221,6 +7377,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { dm_new_state->underscan_enable = val; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + switch (val) { + case ABM_SYSFS_CONTROL: + dm_new_state->abm_sysfs_forbidden = false; + break; + case ABM_LEVEL_OFF: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE; + break; + default: + dm_new_state->abm_sysfs_forbidden = true; + dm_new_state->abm_level = val; + }; + ret = 0; } return ret; @@ -7263,6 +7433,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, } else if (property == adev->mode_info.underscan_property) { *val = dm_state->underscan_enable; ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + if (!dm_state->abm_sysfs_forbidden) + *val = ABM_SYSFS_CONTROL; + else + *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 
+ dm_state->abm_level : 0; + ret = 0; } return ret; @@ -7315,10 +7492,16 @@ static ssize_t panel_power_savings_store(struct device *device, return -EINVAL; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - to_dm_connector_state(connector->state)->abm_level = val ?: - ABM_LEVEL_IMMEDIATE_DISABLE; + if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden) + ret = -EBUSY; + else + to_dm_connector_state(connector->state)->abm_level = val ?: + ABM_LEVEL_IMMEDIATE_DISABLE; drm_modeset_unlock(&dev->mode_config.connection_mutex); + if (ret) + return ret; + drm_kms_helper_hotplug_event(dev); return count; @@ -8158,7 +8341,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, return 0; } -static int to_drm_connector_type(enum signal_type st) +static int to_drm_connector_type(enum signal_type st, uint32_t connector_id) { switch (st) { case SIGNAL_TYPE_HDMI_TYPE_A: @@ -8174,6 +8357,10 @@ static int to_drm_connector_type(enum signal_type st) return DRM_MODE_CONNECTOR_DisplayPort; case SIGNAL_TYPE_DVI_DUAL_LINK: case SIGNAL_TYPE_DVI_SINGLE_LINK: + if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII || + connector_id == CONNECTOR_ID_DUAL_LINK_DVII) + return DRM_MODE_CONNECTOR_DVII; + return DRM_MODE_CONNECTOR_DVID; case SIGNAL_TYPE_VIRTUAL: return DRM_MODE_CONNECTOR_VIRTUAL; @@ -8225,7 +8412,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector) static struct drm_display_mode * amdgpu_dm_create_common_mode(struct drm_encoder *encoder, - char *name, + const char *name, int hdisplay, int vdisplay) { struct drm_device *dev = encoder->dev; @@ -8247,6 +8434,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder, } +static const struct amdgpu_dm_mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; +} common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 
1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} +}; + static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -8257,23 +8462,6 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, to_amdgpu_dm_connector(connector); int i; int n; - struct mode_size { - char name[DRM_DISPLAY_MODE_LEN]; - int w; - int h; - } common_modes[] = { - { "640x480", 640, 480}, - { "800x600", 800, 600}, - { "1024x768", 1024, 768}, - { "1280x720", 1280, 720}, - { "1280x800", 1280, 800}, - {"1280x1024", 1280, 1024}, - { "1440x900", 1440, 900}, - {"1680x1050", 1680, 1050}, - {"1600x1200", 1600, 1200}, - {"1920x1080", 1920, 1080}, - {"1920x1200", 1920, 1200} - }; if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) && (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) @@ -8474,6 +8662,10 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect if (!(amdgpu_freesync_vid_mode && drm_edid)) return; + if (!amdgpu_dm_connector->dc_sink || amdgpu_dm_connector->dc_sink->edid_caps.analog || + !dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version)) + return; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) amdgpu_dm_connector->num_modes += add_fs_modes(amdgpu_dm_connector); @@ -8497,6 +8689,15 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) amdgpu_dm_connector->num_modes += drm_add_modes_noedid(connector, 1920, 1080); + + if (amdgpu_dm_connector->dc_sink->edid_caps.analog) { + /* Analog monitor connected by DAC load detection. + * Add common modes. It will be up to the user to select one that works. 
+ */ + for (int i = 0; i < ARRAY_SIZE(common_modes); i++) + amdgpu_dm_connector->num_modes += drm_add_modes_noedid( + connector, common_modes[i].w, common_modes[i].h); + } } else { amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); if (encoder) @@ -8565,6 +8766,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, case DRM_MODE_CONNECTOR_DVID: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_VGA: + aconnector->base.polled = + DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + break; default: break; } @@ -8766,7 +8972,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, goto out_free; } - connector_type = to_drm_connector_type(link->connector_signal); + connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id); res = drm_connector_init_with_ddc( dm->ddev, @@ -9969,6 +10175,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -10088,6 +10295,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -10144,16 +10352,13 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, * to fix a flicker issue. * It will cause the dm->actual_brightness is not the current panel brightness * level. (the dm->brightness is the correct panel level) - * So we set the backlight level with dm->brightness value after initial - * set mode. 
Use restore_backlight flag to avoid setting backlight level - * for every subsequent mode set. + * So we set the backlight level with dm->brightness value after set mode */ - if (dm->restore_backlight) { + if (set_backlight_level) { for (i = 0; i < dm->num_of_edps; i++) { if (dm->backlight_dev[i]) amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); } - dm->restore_backlight = false; } } @@ -10523,7 +10728,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * Here we create an empty update on each plane. * To fix this, DC should permit updating only stream properties. */ - dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL); if (!dummy_updates) { drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n"); continue; @@ -12450,7 +12655,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, int j = state->num_private_objs-1; dm_atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); /* If i is not at the end of the array then the * last element needs to be moved to where i was @@ -12461,7 +12666,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, state->private_objs[j]; state->private_objs[j].ptr = NULL; - state->private_objs[j].state = NULL; + state->private_objs[j].state_to_destroy = NULL; state->private_objs[j].old_state = NULL; state->private_objs[j].new_state = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 009f206226f0..5a7aa903bd3c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -630,13 +630,6 @@ struct amdgpu_display_manager { */ u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; - /** - * @restore_backlight: - * - * Flag to indicate whether to restore backlight after modeset. 
- */ - bool restore_backlight; - /** * @aux_hpd_discon_quirk: * @@ -1000,6 +993,7 @@ struct dm_connector_state { bool underscan_enable; bool freesync_capable; bool update_hdcp; + bool abm_sysfs_forbidden; uint8_t abm_level; int vcpi_slots; uint64_t pbn; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 1ec9d03ad747..38f9ea313dcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) struct vblank_control_work *vblank_work = container_of(work, struct vblank_control_work, work); struct amdgpu_display_manager *dm = vblank_work->dm; + struct amdgpu_device *adev = drm_to_adev(dm->ddev); + int r; mutex_lock(&dm->dc_lock); @@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work) if (dm->active_vblank_irq_count == 0) { dc_post_update_surfaces_to_stream(dm->dc); + + r = amdgpu_dpm_pause_power_profile(adev, true); + if (r) + dev_warn(adev->dev, "failed to set default power profile mode\n"); + dc_allow_idle_optimizations(dm->dc, true); + + r = amdgpu_dpm_pause_power_profile(adev, false); + if (r) + dev_warn(adev->dev, "failed to restore the power profile mode\n"); } mutex_unlock(&dm->dc_lock); @@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) int irq_type; int rc = 0; - if (acrtc->otg_inst == -1) - goto skip; + if (enable && !acrtc->base.enabled) { + drm_dbg_vbl(crtc->dev, + "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n", + acrtc->crtc_id, acrtc->base.enabled); + return -EINVAL; + } irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); @@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) return rc; } #endif -skip: + if (amdgpu_in_reset(adev)) return 0; diff --git 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f263e1a4537e..cb4bb67289a4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -759,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us int max_param_num = 11; enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; bool disable_hpd = false; + bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID; bool valid_test_pattern = false; uint8_t param_nums = 0; /* init with default 80bit custom pattern */ @@ -850,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * because it might have been disabled after a test pattern was set. * AUX depends on HPD * sequence dependent, do not move! */ - if (!disable_hpd) + if (supports_hpd && !disable_hpd) dc_link_enable_hpd(link); prefer_link_settings.lane_count = link->verified_link_cap.lane_count; @@ -888,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us * Need disable interrupt to avoid SW driver disable DP output. This is * done after the test pattern is set. 
*/ - if (valid_test_pattern && disable_hpd) + if (valid_test_pattern && supports_hpd && disable_hpd) dc_link_disable_hpd(link); kfree(wr_buf); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index fe100e4c9801..eb2c587b0b9b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct edid_caps->panel_patch.remove_sink_ext_caps = true; break; case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): + case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171): drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id); edid_caps->panel_patch.disable_colorimetry = true; break; @@ -130,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps( edid_caps->serial_number = edid_buf->serial; edid_caps->manufacture_week = edid_buf->mfg_week; edid_caps->manufacture_year = edid_buf->mfg_year; + edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL); drm_edid_get_monitor_name(edid_buf, edid_caps->display_name, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a1c722112c22..0a2a3f233a0e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -476,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h; struct list_head *hnd_list_l; @@ -512,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_disable(dev); } void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) @@ -537,6 +541,7 @@ void 
amdgpu_dm_irq_resume_early(struct amdgpu_device *adev) void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); int src; struct list_head *hnd_list_h, *hnd_list_l; unsigned long irq_table_flags; @@ -557,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); } /* @@ -893,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_connector_list_iter iter; int irq_type; int i; + bool use_polling = false; /* First, clear all hpd and hpdrx interrupts */ for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { @@ -906,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct amdgpu_dm_connector *amdgpu_dm_connector; const struct dc_link *dc_link; + use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD; + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; @@ -947,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + if (use_polling) + drm_kms_helper_poll_init(dev); } /** @@ -997,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + if (dev->mode_config.poll_enabled) + drm_kms_helper_poll_fini(dev); } diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index dc943abd6dba..7277ed21552f 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -36,7 +36,7 @@ DC_LIBS += dcn30 DC_LIBS += dcn301 DC_LIBS += dcn31 DC_LIBS += dml -DC_LIBS += dml2 +DC_LIBS += dml2_0 DC_LIBS += soc_and_ip_translator endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 154fd2c18e88..4120d6c4c5e4 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp, ATOM_OBJECT *object); static struct device_id device_type_from_device_id(uint16_t device_id); static uint32_t signal_to_ss_id(enum as_signal_type signal); -static uint32_t get_support_mask_for_device_id(struct device_id device_id); +static uint32_t get_support_mask_for_device_id( + enum dal_device_type device_type, + uint32_t enum_id); static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record( struct bios_parser *bp, ATOM_OBJECT *object); @@ -441,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4( le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10; info->pll_info.max_output_pxl_clk_pll_frequency = le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10; + info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10; if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support) /* Since there is no information on the SS, report conservative @@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1( info->external_clock_source_frequency_for_dp = le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10; info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level; + info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10; /* There should be only one entry in the SS info table for Memory Clock */ @@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control( return bp->cmd_tbl.transmitter_control(bp, cntl); } +static enum bp_result bios_parser_select_crtc_source( + struct dc_bios *dcb, + struct bp_crtc_source_select *bp_params) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + + if (!bp->cmd_tbl.select_crtc_source) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.select_crtc_source(bp, bp_params); +} + static enum bp_result bios_parser_encoder_control( struct dc_bios *dcb, struct bp_encoder_control *cntl) { struct bios_parser *bp = BP_FROM_DCB(dcb); + if 
(cntl->engine_id == ENGINE_ID_DACA) { + if (!bp->cmd_tbl.dac1_encoder_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.dac1_encoder_control( + bp, cntl->action == ENCODER_CONTROL_ENABLE, + cntl->pixel_clock, ATOM_DAC1_PS2); + } else if (cntl->engine_id == ENGINE_ID_DACB) { + if (!bp->cmd_tbl.dac2_encoder_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.dac2_encoder_control( + bp, cntl->action == ENCODER_CONTROL_ENABLE, + cntl->pixel_clock, ATOM_DAC1_PS2); + } + if (!bp->cmd_tbl.dig_encoder_control) return BP_RESULT_FAILURE; return bp->cmd_tbl.dig_encoder_control(bp, cntl); } +static enum bp_result bios_parser_dac_load_detection( + struct dc_bios *dcb, + enum engine_id engine_id, + enum dal_device_type device_type, + uint32_t enum_id) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + struct dc_context *ctx = dcb->ctx; + struct bp_load_detection_parameters bp_params = {0}; + enum bp_result bp_result; + uint32_t bios_0_scratch; + uint32_t device_id_mask = 0; + + bp_params.engine_id = engine_id; + bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id); + + if (engine_id != ENGINE_ID_DACA && + engine_id != ENGINE_ID_DACB) + return BP_RESULT_UNSUPPORTED; + + if (!bp->cmd_tbl.dac_load_detection) + return BP_RESULT_UNSUPPORTED; + + if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT) + device_id_mask = ATOM_S0_CRT1_MASK; + else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT) + device_id_mask = ATOM_S0_CRT2_MASK; + else + return BP_RESULT_UNSUPPORTED; + + /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */ + bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0); + bios_0_scratch &= ~device_id_mask; + dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch); + + bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params); + + if (bp_result != BP_RESULT_OK) + return bp_result; + + bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0); + + if (bios_0_scratch & 
device_id_mask) + return BP_RESULT_OK; + + return BP_RESULT_FAILURE; +} + static enum bp_result bios_parser_adjust_pixel_clock( struct dc_bios *dcb, struct bp_adjust_pixel_clock_parameters *bp_params) @@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported( { struct bios_parser *bp = BP_FROM_DCB(dcb); - uint32_t mask = get_support_mask_for_device_id(id); + uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id); return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0; } @@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal) return clk_id_ss; } -static uint32_t get_support_mask_for_device_id(struct device_id device_id) +static uint32_t get_support_mask_for_device_id( + enum dal_device_type device_type, + uint32_t enum_id) { - enum dal_device_type device_type = device_id.device_type; - uint32_t enum_id = device_id.enum_id; - switch (device_type) { case DEVICE_TYPE_LCD: switch (enum_id) { @@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = { .is_device_id_supported = bios_parser_is_device_id_supported, /* COMMANDS */ + .select_crtc_source = bios_parser_select_crtc_source, + .encoder_control = bios_parser_encoder_control, + .dac_load_detection = bios_parser_dac_load_detection, + .transmitter_control = bios_parser_transmitter_control, .enable_crtc = bios_parser_enable_crtc, diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 58e88778da7f..22457f417e65 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp); static void init_set_pixel_clock(struct bios_parser *bp); static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp); static void init_adjust_display_pll(struct bios_parser *bp); +static void init_select_crtc_source(struct bios_parser *bp); 
static void init_dac_encoder_control(struct bios_parser *bp); +static void init_dac_load_detection(struct bios_parser *bp); static void init_dac_output_control(struct bios_parser *bp); static void init_set_crtc_timing(struct bios_parser *bp); static void init_enable_crtc(struct bios_parser *bp); @@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp) init_set_pixel_clock(bp); init_enable_spread_spectrum_on_ppll(bp); init_adjust_display_pll(bp); + init_select_crtc_source(bp); init_dac_encoder_control(bp); + init_dac_load_detection(bp); init_dac_output_control(bp); init_set_crtc_timing(bp); init_enable_crtc(bp); @@ -1609,6 +1613,198 @@ static enum bp_result adjust_display_pll_v3( return result; } +/******************************************************************************* + ******************************************************************************** + ** + ** SELECT CRTC SOURCE + ** + ******************************************************************************** + *******************************************************************************/ + +static enum bp_result select_crtc_source_v1( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); +static enum bp_result select_crtc_source_v2( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); +static enum bp_result select_crtc_source_v3( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); + +static void init_select_crtc_source(struct bios_parser *bp) +{ + switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) { + case 1: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v1; + break; + case 2: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v2; + break; + case 3: + bp->cmd_tbl.select_crtc_source = select_crtc_source_v3; + break; + default: + bp->cmd_tbl.select_crtc_source = NULL; + break; + } +} + +static enum bp_result select_crtc_source_v1( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum 
bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PS_ALLOCATION params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + switch (bp_params->engine_id) { + case ENGINE_ID_DACA: + params.ucDevice = ATOM_DEVICE_CRT1_INDEX; + break; + case ENGINE_ID_DACB: + params.ucDevice = ATOM_DEVICE_CRT2_INDEX; + break; + default: + return BP_RESULT_BADINPUT; + } + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +static bool select_crtc_source_v2_encoder_id( + enum engine_id engine_id, uint8_t *out_encoder_id) +{ + uint8_t encoder_id = 0; + + switch (engine_id) { + case ENGINE_ID_DIGA: + encoder_id = ASIC_INT_DIG1_ENCODER_ID; + break; + case ENGINE_ID_DIGB: + encoder_id = ASIC_INT_DIG2_ENCODER_ID; + break; + case ENGINE_ID_DIGC: + encoder_id = ASIC_INT_DIG3_ENCODER_ID; + break; + case ENGINE_ID_DIGD: + encoder_id = ASIC_INT_DIG4_ENCODER_ID; + break; + case ENGINE_ID_DIGE: + encoder_id = ASIC_INT_DIG5_ENCODER_ID; + break; + case ENGINE_ID_DIGF: + encoder_id = ASIC_INT_DIG6_ENCODER_ID; + break; + case ENGINE_ID_DIGG: + encoder_id = ASIC_INT_DIG7_ENCODER_ID; + break; + case ENGINE_ID_DACA: + encoder_id = ASIC_INT_DAC1_ENCODER_ID; + break; + case ENGINE_ID_DACB: + encoder_id = ASIC_INT_DAC2_ENCODER_ID; + break; + default: + return false; + } + + *out_encoder_id = encoder_id; + return true; +} + +static bool select_crtc_source_v2_encoder_mode( + enum signal_type signal_type, uint8_t *out_encoder_mode) +{ + uint8_t encoder_mode = 0; + + switch (signal_type) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + encoder_mode = ATOM_ENCODER_MODE_DVI; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + encoder_mode = ATOM_ENCODER_MODE_HDMI; + break; + case SIGNAL_TYPE_LVDS: + encoder_mode = ATOM_ENCODER_MODE_LVDS; + break; + case SIGNAL_TYPE_RGB: + encoder_mode = ATOM_ENCODER_MODE_CRT; + break; + case SIGNAL_TYPE_DISPLAY_PORT: + encoder_mode = 
ATOM_ENCODER_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + encoder_mode = ATOM_ENCODER_MODE_DP_MST; + break; + case SIGNAL_TYPE_EDP: + encoder_mode = ATOM_ENCODER_MODE_DP; + break; + default: + return false; + } + + *out_encoder_mode = encoder_mode; + return true; +} + +static enum bp_result select_crtc_source_v2( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PARAMETERS_V3 params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + if (!select_crtc_source_v2_encoder_id( + bp_params->engine_id, + ¶ms.ucEncoderID)) + return BP_RESULT_BADINPUT; + if (!select_crtc_source_v2_encoder_mode( + bp_params->sink_signal, + ¶ms.ucEncodeMode)) + return BP_RESULT_BADINPUT; + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + +static enum bp_result select_crtc_source_v3( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + SELECT_CRTC_SOURCE_PARAMETERS_V3 params; + + if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, ¶ms.ucCRTC)) + return BP_RESULT_BADINPUT; + + if (!select_crtc_source_v2_encoder_id( + bp_params->engine_id, + ¶ms.ucEncoderID)) + return BP_RESULT_BADINPUT; + if (!select_crtc_source_v2_encoder_mode( + bp_params->sink_signal, + ¶ms.ucEncodeMode)) + return BP_RESULT_BADINPUT; + + params.ucDstBpc = bp_params->bit_depth; + + if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) + result = BP_RESULT_OK; + + return result; +} + /******************************************************************************* ******************************************************************************** ** @@ -1708,6 +1904,96 @@ static enum bp_result dac2_encoder_control_v1( return result; } +/******************************************************************************* + 
******************************************************************************** + ** + ** DAC LOAD DETECTION + ** + ******************************************************************************** + *******************************************************************************/ + +static enum bp_result dac_load_detection_v1( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); + +static enum bp_result dac_load_detection_v3( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); + +static void init_dac_load_detection(struct bios_parser *bp) +{ + switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) { + case 1: + case 2: + bp->cmd_tbl.dac_load_detection = dac_load_detection_v1; + break; + case 3: + default: + bp->cmd_tbl.dac_load_detection = dac_load_detection_v3; + break; + } +} + +static void dac_load_detect_prepare_params( + struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params, + enum engine_id engine_id, + uint16_t device_id, + uint8_t misc) +{ + uint8_t dac_type = ATOM_DAC_A; + + if (engine_id == ENGINE_ID_DACB) + dac_type = ATOM_DAC_B; + + params->sDacload.usDeviceID = cpu_to_le16(device_id); + params->sDacload.ucDacType = dac_type; + params->sDacload.ucMisc = misc; +} + +static enum bp_result dac_load_detection_v1( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + DAC_LOAD_DETECTION_PS_ALLOCATION params; + + dac_load_detect_prepare_params( + &params, + bp_params->engine_id, + bp_params->device_id, + 0); + + if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params)) + result = BP_RESULT_OK; + + return result; +} + +static enum bp_result dac_load_detection_v3( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params) +{ + enum bp_result result = BP_RESULT_FAILURE; + DAC_LOAD_DETECTION_PS_ALLOCATION params; + + uint8_t misc = 0; + + if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT || + bp_params->device_id == 
ATOM_DEVICE_TV1_SUPPORT) + misc = DAC_LOAD_MISC_YPrPb; + + dac_load_detect_prepare_params( + ¶ms, + bp_params->engine_id, + bp_params->device_id, + misc); + + if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params)) + result = BP_RESULT_OK; + + return result; +} + /******************************************************************************* ******************************************************************************** ** diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h index ad533775e724..e89b1ba0048b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h @@ -52,6 +52,9 @@ struct cmd_tbl { enum bp_result (*adjust_display_pll)( struct bios_parser *bp, struct bp_adjust_pixel_clock_parameters *bp_params); + enum bp_result (*select_crtc_source)( + struct bios_parser *bp, + struct bp_crtc_source_select *bp_params); enum bp_result (*dac1_encoder_control)( struct bios_parser *bp, bool enable, @@ -68,6 +71,9 @@ struct cmd_tbl { enum bp_result (*dac2_output_control)( struct bios_parser *bp, bool enable); + enum bp_result (*dac_load_detection)( + struct bios_parser *bp, + struct bp_load_detection_parameters *bp_params); enum bp_result (*set_crtc_timing)( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 9e63fa72101c..db687a13174d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -509,16 +509,16 @@ void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_b regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || 
regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass > 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index b315ed91e010..3a881451e9da 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -40,7 +40,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" - +#include "reg_helper.h" #include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ @@ -48,9 +48,43 @@ #include "link_service.h" +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 8 + +struct IP_BASE_INSTANCE { + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE { + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + +static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } }, + { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } }, + { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } }, + { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } }, + { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } }, + { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } }; + +#define regCLK1_CLK0_CURRENT_CNT 0x0314 +#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0 +#define 
regCLK1_CLK1_CURRENT_CNT 0x0315 +#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK2_CURRENT_CNT 0x0316 +#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK3_CURRENT_CNT 0x0317 +#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK4_CURRENT_CNT 0x0318 +#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0 +#define regCLK1_CLK5_CURRENT_CNT 0x0319 +#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0 + #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) +#define REG(reg_name) \ + (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) + #define UNSUPPORTED_DCFCLK 10000000 #define MIN_DPP_DISP_CLK 100000 @@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + // read dtbclk + internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT); + + // read dcfclk + internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT); + + // read dppclk + internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT); + + // read dprefclk + internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT); + + // read dispclk + internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT); +} + static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { + struct dcn35_clk_internal internal = {0}; + + dcn315_dump_clk_registers_internal(&internal, clk_mgr_base); + + regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10; + regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10; + regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10; + regs_and_bypass->dppclk = 
internal.CLK1_CLK1_CURRENT_CNT / 10; + regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; return; } @@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, - .init_clocks = dcn31_init_clocks, + .init_clocks = dcn315_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, .are_clock_states_equal = dcn31_are_clock_states_equal, .notify_wm_ranges = dcn315_notify_wm_ranges }; extern struct clk_mgr_funcs dcn3_fpga_funcs; +void dcn315_init_clocks(struct clk_mgr *clk_mgr) +{ + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int); + struct clk_log_info log_info = {0}; + + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; + clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; + + dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info); + clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000; +} + void dcn315_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_dcn315 *clk_mgr, @@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct( /* Saved clocks configured at boot for debug purposes */ dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); + clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000; clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h index ac36ddf5dd1a..642ae3d4a790 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h @@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +void dcn315_init_clocks(struct clk_mgr *clk_mgr); void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN315_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index b11383fba35f..35d20a663d67 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps); if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz) new_clocks->ref_dtbclk_khz = 600000; + else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000) + new_clocks->ref_dtbclk_khz = 0; /* * if it is safe to lower, but we are already in the lower state, we don't have to do anything @@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT); - if (actual_dtbclk) { + if (actual_dtbclk > 590000) { clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz; clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } @@ -633,16 +635,16 @@ static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10; regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dppclk_bypass < 0 || regs_and_bypass->dppclk_bypass > 4) + if (regs_and_bypass->dppclk_bypass 
> 4) regs_and_bypass->dppclk_bypass = 0; regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dcfclk_bypass < 0 || regs_and_bypass->dcfclk_bypass > 4) + if (regs_and_bypass->dcfclk_bypass > 4) regs_and_bypass->dcfclk_bypass = 0; regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dispclk_bypass < 0 || regs_and_bypass->dispclk_bypass > 4) + if (regs_and_bypass->dispclk_bypass > 4) regs_and_bypass->dispclk_bypass = 0; regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007; - if (regs_and_bypass->dprefclk_bypass < 0 || regs_and_bypass->dprefclk_bypass > 4) + if (regs_and_bypass->dprefclk_bypass > 4) regs_and_bypass->dprefclk_bypass = 0; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5f2d5638c819..b720e007c654 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -83,7 +83,7 @@ #include "hw_sequencer_private.h" #if defined(CONFIG_DRM_AMD_DC_FP) -#include "dml2/dml2_internal_types.h" +#include "dml2_0/dml2_internal_types.h" #include "soc_and_ip_translator.h" #endif @@ -148,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build"; /* Private functions */ -static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) +static inline void elevate_update_type( + struct surface_update_descriptor *descriptor, + enum surface_update_type new_type, + enum dc_lock_descriptor new_locks +) { - if (new > *original) - *original = new; + if (new_type > descriptor->update_type) + descriptor->update_type = new_type; + + descriptor->lock_descriptor |= new_locks; } static void destroy_links(struct dc *dc) @@ -493,9 +499,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, 1, *adjust); stream->adjust.timing_adjust_pending = false; + + if 
(dc->hwss.notify_cursor_offload_drr_update) + dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream); + return true; } } + return false; } @@ -1143,8 +1154,8 @@ static bool dc_construct(struct dc *dc, /* set i2c speed if not done by the respective dcnxxx__resource.c */ if (dc->caps.i2c_speed_in_khz_hdcp == 0) dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; - if (dc->caps.max_optimizable_video_width == 0) - dc->caps.max_optimizable_video_width = 5120; + if (dc->check_config.max_optimizable_video_width == 0) + dc->check_config.max_optimizable_video_width = 5120; dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); if (!dc->clk_mgr) goto fail; @@ -2158,8 +2169,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c */ if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); if (dc->hwss.update_dsc_pg) dc->hwss.update_dsc_pg(dc, context, false); @@ -2188,8 +2199,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); } + for (i = 0; i < dc->current_state->stream_count; i++) + dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false); + result = dc->hwss.apply_ctx_to_hw(dc, context); + for (i = 0; i < context->stream_count; i++) + dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true); + if (result != DC_OK) { /* Application of dc_state to hardware stopped. 
*/ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; @@ -2229,8 +2246,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc->hwss.commit_subvp_config(dc, context); if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; @@ -2645,47 +2662,49 @@ static bool is_surface_in_context( return false; } -static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u) +static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; - enum surface_update_type update_type = UPDATE_TYPE_FAST; + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (!u->plane_info) - return UPDATE_TYPE_FAST; + return update_type; + + elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE); if (u->plane_info->color_space != u->surface->color_space) { update_flags->bits.color_space_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { update_flags->bits.horizontal_mirror_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->rotation != u->surface->rotation) { update_flags->bits.rotation_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, 
LOCK_DESCRIPTOR_STATE); } if (u->plane_info->format != u->surface->format) { update_flags->bits.pixel_format_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->stereo_format != u->surface->stereo_format) { update_flags->bits.stereo_format_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { update_flags->bits.per_pixel_alpha_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { update_flags->bits.global_alpha_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->dcc.enable != u->surface->dcc.enable @@ -2697,7 +2716,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, * recalculate stutter period. 
*/ update_flags->bits.dcc_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (resource_pixel_format_to_bpp(u->plane_info->format) != @@ -2706,30 +2725,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, * and DML calculation */ update_flags->bits.bpp_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } + const struct dc_tiling_info *tiling = &u->plane_info->tiling_info; - if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, - sizeof(struct dc_tiling_info)) != 0) { + if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) { update_flags->bits.swizzle_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_MED); + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); - /* todo: below are HW dependent, we should add a hook to - * DCE/N resource and validated there. 
- */ - if (!dc->debug.skip_full_updated_if_possible) { - /* swizzled mode requires RQ to be setup properly, - * thus need to run DML to calculate RQ settings - */ - update_flags->bits.bandwidth_change = 1; - elevate_update_type(&update_type, UPDATE_TYPE_FULL); + switch (tiling->gfxversion) { + case DcGfxVersion9: + case DcGfxVersion10: + case DcGfxVersion11: + if (tiling->gfx9.swizzle != DC_SW_LINEAR) { + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); + update_flags->bits.bandwidth_change = 1; + } + break; + case DcGfxAddr3: + if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) { + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); + update_flags->bits.bandwidth_change = 1; + } + break; + case DcGfxVersion7: + case DcGfxVersion8: + case DcGfxVersionUnknown: + default: + break; } } @@ -2737,14 +2767,17 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc, return update_type; } -static enum surface_update_type get_scaling_info_update_type( - const struct dc *dc, +static struct surface_update_descriptor get_scaling_info_update_type( + const struct dc_check_config *check_config, const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (!u->scaling_info) - return UPDATE_TYPE_FAST; + return update_type; + + elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_PLANE); if (u->scaling_info->src_rect.width != u->surface->src_rect.width || u->scaling_info->src_rect.height != u->surface->src_rect.height @@ -2768,7 +2801,7 @@ static enum surface_update_type get_scaling_info_update_type( /* Making dst rect smaller requires a bandwidth change */ update_flags->bits.bandwidth_change = 1; - if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width && + if (u->scaling_info->src_rect.width > 
check_config->max_optimizable_video_width && (u->scaling_info->clip_rect.width > u->surface->clip_rect.width || u->scaling_info->clip_rect.height > u->surface->clip_rect.height)) /* Changing clip size of a large surface may result in MPC slice count change */ @@ -2787,40 +2820,41 @@ static enum surface_update_type get_scaling_info_update_type( if (update_flags->bits.clock_change || update_flags->bits.bandwidth_change || update_flags->bits.scaling_change) - return UPDATE_TYPE_FULL; + elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); if (update_flags->bits.position_change) - return UPDATE_TYPE_MED; + elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); - return UPDATE_TYPE_FAST; + return update_type; } -static enum surface_update_type det_surface_update(const struct dc *dc, - const struct dc_surface_update *u) +static struct surface_update_descriptor det_surface_update( + const struct dc_check_config *check_config, + struct dc_surface_update *u) { - const struct dc_state *context = dc->current_state; - enum surface_update_type type; - enum surface_update_type overall_type = UPDATE_TYPE_FAST; + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; union surface_update_flags *update_flags = &u->surface->update_flags; - if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + if (u->surface->force_full_update) { update_flags->raw = 0xFFFFFFFF; - return UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); + return overall_type; } update_flags->raw = 0; // Reset all flags - type = get_plane_info_update_type(dc, u); - elevate_update_type(&overall_type, type); + struct surface_update_descriptor inner_type = get_plane_info_update_type(u); - type = get_scaling_info_update_type(dc, u); - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); + + 
inner_type = get_scaling_info_update_type(check_config, u); + elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); if (u->flip_addr) { update_flags->bits.addr_update = 1; if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { update_flags->bits.tmz_changed = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } } if (u->in_transfer_func) @@ -2856,13 +2890,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->hdr_mult.value) if (u->hdr_mult.value != u->surface->hdr_mult.value) { update_flags->bits.hdr_mult = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_MED); + // TODO: Should be fast? + elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } if (u->sdr_white_level_nits) if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) { update_flags->bits.sdr_white_level_nits = 1; - elevate_update_type(&overall_type, UPDATE_TYPE_FULL); + // TODO: Should be fast? + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (u->cm2_params) { @@ -2876,27 +2912,24 @@ static enum surface_update_type det_surface_update(const struct dc *dc, update_flags->bits.mcm_transfer_function_enable_change = 1; } if (update_flags->bits.in_transfer_func_change) { - type = UPDATE_TYPE_MED; - elevate_update_type(&overall_type, type); + // TODO: Fast? 
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STATE); } if (update_flags->bits.lut_3d && u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (update_flags->bits.mcm_transfer_function_enable_change) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } - if (dc->debug.enable_legacy_fast_update && + if (check_config->enable_legacy_fast_update && (update_flags->bits.gamma_change || update_flags->bits.gamut_remap_change || update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change)) { - type = UPDATE_TYPE_FULL; - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } return overall_type; } @@ -2924,40 +2957,34 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda } } -static enum surface_update_type check_update_surfaces_for_stream( - struct dc *dc, +static struct surface_update_descriptor check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status) + struct dc_stream_update *stream_update) { - int i; - enum surface_update_type overall_type = UPDATE_TYPE_FAST; - - if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) - overall_type = UPDATE_TYPE_FULL; - - if (stream_status == NULL || stream_status->plane_count != surface_count) - overall_type = UPDATE_TYPE_FULL; + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; if (stream_update && stream_update->pending_test_pattern) { - overall_type = UPDATE_TYPE_FULL; + 
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } if (stream_update && stream_update->hw_cursor_req) { - overall_type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); } /* some stream updates require passive update */ if (stream_update) { union stream_update_flags *su_flags = &stream_update->stream->update_flags; + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); + if ((stream_update->src.height != 0 && stream_update->src.width != 0) || (stream_update->dst.height != 0 && stream_update->dst.width != 0) || stream_update->integer_scaling_update) su_flags->bits.scaling = 1; - if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; if (stream_update->abm_level) @@ -2993,7 +3020,7 @@ static enum surface_update_type check_update_surfaces_for_stream( su_flags->bits.out_csc = 1; if (su_flags->raw != 0) - overall_type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STATE); if (stream_update->output_csc_transform) su_flags->bits.out_csc = 1; @@ -3001,15 +3028,15 @@ static enum surface_update_type check_update_surfaces_for_stream( /* Output transfer function changes do not require bandwidth recalculation, * so don't trigger a full update */ - if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func) + if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) su_flags->bits.out_tf = 1; } - for (i = 0 ; i < surface_count; i++) { - enum surface_update_type type = - det_surface_update(dc, &updates[i]); + for (int i = 0 ; i < surface_count; i++) { + struct surface_update_descriptor inner_type = + det_surface_update(check_config, &updates[i]); - elevate_update_type(&overall_type, type); + elevate_update_type(&overall_type, inner_type.update_type, 
inner_type.lock_descriptor); } return overall_type; @@ -3020,44 +3047,18 @@ static enum surface_update_type check_update_surfaces_for_stream( * * See :c:type:`enum surface_update_type ` for explanation of update types */ -enum surface_update_type dc_check_update_surfaces_for_stream( - struct dc *dc, +struct surface_update_descriptor dc_check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status) + struct dc_stream_update *stream_update) { - int i; - enum surface_update_type type; - if (stream_update) stream_update->stream->update_flags.raw = 0; - for (i = 0; i < surface_count; i++) + for (size_t i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; - type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) { - if (stream_update) { - uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; - stream_update->stream->update_flags.raw = 0xFFFFFFFF; - stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; - } - for (i = 0; i < surface_count; i++) - updates[i].surface->update_flags.raw = 0xFFFFFFFF; - } - - if (type == UPDATE_TYPE_FAST) { - // If there's an available clock comparator, we use that. - if (dc->clk_mgr->funcs->are_clock_states_equal) { - if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) - dc->optimized_required = true; - // Else we fallback to mem compare. 
- } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { - dc->optimized_required = true; - } - } - - return type; + return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update); } static struct dc_stream_status *stream_get_status( @@ -3426,6 +3427,13 @@ static void update_seamless_boot_flags(struct dc *dc, } } +static bool full_update_required_weak( + const struct dc *dc, + const struct dc_surface_update *srf_updates, + int surface_count, + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream); + /** * update_planes_and_stream_state() - The function takes planes and stream * updates as inputs and determines the appropriate update type. If update type @@ -3472,7 +3480,10 @@ static bool update_planes_and_stream_state(struct dc *dc, context = dc->current_state; update_type = dc_check_update_surfaces_for_stream( - dc, srf_updates, surface_count, stream_update, stream_status); + &dc->check_config, srf_updates, surface_count, stream_update).update_type; + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) + update_type = UPDATE_TYPE_FULL; + /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream. * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 
@@ -3504,6 +3515,16 @@ static bool update_planes_and_stream_state(struct dc *dc, } } + if (update_type == UPDATE_TYPE_FULL) { + if (stream_update) { + uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; + stream_update->stream->update_flags.raw = 0xFFFFFFFF; + stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; + } + for (i = 0; i < surface_count; i++) + srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } + if (update_type >= update_surface_trace_level) update_surface_trace(dc, srf_updates, surface_count); @@ -4149,7 +4170,7 @@ static void commit_planes_for_stream(struct dc *dc, if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) if (top_pipe_to_program && top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(stream->link)) { + if (should_use_dmub_inbox1_lock(dc, stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -4176,16 +4197,16 @@ static void commit_planes_for_stream(struct dc *dc, if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); dc->hwss.interdependent_update_lock(dc, context, true); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, true); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, true); /* Lock the top pipe while updating plane addrs, since freesync requires * plane addr update event triggers to be synchronized. 
@@ -4228,9 +4249,8 @@ static void commit_planes_for_stream(struct dc *dc, dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); - + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); return; } @@ -4419,7 +4439,7 @@ static void commit_planes_for_stream(struct dc *dc, top_pipe_to_program->stream_res.tg, CRTC_STATE_VACTIVE); - if (should_use_dmub_lock(stream->link)) { + if (should_use_dmub_inbox1_lock(dc, stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -4467,13 +4487,13 @@ static void commit_planes_for_stream(struct dc *dc, if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); } else { if (dc->hwss.subvp_pipe_control_lock) dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); - if (dc->hwss.fams2_global_control_lock) - dc->hwss.fams2_global_control_lock(dc, context, false); + if (dc->hwss.dmub_hw_control_lock) + dc->hwss.dmub_hw_control_lock(dc, context, false); } // Fire manual trigger only when bottom plane is flipped @@ -4489,6 +4509,8 @@ static void commit_planes_for_stream(struct dc *dc, pipe_ctx->plane_state->skip_manual_trigger) continue; + if (dc->hwss.program_cursor_offload_now) + dc->hwss.program_cursor_offload_now(dc, pipe_ctx); if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); } @@ -4994,7 +5016,7 @@ void 
populate_fast_updates(struct dc_fast_update *fast_update, } } -static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count) +static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count) { int i; @@ -5035,18 +5057,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_ return false; } -static bool full_update_required(struct dc *dc, - struct dc_surface_update *srf_updates, +static bool full_update_required_weak( + const struct dc *dc, + const struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_update *stream_update, - struct dc_stream_state *stream) + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) { - - int i; - struct dc_stream_status *stream_status; const struct dc_state *context = dc->current_state; + if (srf_updates) + for (int i = 0; i < surface_count; i++) + if (!is_surface_in_context(context, srf_updates[i].surface)) + return true; - for (i = 0; i < surface_count; i++) { + if (stream) { + const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream); + if (stream_status == NULL || stream_status->plane_count != surface_count) + return true; + } + if (dc->idle_optimizations_allowed) + return true; + + if (dc_can_clear_cursor_limit(dc)) + return true; + + return false; +} + +static bool full_update_required( + const struct dc *dc, + const struct dc_surface_update *srf_updates, + int surface_count, + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) +{ + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) + return true; + + for (int i = 0; i < surface_count; i++) { if (srf_updates && (srf_updates[i].plane_info || srf_updates[i].scaling_info || @@ -5062,8 +5110,7 @@ static bool full_update_required(struct dc *dc, srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 
(srf_updates[i].cm2_params && (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || - srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || - !is_surface_in_context(context, srf_updates[i].surface))) + srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)))) return true; } @@ -5099,26 +5146,16 @@ static bool full_update_required(struct dc *dc, stream_update->hw_cursor_req)) return true; - if (stream) { - stream_status = dc_stream_get_status(stream); - if (stream_status == NULL || stream_status->plane_count != surface_count) - return true; - } - if (dc->idle_optimizations_allowed) - return true; - - if (dc_can_clear_cursor_limit(dc)) - return true; - return false; } -static bool fast_update_only(struct dc *dc, - struct dc_fast_update *fast_update, - struct dc_surface_update *srf_updates, +static bool fast_update_only( + const struct dc *dc, + const struct dc_fast_update *fast_update, + const struct dc_surface_update *srf_updates, int surface_count, - struct dc_stream_update *stream_update, - struct dc_stream_state *stream) + const struct dc_stream_update *stream_update, + const struct dc_stream_state *stream) { return fast_updates_exist(fast_update, surface_count) && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); @@ -5181,7 +5218,7 @@ static bool update_planes_and_stream_v2(struct dc *dc, commit_minimal_transition_state_in_dc_update(dc, context, stream, srf_updates, surface_count); - if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { + if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -5224,7 +5261,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc, stream_update); if (fast_update_only(dc, fast_update, srf_updates, surface_count, 
stream_update, stream) && - !dc->debug.enable_legacy_fast_update) + !dc->check_config.enable_legacy_fast_update) commit_planes_for_stream_fast(dc, srf_updates, surface_count, @@ -5350,7 +5387,8 @@ bool dc_update_planes_and_stream(struct dc *dc, * specially handle compatibility problems with transitions among those * features as they are now transparent to the new sequence. */ - if (dc->ctx->dce_version >= DCN_VERSION_4_01) + if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_21) ret = update_planes_and_stream_v3(dc, srf_updates, surface_count, stream, stream_update); else @@ -6349,7 +6387,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc) return false; } -bool dc_can_clear_cursor_limit(struct dc *dc) +bool dc_can_clear_cursor_limit(const struct dc *dc) { uint32_t i; @@ -6378,3 +6416,8 @@ void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, if (dc->hwss.get_underflow_debug_data) dc->hwss.get_underflow_debug_data(dc, tg, out_data); } + +void dc_log_preos_dmcub_info(const struct dc *dc) +{ + dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index d82b1cb467f4..f95cb0cf4b8a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -32,6 +32,12 @@ #include "resource.h" #include "dc_dmub_srv.h" #include "dc_state_priv.h" +#include "opp.h" +#include "dsc.h" +#include "dchubbub.h" +#include "dccg.h" +#include "abm.h" +#include "dcn10/dcn10_hubbub.h" #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) #define MAX_NUM_MCACHE 8 @@ -755,11 +761,11 @@ void hwss_build_fast_sequence(struct dc *dc, block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } - if (dc->hwss.fams2_global_control_lock_fast) { - 
block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); - block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; + if (dc->hwss.dmub_hw_control_lock_fast) { + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); + block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST; (*num_steps)++; } if (dc->hwss.pipe_control_lock) { @@ -784,7 +790,7 @@ void hwss_build_fast_sequence(struct dc *dc, while (current_mpc_pipe) { if (current_mpc_pipe->plane_state) { if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) { - block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp; block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate; block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; (*num_steps)++; @@ -894,11 +900,11 @@ void hwss_build_fast_sequence(struct dc *dc, block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; (*num_steps)++; } - if (dc->hwss.fams2_global_control_lock_fast) { - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false; - block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); - block_sequence[*num_steps].func = 
DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; + if (dc->hwss.dmub_hw_control_lock_fast) { + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false; + block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); + block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST; (*num_steps)++; } @@ -911,6 +917,13 @@ void hwss_build_fast_sequence(struct dc *dc, current_mpc_pipe->stream && current_mpc_pipe->plane_state && current_mpc_pipe->plane_state->update_flags.bits.addr_update && !current_mpc_pipe->plane_state->skip_manual_trigger) { + if (dc->hwss.program_cursor_offload_now) { + block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc; + block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe; + block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW; + (*num_steps)++; + } + block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe; block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; (*num_steps)++; @@ -942,8 +955,9 @@ void hwss_execute_sequence(struct dc *dc, params->pipe_control_lock_params.lock); break; case HUBP_SET_FLIP_CONTROL_GSL: - dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx, - params->set_flip_control_gsl_params.flip_immediate); + params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl( + params->set_flip_control_gsl_params.hubp, + params->set_flip_control_gsl_params.flip_immediate); break; case HUBP_PROGRAM_TRIPLEBUFFER: dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc, @@ -1001,8 +1015,298 @@ void hwss_execute_sequence(struct dc *dc, params->wait_for_dcc_meta_propagation_params.dc, params->wait_for_dcc_meta_propagation_params.top_pipe_to_program); break; - case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST: - 
dc->hwss.fams2_global_control_lock_fast(params); + case DMUB_HW_CONTROL_LOCK_FAST: + dc->hwss.dmub_hw_control_lock_fast(params); + break; + case HUBP_PROGRAM_SURFACE_CONFIG: + hwss_program_surface_config(params); + break; + case HUBP_PROGRAM_MCACHE_ID: + hwss_program_mcache_id_and_split_coordinate(params); + break; + case PROGRAM_CURSOR_UPDATE_NOW: + dc->hwss.program_cursor_offload_now( + params->program_cursor_update_now_params.dc, + params->program_cursor_update_now_params.pipe_ctx); + break; + case HUBP_WAIT_PIPE_READ_START: + params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start( + params->hubp_wait_pipe_read_start_params.hubp); + break; + case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM: + dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx); + break; + case HWS_UPDATE_PHANTOM_VP_POSITION: + dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc, + params->update_phantom_vp_position_params.context, + params->update_phantom_vp_position_params.pipe_ctx); + break; + case OPTC_SET_ODM_COMBINE: + hwss_set_odm_combine(params); + break; + case OPTC_SET_ODM_BYPASS: + hwss_set_odm_bypass(params); + break; + case OPP_PIPE_CLOCK_CONTROL: + hwss_opp_pipe_clock_control(params); + break; + case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL: + hwss_opp_program_left_edge_extra_pixel(params); + break; + case DCCG_SET_DTO_DSCCLK: + hwss_dccg_set_dto_dscclk(params); + break; + case DSC_SET_CONFIG: + hwss_dsc_set_config(params); + break; + case DSC_ENABLE: + hwss_dsc_enable(params); + break; + case TG_SET_DSC_CONFIG: + hwss_tg_set_dsc_config(params); + break; + case DSC_DISCONNECT: + hwss_dsc_disconnect(params); + break; + case DSC_READ_STATE: + hwss_dsc_read_state(params); + break; + case DSC_CALCULATE_AND_SET_CONFIG: + hwss_dsc_calculate_and_set_config(params); + break; + case DSC_ENABLE_WITH_OPP: + hwss_dsc_enable_with_opp(params); + break; + case TG_PROGRAM_GLOBAL_SYNC: + 
hwss_tg_program_global_sync(params); + break; + case TG_WAIT_FOR_STATE: + hwss_tg_wait_for_state(params); + break; + case TG_SET_VTG_PARAMS: + hwss_tg_set_vtg_params(params); + break; + case TG_SETUP_VERTICAL_INTERRUPT2: + hwss_tg_setup_vertical_interrupt2(params); + break; + case DPP_SET_HDR_MULTIPLIER: + hwss_dpp_set_hdr_multiplier(params); + break; + case HUBP_PROGRAM_DET_SIZE: + hwss_program_det_size(params); + break; + case HUBP_PROGRAM_DET_SEGMENTS: + hwss_program_det_segments(params); + break; + case OPP_SET_DYN_EXPANSION: + hwss_opp_set_dyn_expansion(params); + break; + case OPP_PROGRAM_FMT: + hwss_opp_program_fmt(params); + break; + case OPP_PROGRAM_BIT_DEPTH_REDUCTION: + hwss_opp_program_bit_depth_reduction(params); + break; + case OPP_SET_DISP_PATTERN_GENERATOR: + hwss_opp_set_disp_pattern_generator(params); + break; + case ABM_SET_PIPE: + hwss_set_abm_pipe(params); + break; + case ABM_SET_LEVEL: + hwss_set_abm_level(params); + break; + case ABM_SET_IMMEDIATE_DISABLE: + hwss_set_abm_immediate_disable(params); + break; + case MPC_REMOVE_MPCC: + hwss_mpc_remove_mpcc(params); + break; + case OPP_SET_MPCC_DISCONNECT_PENDING: + hwss_opp_set_mpcc_disconnect_pending(params); + break; + case DC_SET_OPTIMIZED_REQUIRED: + hwss_dc_set_optimized_required(params); + break; + case HUBP_DISCONNECT: + hwss_hubp_disconnect(params); + break; + case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL: + hwss_hubbub_force_pstate_change_control(params); + break; + case TG_ENABLE_CRTC: + hwss_tg_enable_crtc(params); + break; + case TG_SET_GSL: + hwss_tg_set_gsl(params); + break; + case TG_SET_GSL_SOURCE_SELECT: + hwss_tg_set_gsl_source_select(params); + break; + case HUBP_WAIT_FLIP_PENDING: + hwss_hubp_wait_flip_pending(params); + break; + case TG_WAIT_DOUBLE_BUFFER_PENDING: + hwss_tg_wait_double_buffer_pending(params); + break; + case UPDATE_FORCE_PSTATE: + hwss_update_force_pstate(params); + break; + case HUBBUB_APPLY_DEDCN21_147_WA: + hwss_hubbub_apply_dedcn21_147_wa(params); + break; + 
case HUBBUB_ALLOW_SELF_REFRESH_CONTROL: + hwss_hubbub_allow_self_refresh_control(params); + break; + case TG_GET_FRAME_COUNT: + hwss_tg_get_frame_count(params); + break; + case MPC_SET_DWB_MUX: + hwss_mpc_set_dwb_mux(params); + break; + case MPC_DISABLE_DWB_MUX: + hwss_mpc_disable_dwb_mux(params); + break; + case MCIF_WB_CONFIG_BUF: + hwss_mcif_wb_config_buf(params); + break; + case MCIF_WB_CONFIG_ARB: + hwss_mcif_wb_config_arb(params); + break; + case MCIF_WB_ENABLE: + hwss_mcif_wb_enable(params); + break; + case MCIF_WB_DISABLE: + hwss_mcif_wb_disable(params); + break; + case DWBC_ENABLE: + hwss_dwbc_enable(params); + break; + case DWBC_DISABLE: + hwss_dwbc_disable(params); + break; + case DWBC_UPDATE: + hwss_dwbc_update(params); + break; + case HUBP_UPDATE_MALL_SEL: + hwss_hubp_update_mall_sel(params); + break; + case HUBP_PREPARE_SUBVP_BUFFERING: + hwss_hubp_prepare_subvp_buffering(params); + break; + case HUBP_SET_BLANK_EN: + hwss_hubp_set_blank_en(params); + break; + case HUBP_DISABLE_CONTROL: + hwss_hubp_disable_control(params); + break; + case HUBBUB_SOFT_RESET: + hwss_hubbub_soft_reset(params); + break; + case HUBP_CLK_CNTL: + hwss_hubp_clk_cntl(params); + break; + case HUBP_INIT: + hwss_hubp_init(params); + break; + case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS: + hwss_hubp_set_vm_system_aperture_settings(params); + break; + case HUBP_SET_FLIP_INT: + hwss_hubp_set_flip_int(params); + break; + case DPP_DPPCLK_CONTROL: + hwss_dpp_dppclk_control(params); + break; + case DISABLE_PHANTOM_CRTC: + hwss_disable_phantom_crtc(params); + break; + case DSC_PG_STATUS: + hwss_dsc_pg_status(params); + break; + case DSC_WAIT_DISCONNECT_PENDING_CLEAR: + hwss_dsc_wait_disconnect_pending_clear(params); + break; + case DSC_DISABLE: + hwss_dsc_disable(params); + break; + case DCCG_SET_REF_DSCCLK: + hwss_dccg_set_ref_dscclk(params); + break; + case DPP_PG_CONTROL: + hwss_dpp_pg_control(params); + break; + case HUBP_PG_CONTROL: + hwss_hubp_pg_control(params); + break; + case 
HUBP_RESET: + hwss_hubp_reset(params); + break; + case DPP_RESET: + hwss_dpp_reset(params); + break; + case DPP_ROOT_CLOCK_CONTROL: + hwss_dpp_root_clock_control(params); + break; + case DC_IP_REQUEST_CNTL: + hwss_dc_ip_request_cntl(params); + break; + case DCCG_UPDATE_DPP_DTO: + hwss_dccg_update_dpp_dto(params); + break; + case HUBP_VTG_SEL: + hwss_hubp_vtg_sel(params); + break; + case HUBP_SETUP2: + hwss_hubp_setup2(params); + break; + case HUBP_SETUP: + hwss_hubp_setup(params); + break; + case HUBP_SET_UNBOUNDED_REQUESTING: + hwss_hubp_set_unbounded_requesting(params); + break; + case HUBP_SETUP_INTERDEPENDENT2: + hwss_hubp_setup_interdependent2(params); + break; + case HUBP_SETUP_INTERDEPENDENT: + hwss_hubp_setup_interdependent(params); + break; + case DPP_SET_CURSOR_MATRIX: + hwss_dpp_set_cursor_matrix(params); + break; + case MPC_UPDATE_BLENDING: + hwss_mpc_update_blending(params); + break; + case MPC_ASSERT_IDLE_MPCC: + hwss_mpc_assert_idle_mpcc(params); + break; + case MPC_INSERT_PLANE: + hwss_mpc_insert_plane(params); + break; + case DPP_SET_SCALER: + hwss_dpp_set_scaler(params); + break; + case HUBP_MEM_PROGRAM_VIEWPORT: + hwss_hubp_mem_program_viewport(params); + break; + case SET_CURSOR_ATTRIBUTE: + hwss_set_cursor_attribute(params); + break; + case SET_CURSOR_POSITION: + hwss_set_cursor_position(params); + break; + case SET_CURSOR_SDR_WHITE_LEVEL: + hwss_set_cursor_sdr_white_level(params); + break; + case PROGRAM_OUTPUT_CSC: + hwss_program_output_csc(params); + break; + case HUBP_SET_BLANK: + hwss_hubp_set_blank(params); + break; + case PHANTOM_HUBP_POST_ENABLE: + hwss_phantom_hubp_post_enable(params); break; default: ASSERT(false); @@ -1011,6 +1315,338 @@ void hwss_execute_sequence(struct dc *dc, } } +/** + * Helper function to add OPTC pipe control lock to block sequence + */ +void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool lock) +{ + if (*seq_state->num_steps < 
MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock; + seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP set flip control GSL to block sequence + */ +void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool flip_immediate) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate; + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP program triplebuffer to block sequence + */ +void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enableTripleBuffer) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP update plane address to block sequence + */ +void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + 
seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DPP set input transfer function to block sequence + */ +void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_plane_state *plane_state) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DPP program gamut remap to block sequence + */ +void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DPP program bias and scale to block sequence + */ +void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add OPTC program 
manual trigger to block sequence + */ +void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DPP set output transfer function to block sequence + */ +void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_stream_state *stream) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC update visual confirm to block sequence + */ +void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id; + seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC power on MPC mem PWR to block sequence + */ +void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state, 
+ struct mpc *mpc, + int mpcc_id, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id; + seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on; + seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC set output CSC to block sequence + */ +void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int opp_id, + const uint16_t *regval, + enum mpc_output_csc_mode ocsc_mode) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval; + seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode; + seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC set OCSC default to block sequence + */ +void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state, + struct mpc *mpc, + int opp_id, + enum dc_color_space colorspace, + enum mpc_output_csc_mode ocsc_mode) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace; + seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode; + 
seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DMUB send DMCUB command to block sequence + */ +void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state, + struct dc_context *ctx, + union dmub_rb_cmd *cmd, + enum dm_dmub_wait_type wait_type) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx; + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd; + seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type; + seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DMUB SubVP save surface address to block sequence + */ +void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state, + struct dc_dmub_srv *dc_dmub_srv, + struct dc_plane_address *addr, + uint8_t subvp_index) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv; + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr; + seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index; + seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP wait for DCC meta propagation to block sequence + */ +void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *top_pipe_to_program) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = 
top_pipe_to_program; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP wait pipe read start to block sequence + */ +void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HWS apply update flags for phantom to block sequence + */ +void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HWS update phantom VP position to block sequence + */ +void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context; + seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add OPTC set ODM combine to block sequence + */ +void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state, + struct 
timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count, + int odm_slice_width, int last_odm_slice_width) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg; + memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES); + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count; + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width; + seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width; + seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add OPTC set ODM bypass to block sequence + */ +void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dc_crtc_timing *timing) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing; + seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS; + (*seq_state->num_steps)++; + } +} + void hwss_send_dmcub_cmd(union block_sequence_params *params) { struct dc_context *ctx = params->send_dmcub_cmd_params.ctx; @@ -1020,6 +1656,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params) dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type); } +/** + * Helper function to add TG program global sync to block sequence + */ +void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int vready_offset, + unsigned int vstartup_lines, + unsigned int vupdate_offset_pixels, + unsigned int vupdate_vupdate_width_pixels, + unsigned int 
pstate_keepout_start_lines) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels; + seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines; + seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add TG wait for state to block sequence + */ +void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state, + struct timing_generator *tg, + enum crtc_state state) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state; + seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add TG set VTG params to block sequence + */ +void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct dc_crtc_timing *dc_crtc_timing, + bool program_fp2) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing; + 
seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2; + seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add TG setup vertical interrupt2 to block sequence + */ +void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state, + struct timing_generator *tg, int start_line) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line; + seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add DPP set HDR multiplier to block sequence + */ +void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state, + struct dpp *dpp, uint32_t hw_mult) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult; + seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP program DET size to block sequence + */ +void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + unsigned int hubp_inst, + unsigned int det_buffer_size_kb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb; + 
seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SIZE; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_hubp_pipe_mcache_regs *mcache_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool enable, + bool wait) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable; + seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait; + seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP program DET segments to block sequence + */ +void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + unsigned int hubp_inst, + unsigned int det_size) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size; + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS; + (*seq_state->num_steps)++; 
+ } +} + +/** + * Helper function to add OPP set dynamic expansion to block sequence + */ +void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_color_space color_space, + enum dc_color_depth color_depth, + enum signal_type signal) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth; + seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal; + seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add OPP program FMT to block sequence + */ +void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth; + seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping; + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_otg_master) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL; + 
seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding; + seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add ABM set pipe to block sequence + */ +void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add ABM set level to block sequence + */ +void hwss_add_abm_set_level(struct block_sequence_state *seq_state, + struct abm *abm, + uint32_t abm_level) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm; + seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level; + seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add TG enable CRTC to block sequence + */ +void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg; + seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP wait flip pending to block sequence + */ +void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state, + 
struct hubp *hubp, + unsigned int timeout_us, + unsigned int polling_interval_us) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us; + seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us; + seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add TG wait double buffer pending to block sequence + */ +void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int timeout_us, + unsigned int polling_interval_us) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us; + seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = polling_interval_us; + seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING; + (*seq_state->num_steps)++; + } +} + void hwss_program_manual_trigger(union block_sequence_params *params) { struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx; @@ -1046,12 +1952,6 @@ void hwss_setup_dpp(union block_sequence_params *params) plane_state->color_space, NULL); } - - if (dpp && dpp->funcs->set_cursor_matrix) { - dpp->funcs->set_cursor_matrix(dpp, - plane_state->color_space, - plane_state->cursor_csc_color_matrix); - } } void hwss_program_bias_and_scale(union block_sequence_params *params) @@ -1062,9 +1962,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params) struct dc_bias_and_scale bns_params = 
plane_state->bias_and_scale; //TODO :for CNVC set scale and bias registers if necessary - if (dpp->funcs->dpp_program_bias_and_scale) { + if (dpp->funcs->dpp_program_bias_and_scale) dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); - } } void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params) @@ -1114,6 +2013,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params) dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index); } +void hwss_program_surface_config(union block_sequence_params *params) +{ + struct hubp *hubp = params->program_surface_config_params.hubp; + enum surface_pixel_format format = params->program_surface_config_params.format; + struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info; + struct plane_size size = params->program_surface_config_params.plane_size; + enum dc_rotation_angle rotation = params->program_surface_config_params.rotation; + struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc; + bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror; + int compat_level = params->program_surface_config_params.compat_level; + + hubp->funcs->hubp_program_surface_config( + hubp, + format, + tiling_info, + &size, + rotation, + dcc, + horizontal_mirror, + compat_level); + + hubp->power_gated = false; +} + +void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params) +{ + struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp; + struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs; + + hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs); + +} + void get_surface_tile_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color) @@ -1188,6 +2120,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state 
*context) { int i; + for (i = 0; i < MAX_PIPES; i++) { int count = 0; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; @@ -1264,3 +2197,1848 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont if (dc->hwss.program_outstanding_updates) dc->hwss.program_outstanding_updates(dc, dc_context); } + +void hwss_set_odm_combine(union block_sequence_params *params) +{ + struct timing_generator *tg = params->set_odm_combine_params.tg; + int *opp_inst = params->set_odm_combine_params.opp_inst; + int opp_head_count = params->set_odm_combine_params.opp_head_count; + int odm_slice_width = params->set_odm_combine_params.odm_slice_width; + int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width; + + if (tg && tg->funcs->set_odm_combine) + tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count, + odm_slice_width, last_odm_slice_width); +} + +void hwss_set_odm_bypass(union block_sequence_params *params) +{ + struct timing_generator *tg = params->set_odm_bypass_params.tg; + const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing; + + if (tg && tg->funcs->set_odm_bypass) + tg->funcs->set_odm_bypass(tg, timing); +} + +void hwss_opp_pipe_clock_control(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp; + bool enable = params->opp_pipe_clock_control_params.enable; + + if (opp && opp->funcs->opp_pipe_clock_control) + opp->funcs->opp_pipe_clock_control(opp, enable); +} + +void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp; + enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding; + bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master; + + if (opp && opp->funcs->opp_program_left_edge_extra_pixel) + 
opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master); +} + +void hwss_dccg_set_dto_dscclk(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg; + int inst = params->dccg_set_dto_dscclk_params.inst; + int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h; + + if (dccg && dccg->funcs->set_dto_dscclk) + dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h); +} + +void hwss_dsc_set_config(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc; + struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg; + struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg; + + if (dsc && dsc->funcs->dsc_set_config) + dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg); +} + +void hwss_dsc_enable(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_enable_params.dsc; + int opp_inst = params->dsc_enable_params.opp_inst; + + if (dsc && dsc->funcs->dsc_enable) + dsc->funcs->dsc_enable(dsc, opp_inst); +} + +void hwss_tg_set_dsc_config(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_set_dsc_config_params.tg; + enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED; + uint32_t bytes_per_pixel = 0; + uint32_t slice_width = 0; + + if (params->tg_set_dsc_config_params.enable) { + struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg; + + if (dsc_optc_cfg) { + bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel; + slice_width = dsc_optc_cfg->slice_width; + optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ? 
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; + } + } + + if (tg && tg->funcs->set_dsc_config) + tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width); +} + +void hwss_dsc_disconnect(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc; + + if (dsc && dsc->funcs->dsc_disconnect) + dsc->funcs->dsc_disconnect(dsc); +} + +void hwss_dsc_read_state(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc; + struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state; + + if (dsc && dsc->funcs->dsc_read_state) + dsc->funcs->dsc_read_state(dsc, dsc_state); +} + +void hwss_dsc_calculate_and_set_config(union block_sequence_params *params) +{ + struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx; + struct pipe_ctx *top_pipe = pipe_ctx; + bool enable = params->dsc_calculate_and_set_config_params.enable; + int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt; + + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + + if (!dsc || !enable) + return; + + /* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */ + struct dsc_config dsc_cfg; + + while (top_pipe->prev_odm_pipe) + top_pipe = top_pipe->prev_odm_pipe; + + dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding + + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = top_pipe->next_odm_pipe ? 
true : false;
+	dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+	dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+	dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding;
+
+	/* Set DSC configuration */
+	if (dsc->funcs->dsc_set_config)
+		dsc->funcs->dsc_set_config(dsc, &dsc_cfg,
+				&params->dsc_calculate_and_set_config_params.dsc_optc_cfg);
+}
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params)
+{
+	struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx;
+	struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+	if (dsc && dsc->funcs->dsc_enable)
+		dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+}
+
+void hwss_tg_program_global_sync(union block_sequence_params *params)
+{
+	struct timing_generator *tg = params->tg_program_global_sync_params.tg;
+	int vready_offset = params->tg_program_global_sync_params.vready_offset;
+	unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines;
+	unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels;
+	unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels;
+	unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines;
+
+	if (tg->funcs->program_global_sync) {
+		tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines,
+				vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines);
+	}
+}
+
+void hwss_tg_wait_for_state(union block_sequence_params *params)
+{
+	struct timing_generator *tg = params->tg_wait_for_state_params.tg;
+	enum crtc_state state = params->tg_wait_for_state_params.state;
+
+	if (tg->funcs->wait_for_state)
+		tg->funcs->wait_for_state(tg, state);
+}
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params)
+{
+	struct timing_generator *tg = params->tg_set_vtg_params_params.tg;
+	struct dc_crtc_timing *timing = 
params->tg_set_vtg_params_params.timing; + bool program_fp2 = params->tg_set_vtg_params_params.program_fp2; + + if (tg->funcs->set_vtg_params) + tg->funcs->set_vtg_params(tg, timing, program_fp2); +} + +void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg; + int start_line = params->tg_setup_vertical_interrupt2_params.start_line; + + if (tg->funcs->setup_vertical_interrupt2) + tg->funcs->setup_vertical_interrupt2(tg, start_line); +} + +void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp; + uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult; + + if (dpp->funcs->dpp_set_hdr_multiplier) + dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult); +} + +void hwss_program_det_size(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->program_det_size_params.hubbub; + unsigned int hubp_inst = params->program_det_size_params.hubp_inst; + unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb; + + if (hubbub->funcs->program_det_size) + hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb); +} + +void hwss_program_det_segments(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->program_det_segments_params.hubbub; + unsigned int hubp_inst = params->program_det_segments_params.hubp_inst; + unsigned int det_size = params->program_det_segments_params.det_size; + + if (hubbub->funcs->program_det_segments) + hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size); +} + +void hwss_opp_set_dyn_expansion(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp; + enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space; + enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth; + 
enum signal_type signal = params->opp_set_dyn_expansion_params.signal; + + if (opp->funcs->opp_set_dyn_expansion) + opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal); +} + +void hwss_opp_program_fmt(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_fmt_params.opp; + struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth; + struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping; + + if (opp->funcs->opp_program_fmt) + opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping); +} + +void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp; + bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params; + struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx; + struct bit_depth_reduction_params bit_depth_params; + + if (use_default_params) + memset(&bit_depth_params, 0, sizeof(bit_depth_params)); + else + resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params); + + if (opp->funcs->opp_program_bit_depth_reduction) + opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params); +} + +void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params) +{ + struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp; + enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern; + enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space; + enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth; + struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ? 
+			&params->opp_set_disp_pattern_generator_params.solid_color : NULL;
+	int width = params->opp_set_disp_pattern_generator_params.width;
+	int height = params->opp_set_disp_pattern_generator_params.height;
+	int offset = params->opp_set_disp_pattern_generator_params.offset;
+
+	if (opp && opp->funcs->opp_set_disp_pattern_generator) {
+		opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space,
+				color_depth, solid_color, width, height, offset);
+	}
+}
+
+void hwss_set_abm_pipe(union block_sequence_params *params)
+{
+	struct dc *dc = params->set_abm_pipe_params.dc;
+	struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx;
+
+	dc->hwss.set_pipe(pipe_ctx);
+}
+
+void hwss_set_abm_level(union block_sequence_params *params)
+{
+	struct abm *abm = params->set_abm_level_params.abm;
+	unsigned int abm_level = params->set_abm_level_params.abm_level;
+
+	if (abm->funcs->set_abm_level)
+		abm->funcs->set_abm_level(abm, abm_level);
+}
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params)
+{
+	struct dc *dc = params->set_abm_immediate_disable_params.dc;
+	struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx;
+
+	if (dc && dc->hwss.set_abm_immediate_disable)
+		dc->hwss.set_abm_immediate_disable(pipe_ctx);
+}
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params)
+{
+	struct mpc *mpc = params->mpc_remove_mpcc_params.mpc;
+	struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params;
+	struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove;
+
+	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+}
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params)
+{
+	struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp;
+	int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst;
+	bool pending = params->opp_set_mpcc_disconnect_pending_params.pending;
+
+	
opp->mpcc_disconnect_pending[mpcc_inst] = pending;
+}
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params)
+{
+	struct dc *dc = params->dc_set_optimized_required_params.dc;
+	bool optimized_required = params->dc_set_optimized_required_params.optimized_required;
+
+	dc->optimized_required = optimized_required;
+}
+
+void hwss_hubp_disconnect(union block_sequence_params *params)
+{
+	struct hubp *hubp = params->hubp_disconnect_params.hubp;
+
+	if (hubp->funcs->hubp_disconnect)
+		hubp->funcs->hubp_disconnect(hubp);
+}
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params)
+{
+	struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub;
+	bool enable = params->hubbub_force_pstate_change_control_params.enable;
+	bool wait = params->hubbub_force_pstate_change_control_params.wait;
+
+	if (hubbub->funcs->force_pstate_change_control) {
+		hubbub->funcs->force_pstate_change_control(hubbub, enable, wait);
+		/* Add delay when enabling pstate change control */
+		if (enable)
+			udelay(500);
+	}
+}
+
+void hwss_tg_enable_crtc(union block_sequence_params *params)
+{
+	struct timing_generator *tg = params->tg_enable_crtc_params.tg;
+
+	if (tg->funcs->enable_crtc)
+		tg->funcs->enable_crtc(tg);
+}
+
+void hwss_tg_set_gsl(union block_sequence_params *params)
+{
+	struct timing_generator *tg = params->tg_set_gsl_params.tg;
+	struct gsl_params *gsl = &params->tg_set_gsl_params.gsl;
+
+	if (tg->funcs->set_gsl)
+		tg->funcs->set_gsl(tg, gsl);
+}
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params)
+{
+	struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg;
+	int group_idx = params->tg_set_gsl_source_select_params.group_idx;
+	uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal;
+
+	if (tg->funcs->set_gsl_source_select)
+		tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal);
+}
+
+void hwss_hubp_wait_flip_pending(union 
block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp; + unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us; + unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us; + int j = 0; + + for (j = 0; j < timeout_us / polling_interval_us + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + udelay(polling_interval_us); +} + +void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg; + unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us; + unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us; + int j = 0; + + if (tg->funcs->get_optc_double_buffer_pending) { + for (j = 0; j < timeout_us / polling_interval_us + && tg->funcs->get_optc_double_buffer_pending(tg); j++) + udelay(polling_interval_us); + } +} + +void hwss_update_force_pstate(union block_sequence_params *params) +{ + struct dc *dc = params->update_force_pstate_params.dc; + struct dc_state *context = params->update_force_pstate_params.context; + struct dce_hwseq *hwseq = dc->hwseq; + + if (hwseq->funcs.update_force_pstate) + hwseq->funcs.update_force_pstate(dc, context); +} + +void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub; + + hubbub->funcs->apply_DEDCN21_147_wa(hubbub); +} + +void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub; + bool allow = params->hubbub_allow_self_refresh_control_params.allow; + + hubbub->funcs->allow_self_refresh_control(hubbub, allow); + + if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied) + 
*params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true; +} + +void hwss_tg_get_frame_count(union block_sequence_params *params) +{ + struct timing_generator *tg = params->tg_get_frame_count_params.tg; + unsigned int *frame_count = params->tg_get_frame_count_params.frame_count; + + *frame_count = tg->funcs->get_frame_count(tg); +} + +void hwss_mpc_set_dwb_mux(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc; + int dwb_id = params->mpc_set_dwb_mux_params.dwb_id; + int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id; + + if (mpc->funcs->set_dwb_mux) + mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id); +} + +void hwss_mpc_disable_dwb_mux(union block_sequence_params *params) +{ + struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc; + unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id; + + if (mpc->funcs->disable_dwb_mux) + mpc->funcs->disable_dwb_mux(mpc, dwb_id); +} + +void hwss_mcif_wb_config_buf(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb; + struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params; + unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height; + + if (mcif_wb->funcs->config_mcif_buf) + mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height); +} + +void hwss_mcif_wb_config_arb(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb; + struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params; + + if (mcif_wb->funcs->config_mcif_arb) + mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params); +} + +void hwss_mcif_wb_enable(union block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb; + + if (mcif_wb->funcs->enable_mcif) + mcif_wb->funcs->enable_mcif(mcif_wb); +} + +void hwss_mcif_wb_disable(union 
block_sequence_params *params) +{ + struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb; + + if (mcif_wb->funcs->disable_mcif) + mcif_wb->funcs->disable_mcif(mcif_wb); +} + +void hwss_dwbc_enable(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_enable_params.dwb; + struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params; + + if (dwb->funcs->enable) + dwb->funcs->enable(dwb, dwb_params); +} + +void hwss_dwbc_disable(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_disable_params.dwb; + + if (dwb->funcs->disable) + dwb->funcs->disable(dwb); +} + +void hwss_dwbc_update(union block_sequence_params *params) +{ + struct dwbc *dwb = params->dwbc_update_params.dwb; + struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params; + + if (dwb->funcs->update) + dwb->funcs->update(dwb, dwb_params); +} + +void hwss_hubp_update_mall_sel(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_update_mall_sel_params.hubp; + uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel; + bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor; + + if (hubp && hubp->funcs->hubp_update_mall_sel) + hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor); +} + +void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp; + bool enable = params->hubp_prepare_subvp_buffering_params.enable; + + if (hubp && hubp->funcs->hubp_prepare_subvp_buffering) + hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable); +} + +void hwss_hubp_set_blank_en(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_blank_en_params.hubp; + bool enable = params->hubp_set_blank_en_params.enable; + + if (hubp && hubp->funcs->set_hubp_blank_en) + hubp->funcs->set_hubp_blank_en(hubp, enable); +} + +void hwss_hubp_disable_control(union block_sequence_params *params) +{ + 
struct hubp *hubp = params->hubp_disable_control_params.hubp; + bool disable = params->hubp_disable_control_params.disable; + + if (hubp && hubp->funcs->hubp_disable_control) + hubp->funcs->hubp_disable_control(hubp, disable); +} + +void hwss_hubbub_soft_reset(union block_sequence_params *params) +{ + struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub; + bool reset = params->hubbub_soft_reset_params.reset; + + if (hubbub) + params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset); +} + +void hwss_hubp_clk_cntl(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_clk_cntl_params.hubp; + bool enable = params->hubp_clk_cntl_params.enable; + + if (hubp && hubp->funcs->hubp_clk_cntl) { + hubp->funcs->hubp_clk_cntl(hubp, enable); + hubp->power_gated = !enable; + } +} + +void hwss_hubp_init(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_init_params.hubp; + + if (hubp && hubp->funcs->hubp_init) + hubp->funcs->hubp_init(hubp); +} + +void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp; + struct vm_system_aperture_param apt; + + apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default; + apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high; + apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low; + + if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings) + hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt); +} + +void hwss_hubp_set_flip_int(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_flip_int_params.hubp; + + if (hubp && hubp->funcs->hubp_set_flip_int) + hubp->funcs->hubp_set_flip_int(hubp); +} + +void hwss_dpp_dppclk_control(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_dppclk_control_params.dpp; + bool dppclk_div = 
params->dpp_dppclk_control_params.dppclk_div; + bool enable = params->dpp_dppclk_control_params.enable; + + if (dpp && dpp->funcs->dpp_dppclk_control) + dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable); +} + +void hwss_disable_phantom_crtc(union block_sequence_params *params) +{ + struct timing_generator *tg = params->disable_phantom_crtc_params.tg; + + if (tg && tg->funcs->disable_phantom_crtc) + tg->funcs->disable_phantom_crtc(tg); +} + +void hwss_dsc_pg_status(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dsc_pg_status_params.hws; + int dsc_inst = params->dsc_pg_status_params.dsc_inst; + + if (hws && hws->funcs.dsc_pg_status) + params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst); +} + +void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc; + + if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated) + return; + if (*params->dsc_wait_disconnect_pending_clear_params.is_ungated == false) + return; + + if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear) + dsc->funcs->dsc_wait_disconnect_pending_clear(dsc); +} + +void hwss_dsc_disable(union block_sequence_params *params) +{ + struct display_stream_compressor *dsc = params->dsc_disable_params.dsc; + + if (!params->dsc_disable_params.is_ungated) + return; + if (*params->dsc_disable_params.is_ungated == false) + return; + + if (dsc && dsc->funcs->dsc_disable) + dsc->funcs->dsc_disable(dsc); +} + +void hwss_dccg_set_ref_dscclk(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg; + int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst; + + if (!params->dccg_set_ref_dscclk_params.is_ungated) + return; + if (*params->dccg_set_ref_dscclk_params.is_ungated == false) + return; + + if (dccg && dccg->funcs->set_ref_dscclk) + dccg->funcs->set_ref_dscclk(dccg, dsc_inst); +} + 
+void hwss_dpp_pg_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dpp_pg_control_params.hws; + unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst; + bool power_on = params->dpp_pg_control_params.power_on; + + if (hws->funcs.dpp_pg_control) + hws->funcs.dpp_pg_control(hws, dpp_inst, power_on); +} + +void hwss_hubp_pg_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->hubp_pg_control_params.hws; + unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst; + bool power_on = params->hubp_pg_control_params.power_on; + + if (hws->funcs.hubp_pg_control) + hws->funcs.hubp_pg_control(hws, hubp_inst, power_on); +} + +void hwss_hubp_reset(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_reset_params.hubp; + + if (hubp && hubp->funcs->hubp_reset) + hubp->funcs->hubp_reset(hubp); +} + +void hwss_dpp_reset(union block_sequence_params *params) +{ + struct dpp *dpp = params->dpp_reset_params.dpp; + + if (dpp && dpp->funcs->dpp_reset) + dpp->funcs->dpp_reset(dpp); +} + +void hwss_dpp_root_clock_control(union block_sequence_params *params) +{ + struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws; + unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst; + bool clock_on = params->dpp_root_clock_control_params.clock_on; + + if (hws->funcs.dpp_root_clock_control) + hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on); +} + +void hwss_dc_ip_request_cntl(union block_sequence_params *params) +{ + struct dc *dc = params->dc_ip_request_cntl_params.dc; + bool enable = params->dc_ip_request_cntl_params.enable; + struct dce_hwseq *hws = dc->hwseq; + + if (hws->funcs.dc_ip_request_cntl) + hws->funcs.dc_ip_request_cntl(dc, enable); +} + +void hwss_dccg_update_dpp_dto(union block_sequence_params *params) +{ + struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg; + int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst; + int dppclk_khz = 
params->dccg_update_dpp_dto_params.dppclk_khz; + + if (dccg && dccg->funcs->update_dpp_dto) + dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz); +} + +void hwss_hubp_vtg_sel(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_vtg_sel_params.hubp; + uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst; + + if (hubp && hubp->funcs->hubp_vtg_sel) + hubp->funcs->hubp_vtg_sel(hubp, otg_inst); +} + +void hwss_hubp_setup2(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup2_params.hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs; + union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync; + struct dc_crtc_timing *timing = params->hubp_setup2_params.timing; + + if (hubp && hubp->funcs->hubp_setup2) + hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing); +} + +void hwss_hubp_setup(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_setup_params.hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs; + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest; + + if (hubp && hubp->funcs->hubp_setup) + hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest); +} + +void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp; + bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req; + + if (hubp && hubp->funcs->set_unbounded_requesting) + hubp->funcs->set_unbounded_requesting(hubp, unbounded_req); +} + +void hwss_hubp_setup_interdependent2(union block_sequence_params *params) +{ + struct hubp *hubp = 
params->hubp_setup_interdependent2_params.hubp;
+	struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs;
+
+	if (hubp && hubp->funcs->hubp_setup_interdependent2)
+		hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs);
+}
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params)
+{
+	struct hubp *hubp = params->hubp_setup_interdependent_params.hubp;
+	struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs;
+	struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs;
+
+	if (hubp && hubp->funcs->hubp_setup_interdependent)
+		hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs);
+}
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params)
+{
+	struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp;
+	enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space;
+	struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix;
+
+	if (dpp && dpp->funcs->set_cursor_matrix)
+		dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix);
+}
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params)
+{
+	struct dc *dc = params->mpc_update_mpcc_params.dc;
+	struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx;
+	struct dce_hwseq *hws = dc->hwseq;
+
+	if (hws->funcs.update_mpcc)
+		hws->funcs.update_mpcc(dc, pipe_ctx);
+}
+
+void hwss_mpc_update_blending(union block_sequence_params *params)
+{
+	struct mpc *mpc = params->mpc_update_blending_params.mpc;
+	struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg;
+	int mpcc_id = params->mpc_update_blending_params.mpcc_id;
+
+	if (mpc && mpc->funcs->update_blending)
+		mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
+}
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params)
+{
+	struct mpc *mpc =
params->mpc_assert_idle_mpcc_params.mpc;
+	int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id;
+
+	if (mpc && mpc->funcs->wait_for_idle)
+		mpc->funcs->wait_for_idle(mpc, mpcc_id);
+}
+
+void hwss_mpc_insert_plane(union block_sequence_params *params)
+{
+	struct mpc *mpc = params->mpc_insert_plane_params.mpc;
+	struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params;
+	struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg;
+	struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg;
+	struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc;
+	int mpcc_id = params->mpc_insert_plane_params.mpcc_id;
+	int dpp_id = params->mpc_insert_plane_params.dpp_id;
+
+	if (mpc && mpc->funcs->insert_plane)
+		mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc,
+				dpp_id, mpcc_id);
+}
+
+void hwss_dpp_set_scaler(union block_sequence_params *params)
+{
+	struct dpp *dpp = params->dpp_set_scaler_params.dpp;
+	const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data;
+
+	if (dpp && dpp->funcs->dpp_set_scaler)
+		dpp->funcs->dpp_set_scaler(dpp, scl_data);
+}
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
+{
+	struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp;
+	const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport;
+	const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c;
+
+	if (hubp && hubp->funcs->mem_program_viewport)
+		hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
+}
+
+void hwss_set_cursor_attribute(union block_sequence_params *params)
+{
+	struct dc *dc = params->set_cursor_attribute_params.dc;
+	struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx;
+
+	if (dc && dc->hwss.set_cursor_attribute)
+		dc->hwss.set_cursor_attribute(pipe_ctx);
+}
+
+void hwss_set_cursor_position(union block_sequence_params *params)
+{
+	struct dc *dc =
params->set_cursor_position_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_position) + dc->hwss.set_cursor_position(pipe_ctx); +} + +void hwss_set_cursor_sdr_white_level(union block_sequence_params *params) +{ + struct dc *dc = params->set_cursor_sdr_white_level_params.dc; + struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx; + + if (dc && dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); +} + +void hwss_program_output_csc(union block_sequence_params *params) +{ + struct dc *dc = params->program_output_csc_params.dc; + struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx; + enum dc_color_space colorspace = params->program_output_csc_params.colorspace; + uint16_t *matrix = params->program_output_csc_params.matrix; + int opp_id = params->program_output_csc_params.opp_id; + + if (dc && dc->hwss.program_output_csc) + dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id); +} + +void hwss_hubp_set_blank(union block_sequence_params *params) +{ + struct hubp *hubp = params->hubp_set_blank_params.hubp; + bool blank = params->hubp_set_blank_params.blank; + + if (hubp && hubp->funcs->set_blank) + hubp->funcs->set_blank(hubp, blank); +} + +void hwss_phantom_hubp_post_enable(union block_sequence_params *params) +{ + struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp; + + if (hubp && hubp->funcs->phantom_hubp_post_enable) + hubp->funcs->phantom_hubp_post_enable(hubp); +} + +void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, int inst, int num_slices_h) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = 
inst; + seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable; + seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params; + seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, int mpcc_inst, bool pending) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst; + 
seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT; + seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP; + seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state, + struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg; + seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT; + seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state, + struct dc *dc, bool optimized_required) +{ + if (*seq_state->num_steps < 
MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED; + seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = optimized_required; + (*seq_state->num_steps)++; + } +} + +void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE; + seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + struct tg_color solid_color, + bool use_solid_color, + int width, + int height, + int offset) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color; + 
seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height; + seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC update blending to block sequence + */ +void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpcc_blnd_cfg blnd_cfg, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg; + seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC insert plane to block sequence + */ +void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpc_tree *mpc_tree_params, + struct mpcc_blnd_cfg blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg; + 
seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id; + seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add MPC assert idle MPCC to block sequence + */ +void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC; + seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +/** + * Helper function to add HUBP set blank to block sequence + */ +void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool blank) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool use_default_params, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params; + 
seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state, + struct dc *dc, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL; + seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_update(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE; + seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb; + seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_buf_params, + unsigned int dest_height) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_arb_params *mcif_arb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB; + 
seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE; + seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + int dwb_id, + int mpcc_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id; + seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + unsigned int dwb_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX; + seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc; + seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id; + (*seq_state->num_steps)++; + } +} + +void 
hwss_add_dwbc_enable(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE; + seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb; + seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dwbc_disable(struct block_sequence_state *seq_state, + struct dwbc *dwb) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE; + seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct gsl_params gsl) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int group_idx, + uint32_t gsl_ready_signal) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx; + seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t mall_sel, + bool 
cache_cursor) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel; + seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING; + seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool disable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state, + struct 
hubbub *hubbub, + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset), + bool reset) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset; + seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL; + seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state, + struct dpp *dpp, + bool dppclk_div, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div; + seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC; + seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_pg_status(struct 
block_sequence_state *seq_state, + struct dce_hwseq *hws, + int dsc_inst, + bool is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst; + seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR; + seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc; + seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dsc_disable(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE; + seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc; + seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dsc_inst, + bool *is_ungated) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst; + 
seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst; + seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_init(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if 
(*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_INIT; + seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_reset(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_RESET; + seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_reset(struct block_sequence_state *seq_state, + struct dpp *dpp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_RESET; + seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool enable) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL; + seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp; + seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint64_t sys_default, + uint64_t sys_low, + uint64_t sys_high) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default; + 
seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low; + seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT; + seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dpp_inst, + int dppclk_khz) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst; + seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t otg_inst) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL; + seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs, + union dml2_global_sync_programming *global_sync, + struct dc_crtc_timing *timing) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + 
seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync; + seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool unbounded_req) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING; + seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct 
dml2_dchub_per_pipe_register_set *hubp_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs; + seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state, + struct hubp *hubp, + enum surface_pixel_format format, + struct dc_tiling_info *tiling_info, + struct plane_size plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + int compat_level) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size; + 
seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror; + seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP; + seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state, + struct dpp *dpp, + enum dc_color_space color_space, + struct dc_csc_transform *cursor_csc_color_matrix) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space; + seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix; + (*seq_state->num_steps)++; + } +} + +void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state, + struct dpp *dpp, + const struct scaler_data *scl_data) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER; + seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp; + seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data; + (*seq_state->num_steps)++; + } +} + +void 
hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state, + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport; + seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE; + seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION; + seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL; + seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc; + 
seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx; + (*seq_state->num_steps)++; + } +} + +void hwss_add_program_output_csc(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix; + seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id; + (*seq_state->num_steps)++; + } +} + +void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state, + struct hubp *hubp) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE; + seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp; + (*seq_state->num_steps)++; + } +} + +void hwss_add_update_force_pstate(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE; + seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc; + seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state, + struct hubbub *hubbub) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + 
seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA; + seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub; + (*seq_state->num_steps)++; + } +} + +void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool allow, + bool *disallow_self_refresh_applied) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow; + seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied; + (*seq_state->num_steps)++; + } +} + +void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int *frame_count) +{ + if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) { + seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT; + seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg; + seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count; + (*seq_state->num_steps)++; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index a180f68f711c..deb23d20bca6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link( struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) { struct link_encoder *link_enc = NULL; - enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS]; + enum engine_id 
encs_assigned[MAX_LINK_ENCODERS]; int i; - for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++) + for (i = 0; i < MAX_LINK_ENCODERS; i++) encs_assigned[i] = ENGINE_ID_UNKNOWN; /* Add assigned encoders to list. */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index bc5dedf5f60c..dc0c4065a92c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -95,7 +95,7 @@ #define DC_LOGGER \ dc->ctx->logger #define DC_LOGGER_INIT(logger) -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define UNABLE_TO_SPLIT -1 @@ -446,6 +446,14 @@ bool resource_construct( DC_ERR("DC: failed to create stream_encoder!\n"); pool->stream_enc_count++; } + + for (i = 0; i < caps->num_analog_stream_encoder; i++) { + pool->stream_enc[caps->num_stream_encoder + i] = + create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx); + if (pool->stream_enc[caps->num_stream_encoder + i] == NULL) + DC_ERR("DC: failed to create analog stream_encoder %d!\n", i); + pool->stream_enc_count++; + } } pool->hpo_dp_stream_enc_count = 0; @@ -2690,17 +2698,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link) } static inline int find_free_dio_link_enc(const struct resource_context *res_ctx, - const struct dc_link *link, const struct resource_pool *pool) + const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream) { - int i; + int i, j = -1; + int stream_enc_inst = -1; int enc_count = pool->dig_link_enc_count; - /* for dpia, check preferred encoder first and then the next one */ - for (i = 0; i < enc_count; i++) - if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0) - break; + /* Find stream encoder instance for the stream */ + if (stream) { + for (i = 0; i < pool->pipe_count; i++) { + if ((res_ctx->pipe_ctx[i].stream == stream) && + (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) { 
+ stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id; + break; + } + } + } - return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1; + /* Assign dpia preferred > stream enc instance > available */ + for (i = 0; i < enc_count; i++) { + if (res_ctx->dio_link_enc_ref_cnts[i] == 0) { + if (j == -1) + j = i; + + if (link->dpia_preferred_eng_id == i) { + j = i; + break; + } + + if (stream_enc_inst == i) { + j = stream_enc_inst; + } + } + } + return j; } static inline void acquire_dio_link_enc( @@ -2781,7 +2812,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc, retain_dio_link_enc(res_ctx, enc_index); } else { if (stream->link->is_dig_mapping_flexible) - enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool); + enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream); else { int link_index = 0; @@ -2791,7 +2822,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc, * one into the acquiring link. */ if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) { - int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool); + int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream); if (new_enc_index >= 0) swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index); @@ -5201,7 +5232,7 @@ struct link_encoder *get_temp_dio_link_enc( enc_index = link->eng_id; if (enc_index < 0) - enc_index = find_free_dio_link_enc(res_ctx, link, pool); + enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL); if (enc_index >= 0) link_enc = pool->link_encoders[enc_index]; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index c61300a7cb1c..2de8ef4a58ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -35,8 +35,8 @@ #include "link_enc_cfg.h" #if defined(CONFIG_DRM_AMD_DC_FP) 
-#include "dml2/dml2_wrapper.h" -#include "dml2/dml2_internal_types.h" +#include "dml2_0/dml2_wrapper.h" +#include "dml2_0/dml2_internal_types.h" #endif #define DC_LOGGER \ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 9ac2d41f8fca..6d309c320253 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status( return dc_state_get_stream_status(dc->current_state, stream); } +const struct dc_stream_status *dc_stream_get_status_const( + const struct dc_stream_state *stream) +{ + struct dc *dc = stream->ctx->dc; + + return dc_state_get_stream_status(dc->current_state, stream); +} + void program_cursor_attributes( struct dc *dc, struct dc_stream_state *stream) @@ -231,6 +239,7 @@ void program_cursor_attributes( int i; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; + bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); if (!stream) return; @@ -245,9 +254,14 @@ void program_cursor_attributes( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - dc->hwss.cursor_lock(dc, pipe_to_program, true); - if (pipe_to_program->next_odm_pipe) - dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); + + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) { + dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); + } else { + dc->hwss.cursor_lock(dc, pipe_to_program, true); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true); + } } dc->hwss.set_cursor_attribute(pipe_ctx); @@ -255,12 +269,18 @@ void program_cursor_attributes( dc_send_update_cursor_info_to_dmu(pipe_ctx, i); if (dc->hwss.set_cursor_sdr_white_level) dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe) + dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx); } if 
(pipe_to_program) { - dc->hwss.cursor_lock(dc, pipe_to_program, false); - if (pipe_to_program->next_odm_pipe) - dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) { + dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); + } else { + dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program->next_odm_pipe) + dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false); + } } } @@ -366,6 +386,7 @@ void program_cursor_position( int i; struct resource_context *res_ctx; struct pipe_ctx *pipe_to_program = NULL; + bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc); if (!stream) return; @@ -384,16 +405,27 @@ void program_cursor_position( if (!pipe_to_program) { pipe_to_program = pipe_ctx; - dc->hwss.cursor_lock(dc, pipe_to_program, true); + + if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) + dc->hwss.begin_cursor_offload_update(dc, pipe_ctx); + else + dc->hwss.cursor_lock(dc, pipe_to_program, true); } dc->hwss.set_cursor_position(pipe_ctx); + if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe) + dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx); + if (dc->ctx->dmub_srv) dc_send_update_cursor_info_to_dmu(pipe_ctx, i); } - if (pipe_to_program) - dc->hwss.cursor_lock(dc, pipe_to_program, false); + if (pipe_to_program) { + if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) + dc->hwss.commit_cursor_offload_update(dc, pipe_to_program); + else + dc->hwss.cursor_lock(dc, pipe_to_program, false); + } } bool dc_stream_set_cursor_position( @@ -855,9 +887,11 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->sink->sink_signal != SIGNAL_TYPE_NONE) { DC_LOG_DC( - "\tdispname: %s signal: %x\n", + "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n", + stream->signal, stream->sink->edid_caps.display_name, - stream->signal); + 
stream->sink->edid_caps.manufacturer_id, + stream->sink->edid_caps.product_id); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 98f0b6b3c213..010d9315b96b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -42,7 +42,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dmub/inc/dmub_cmd.h" @@ -54,8 +54,16 @@ struct abm_save_restore; struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; +struct dcn_hubbub_reg_state; +struct dcn_hubp_reg_state; +struct dcn_dpp_reg_state; +struct dcn_mpc_reg_state; +struct dcn_opp_reg_state; +struct dcn_dsc_reg_state; +struct dcn_optc_reg_state; +struct dcn_dccg_reg_state; -#define DC_VER "3.2.351" +#define DC_VER "3.2.356" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC @@ -278,6 +286,15 @@ struct dc_scl_caps { bool sharpener_support; }; +struct dc_check_config { + /** + * max video plane width that can be safely assumed to be always + * supported by single DPP pipe. + */ + unsigned int max_optimizable_video_width; + bool enable_legacy_fast_update; +}; + struct dc_caps { uint32_t max_streams; uint32_t max_links; @@ -293,11 +310,6 @@ struct dc_caps { unsigned int max_cursor_size; unsigned int max_buffered_cursor_size; unsigned int max_video_width; - /* - * max video plane width that can be safely assumed to be always - * supported by single DPP pipe. 
- */ - unsigned int max_optimizable_video_width; unsigned int min_horizontal_blanking_period; int linear_pitch_alignment; bool dcc_const_color; @@ -455,6 +467,19 @@ enum surface_update_type { UPDATE_TYPE_FULL, /* may need to shuffle resources */ }; +enum dc_lock_descriptor { + LOCK_DESCRIPTOR_NONE = 0x0, + LOCK_DESCRIPTOR_STATE = 0x1, + LOCK_DESCRIPTOR_LINK = 0x2, + LOCK_DESCRIPTOR_STREAM = 0x4, + LOCK_DESCRIPTOR_PLANE = 0x8, +}; + +struct surface_update_descriptor { + enum surface_update_type update_type; + enum dc_lock_descriptor lock_descriptor; +}; + /* Forward declaration*/ struct dc; struct dc_plane_state; @@ -530,6 +555,7 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + bool enable_cursor_offload; struct spl_sharpness_range dcn_sharpness_range; struct spl_sharpness_range dcn_override_sharpness_range; }; @@ -849,8 +875,7 @@ union dpia_debug_options { uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ - uint32_t enable_bw_allocation_mode:1; /* bit 7 */ - uint32_t reserved:24; + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -1120,7 +1145,6 @@ struct dc_debug_options { uint32_t fpo_vactive_min_active_margin_us; uint32_t fpo_vactive_max_blank_us; bool enable_hpo_pg_support; - bool enable_legacy_fast_update; bool disable_dc_mode_overwrite; bool replay_skip_crtc_disabled; bool ignore_pg;/*do nothing, let pmfw control it*/ @@ -1152,7 +1176,6 @@ struct dc_debug_options { bool enable_ips_visual_confirm; unsigned int sharpen_policy; unsigned int scale_to_sharpness_policy; - bool skip_full_updated_if_possible; unsigned int enable_oled_edp_power_up_opt; bool enable_hblank_borrow; bool force_subvp_df_throttle; @@ -1164,6 +1187,7 @@ struct dc_debug_options { unsigned int auxless_alpm_lfps_t1t2_us; short auxless_alpm_lfps_t1t2_offset_us; bool disable_stutter_for_wm_program; + bool 
enable_block_sequence_programming; }; @@ -1702,6 +1726,7 @@ struct dc { struct dc_debug_options debug; struct dc_versions versions; struct dc_caps caps; + struct dc_check_config check_config; struct dc_cap_funcs cap_funcs; struct dc_config config; struct dc_bounding_box_overrides bb_overrides; @@ -1830,20 +1855,14 @@ struct dc_surface_update { }; struct dc_underflow_debug_data { - uint32_t otg_inst; - uint32_t otg_underflow; - uint32_t h_position; - uint32_t v_position; - uint32_t otg_frame_count; - struct dc_underflow_per_hubp_debug_data { - uint32_t hubp_underflow; - uint32_t hubp_in_blank; - uint32_t hubp_readline; - uint32_t det_config_error; - } hubps[MAX_PIPES]; - uint32_t curr_det_sizes[MAX_PIPES]; - uint32_t target_det_sizes[MAX_PIPES]; - uint32_t compbuf_config_error; + struct dcn_hubbub_reg_state *hubbub_reg_state; + struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES]; + struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES]; + struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES]; + struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES]; + struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES]; + struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES]; + struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES]; }; /* @@ -2721,6 +2740,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index); +void dc_log_preos_dmcub_info(const struct dc *dc); + /* DSC Interfaces */ #include "dc_dsc.h" @@ -2736,7 +2757,7 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); bool dc_is_cursor_limit_pending(struct dc *dc); -bool dc_can_clear_cursor_limit(struct dc *dc); +bool dc_can_clear_cursor_limit(const struct dc *dc); /** * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data. 
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 5fa5e2b63fb7..40d7a7d83c40 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -91,9 +91,17 @@ struct dc_vbios_funcs { struct device_id id); /* COMMANDS */ + enum bp_result (*select_crtc_source)( + struct dc_bios *bios, + struct bp_crtc_source_select *bp_params); enum bp_result (*encoder_control)( struct dc_bios *bios, struct bp_encoder_control *cntl); + enum bp_result (*dac_load_detection)( + struct dc_bios *bios, + enum engine_id engine_id, + enum dal_device_type device_type, + uint32_t enum_id); enum bp_result (*transmitter_control)( struct dc_bios *bios, struct bp_transmitter_control *cntl); @@ -165,6 +173,7 @@ struct dc_vbios_funcs { }; struct bios_registers { + uint32_t BIOS_SCRATCH_0; uint32_t BIOS_SCRATCH_3; uint32_t BIOS_SCRATCH_6; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 53a088ebddef..fffbf1983143 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru int i = 0, k = 0; int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it. 
uint8_t visual_confirm_enabled; - int pipe_idx = 0; struct dc_stream_status *stream_status = NULL; if (dc == NULL) @@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled; if (should_manage_pstate) { - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) @@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us; break; } - pipe_idx++; } } @@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, bool enable) { uint8_t cmd_pipe_index = 0; - uint32_t i, pipe_idx; + uint32_t i; uint8_t subvp_count = 0; union dmub_rb_cmd cmd; struct pipe_ctx *subvp_pipes[2]; @@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, if (enable) { // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); @@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc, populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); } - pipe_idx++; } if (subvp_count == 2) { update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes); @@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index); } +void dc_dmub_srv_cursor_offload_init(struct dc *dc) +{ + struct dmub_rb_cmd_cursor_offload_init *init; + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + 
union dmub_rb_cmd cmd; + + if (!dc->config.enable_cursor_offload) + return; + + if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support) + return; + + if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr) + return; + + if (!dc_dmub_srv->dmub->cursor_offload_v1) + return; + + if (!dc_dmub_srv->dmub->shared_state) + return; + + memset(&cmd, 0, sizeof(cmd)); + + init = &cmd.cursor_offload_init; + init->header.type = DMUB_CMD__CURSOR_OFFLOAD; + init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT; + init->header.payload_bytes = sizeof(init->init_data); + init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr; + init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); + + dc_dmub_srv->cursor_offload_enabled = true; +} + +void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream, bool enable) +{ + struct pipe_ctx const *pipe_ctx; + struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl; + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + if (!stream) + return; + + pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); + if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cntl = &cmd.cursor_offload_stream_ctnl; + cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD; + cntl->header.sub_type = + enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE; + cntl->header.payload_bytes = sizeof(cntl->data); + + cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst; + cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull, + stream->timing.pix_clk_100hz / 10)); + + cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ? 
+ stream->adjust.v_total_max : + stream->timing.v_total; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, + enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe) +{ + struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl; + union dmub_rb_cmd cmd; + + if (!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + if (!pipe || !pipe->stream || !pipe->stream_res.tg) + return; + + memset(&cmd, 0, sizeof(cmd)); + + cntl = &cmd.cursor_offload_stream_ctnl; + cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD; + cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM; + cntl->header.payload_bytes = sizeof(cntl->data); + cntl->data.otg_inst = pipe->stream_res.tg->inst; + + dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); +} + bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) { struct dc_context *dc_ctx; @@ -2231,6 +2322,11 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin return result; } +bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc) +{ + return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled; +} + void dc_dmub_srv_release_hw(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; @@ -2248,3 +2344,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc) dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } + +void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_srv *dmub; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + dmub = dc_dmub_srv->dmub; + + if (dmub_srv_get_preos_info(dmub)) { + DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__); + DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version); + DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options); + DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status); + DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", 
dmub->preos_info.trace_buffer_phy_addr); + DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size); + DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base); + DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 7ef93444ef3c..72e0a41f39f0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -56,6 +56,7 @@ struct dc_dmub_srv { union dmub_shared_state_ips_driver_signals driver_signals; bool idle_allowed; bool needs_idle_wake; + bool cursor_offload_enabled; }; bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv); @@ -325,10 +326,52 @@ bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t struct dmub_ips_residency_info *driver_info, enum ips_residency_mode ips_mode); +/** + * dc_dmub_srv_cursor_offload_init() - Enables or disables cursor offloading for a stream. + * + * @dc: pointer to DC object + */ +void dc_dmub_srv_cursor_offload_init(struct dc *dc); + +/** + * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream. + * + * @dc: pointer to DC object + * @context: the DC context to reference for pipe allocations + * @stream: the stream to control + * @enable: true to enable cursor offload, false to disable + */ +void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream, bool enable); + +/** + * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe. + * + * @dc: pointer to DC object + * @pipe: top-most pipe for a stream. + */ +void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe); + +/** + * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload is supported. 
+ * + * @dc: pointer to DC object + * + * Return: true if cursor offload is supported, false otherwise + */ +bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc); + /** * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. * * @dc - pointer to DC object */ void dc_dmub_srv_release_hw(const struct dc *dc); + +/** + * dc_dmub_srv_log_preos_dmcub_info() - Logs preos dmcub fw info. + * + * @dc - pointer to DC object + */ +void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index 55704d4457ef..37d1a79e8241 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->prefer_easf = false; else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2) spl_in->disable_easf = true; + else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3) + spl_in->override_easf = true; /* Translate adaptive sharpening preference */ unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness; unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 76cf9fdedab0..321cfe92d799 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -473,12 +473,11 @@ void dc_enable_stereo( /* Triggers multi-stream synchronization. 
*/ void dc_trigger_sync(struct dc *dc, struct dc_state *context); -enum surface_update_type dc_check_update_surfaces_for_stream( - struct dc *dc, +struct surface_update_descriptor dc_check_update_surfaces_for_stream( + const struct dc_check_config *check_config, struct dc_surface_update *updates, int surface_count, - struct dc_stream_update *stream_update, - const struct dc_stream_status *stream_status); + struct dc_stream_update *stream_update); /** * Create a new default stream for the requested sink @@ -492,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state *dc_stream); -struct dc_stream_status *dc_stream_get_status( - struct dc_stream_state *dc_stream); +struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream); +const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream); /******************************************************************************* * Cursor interfaces - To manages the cursor within a stream diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b5aa03a3e39c..ea6b71c43d2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -185,6 +185,10 @@ struct dc_panel_patch { unsigned int wait_after_dpcd_poweroff_ms; }; +/** + * struct dc_edid_caps - Capabilities read from EDID. + * @analog: Whether the monitor is analog. Used by DVI-I handling. 
+ */ struct dc_edid_caps { /* sink identification */ uint16_t manufacturer_id; @@ -212,6 +216,8 @@ struct dc_edid_caps { bool edid_hdmi; bool hdr_supported; bool rr_capable; + bool scdc_present; + bool analog; struct dc_panel_patch panel_patch; }; @@ -347,7 +353,8 @@ enum dc_connection_type { dc_connection_none, dc_connection_single, dc_connection_mst_branch, - dc_connection_sst_branch + dc_connection_sst_branch, + dc_connection_dac_load }; struct dc_csc_adjustments { diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c index 5999b2da3a01..33d8bd91cb01 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c @@ -148,7 +148,7 @@ struct dccg *dccg2_create( const struct dccg_shift *dccg_shift, const struct dccg_mask *dccg_mask) { - struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC); + struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL); struct dccg *base; if (dccg_dcn == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h index a9b88f5e0c04..8bdffd9ff31b 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h @@ -425,7 +425,69 @@ struct dccg_mask { uint32_t SYMCLKD_CLOCK_ENABLE; \ uint32_t SYMCLKE_CLOCK_ENABLE; \ uint32_t DP_DTO_MODULO[MAX_PIPES]; \ - uint32_t DP_DTO_PHASE[MAX_PIPES] + uint32_t DP_DTO_PHASE[MAX_PIPES]; \ + uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \ + uint32_t DCCG_AUDIO_DTO0_MODULE; \ + uint32_t DCCG_AUDIO_DTO0_PHASE; \ + uint32_t DCCG_AUDIO_DTO1_MODULE; \ + uint32_t DCCG_AUDIO_DTO1_PHASE; \ + uint32_t DCCG_CAC_STATUS; \ + uint32_t DCCG_CAC_STATUS2; \ + uint32_t DCCG_DISP_CNTL_REG; \ + uint32_t DCCG_DS_CNTL; \ + uint32_t DCCG_DS_DTO_INCR; \ + uint32_t DCCG_DS_DTO_MODULO; \ + uint32_t DCCG_DS_HW_CAL_INTERVAL; \ + uint32_t 
DCCG_GTC_CNTL; \ + uint32_t DCCG_GTC_CURRENT; \ + uint32_t DCCG_GTC_DTO_INCR; \ + uint32_t DCCG_GTC_DTO_MODULO; \ + uint32_t DCCG_PERFMON_CNTL; \ + uint32_t DCCG_PERFMON_CNTL2; \ + uint32_t DCCG_SOFT_RESET; \ + uint32_t DCCG_TEST_CLK_SEL; \ + uint32_t DCCG_VSYNC_CNT_CTRL; \ + uint32_t DCCG_VSYNC_CNT_INT_CTRL; \ + uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \ + uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \ + uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DP_DTO_DBUF_EN; \ + uint32_t DPIACLK_540M_DTO_MODULO; \ + uint32_t DPIACLK_540M_DTO_PHASE; \ + uint32_t DPIACLK_810M_DTO_MODULO; \ + uint32_t DPIACLK_810M_DTO_PHASE; \ + uint32_t DPIACLK_DTO_CNTL; \ + uint32_t DPIASYMCLK_CNTL; \ + uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \ + uint32_t DPREFCLK_CNTL; \ + uint32_t DTBCLK_DTO_DBUF_EN; \ + uint32_t FORCE_SYMCLK_DISABLE; \ + uint32_t HDMICHARCLK0_CLOCK_CNTL; \ + uint32_t MICROSECOND_TIME_BASE_DIV; \ + uint32_t MILLISECOND_TIME_BASE_DIV; \ + uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG0_PIXEL_RATE_CNTL; \ + uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG1_PIXEL_RATE_CNTL; \ + uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG2_PIXEL_RATE_CNTL; \ + uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \ + uint32_t OTG3_PIXEL_RATE_CNTL; \ + uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \ + uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \ + uint32_t REFCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \ + uint32_t SYMCLK_PSP_CNTL + struct dccg_registers { DCCG_REG_VARIABLE_LIST; }; diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c index 
8664f0c4c9b7..97df04b7e39d 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c @@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg, OTG_DROP_PIXEL[otg_inst], 1); } +void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL); + dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO); + dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE); + dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE); + dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE); + dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE); + dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE); + dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE); + dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS); + dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2); + dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG); + dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL); + dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR); + dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO); + dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL); + dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL); + dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2); + dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3); + dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4); + dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5); + dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6); + 
dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL); + dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL); + dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT); + dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR); + dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO); + dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL); + dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2); + dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET); + dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL); + dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL); + dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL); + dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE); + dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE); + dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL); + dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN); + dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]); + dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]); + dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]); + dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]); + dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]); + dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]); + dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]); + dccg_reg_state->dp_dto3_phase = 
REG_READ(DP_DTO_PHASE[3]); + dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO); + dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE); + dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO); + dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE); + dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL); + dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL); + dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL); + dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL); + dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]); + dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]); + dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]); + dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]); + dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL); + dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL); + dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL); + dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM); + dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM); + dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM); + dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM); + dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN); + dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]); + dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]); + dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]); + dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]); + dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]); + dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]); + 
dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]); + dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]); + dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL); + dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE); + dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL); + dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL); + dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM); + dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV); + dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV); + dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV); + dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL); + dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL); + dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL); + dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL); + dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL); + dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL); + dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL); + dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL); + dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL); + dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL); + dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phypllc_pixclk_resync_cntl = 
REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL); + dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL); + dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG); + dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL); + dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL); + dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL); + dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE); + dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE); + dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE); + dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE); + dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE); +} + static const struct dccg_funcs dccg31_funcs = { .update_dpp_dto = dccg31_update_dpp_dto, .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, @@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = { .set_dispclk_change_mode = dccg31_set_dispclk_change_mode, .disable_dsc = dccg31_disable_dscclk, .enable_dsc = dccg31_enable_dscclk, + .dccg_read_reg_state = dccg31_read_reg_state, }; struct dccg *dccg31_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h index cd261051dc2c..bf659920d4cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h @@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst); void dccg31_enable_dscclk(struct dccg *dccg, int inst); +void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state); + #endif //__DCN31_DCCG_H__ diff --git 
a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c index 8f6edd8e9beb..ef3db6beba25 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c @@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = { .get_pixel_rate_div = dccg314_get_pixel_rate_div, .trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync, .set_valid_pixel_rate = dccg314_set_valid_pixel_rate, - .set_dtbclk_p_src = dccg314_set_dtbclk_p_src + .set_dtbclk_p_src = dccg314_set_dtbclk_p_src, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg314_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h index 60ea1d248deb..a609635f35db 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h @@ -74,8 +74,7 @@ SR(DCCG_GATE_DISABLE_CNTL3),\ SR(HDMISTREAMCLK0_DTO_PARAM),\ SR(OTG_PIXEL_RATE_DIV),\ - SR(DTBCLK_P_CNTL),\ - SR(DCCG_AUDIO_DTO_SOURCE) + SR(DTBCLK_P_CNTL) #define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c index de6d62401362..bd2f528137b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c @@ -1114,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg) if (dispclk_rdivider_value != 0) REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value); } +static void dccg35_wait_for_dentist_change_done( + struct dccg *dccg) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL); + + REG_WRITE(DENTIST_DISPCLK_CNTL, 
dentist_dispclk_value); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000); +} static void dcn35_set_dppclk_enable(struct dccg *dccg, uint32_t dpp_inst, uint32_t enable) @@ -1174,9 +1184,9 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, dcn35_set_dppclk_enable(dccg, dpp_inst, true); } else { dcn35_set_dppclk_enable(dccg, dpp_inst, false); - /*we have this in hwss: disable_plane*/ - //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } + udelay(10); dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } @@ -1300,6 +1310,8 @@ static void dccg35_set_pixel_rate_div( BREAK_TO_DEBUGGER(); return; } + if (otg_inst < 4) + dccg35_wait_for_dentist_change_done(dccg); } static void dccg35_set_dtbclk_p_src( @@ -1411,7 +1423,7 @@ static void dccg35_set_dtbclk_dto( __func__, params->otg_inst, params->pixclk_khz, params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo); - } else { + } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) { switch (params->otg_inst) { case 0: REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0); @@ -1664,7 +1676,7 @@ static void dccg35_dpp_root_clock_control( { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - if (dccg->dpp_clock_gated[dpp_inst] == clock_on) + if (dccg->dpp_clock_gated[dpp_inst] != clock_on) return; if (clock_on) { @@ -1682,9 +1694,12 @@ static void dccg35_dpp_root_clock_control( DPPCLK0_DTO_PHASE, 0, DPPCLK0_DTO_MODULO, 1); /*we have this in hwss: disable_plane*/ - //dccg35_set_dppclk_rcg(dccg, dpp_inst, true); + dccg35_set_dppclk_rcg(dccg, dpp_inst, true); } + // wait for clock to fully ramp + udelay(10); + dccg->dpp_clock_gated[dpp_inst] = !clock_on; DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on); } @@ -2438,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = { .disable_symclk_se = dccg35_disable_symclk_se, .set_dtbclk_p_src = dccg35_set_dtbclk_p_src, .dccg_root_gate_disable_control = 
dccg35_root_gate_disable_control, + .dccg_read_reg_state = dccg31_read_reg_state, }; struct dccg *dccg35_create( diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h index 51f98c5c51c4..7b9c36456cd9 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h @@ -41,8 +41,9 @@ SR(SYMCLKA_CLOCK_ENABLE),\ SR(SYMCLKB_CLOCK_ENABLE),\ SR(SYMCLKC_CLOCK_ENABLE),\ - SR(SYMCLKD_CLOCK_ENABLE),\ - SR(SYMCLKE_CLOCK_ENABLE) + SR(SYMCLKD_CLOCK_ENABLE), \ + SR(SYMCLKE_CLOCK_ENABLE),\ + SR(SYMCLK_PSP_CNTL) #define DCCG_MASK_SH_LIST_DCN35(mask_sh) \ DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\ @@ -231,6 +232,14 @@ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\ + DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\ struct dccg *dccg35_create( struct dc_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index 0b8ed9b94d3c..663a18ee5162 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = { .enable_symclk_se = 
dccg401_enable_symclk_se, .disable_symclk_se = dccg401_disable_symclk_se, .set_dtbclk_p_src = dccg401_set_dtbclk_p_src, + .dccg_read_reg_state = dccg31_read_reg_state }; struct dccg *dccg401_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index a6006776333d..2dcf394edf22 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -283,7 +283,7 @@ struct abm *dce_abm_create( const struct dce_abm_shift *abm_shift, const struct dce_abm_mask *abm_mask) { - struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC); + struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL); if (abm_dce == NULL) { BREAK_TO_DEBUGGER(); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index a8e79104b684..5f8fba45d98d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); @@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); @@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create( const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_mask *dmcu_mask) { - struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC); + struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL); if (dmcu_dce == NULL) { BREAK_TO_DEBUGGER(); diff --git 
a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 0c50fe266c8a..87dbb8d7ed27 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -302,6 +302,10 @@ static void setup_panel_mode( if (ctx->dc->caps.psp_setup_panel_mode) return; + /* The code below is only applicable to encoders with a digital transmitter. */ + if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) + return; + ASSERT(REG(DP_DPHY_INTERNAL_CTRL)); value = REG_READ(DP_DPHY_INTERNAL_CTRL); @@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output( return true; } +static bool dce110_link_encoder_validate_rgb_output( + const struct dce110_link_encoder *enc110, + const struct dc_crtc_timing *crtc_timing) +{ + /* When the VBIOS doesn't specify any limits, use 400 MHz. + * The value comes from amdgpu_atombios_get_clock_info. + */ + uint32_t max_pixel_clock_khz = 400000; + + if (enc110->base.ctx->dc_bios->fw_info_valid && + enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) { + max_pixel_clock_khz = + enc110->base.ctx->dc_bios->fw_info.max_pixel_clock; + } + + if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10) + return false; + + if (crtc_timing->display_color_depth != COLOR_DEPTH_888) + return false; + + if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB) + return false; + + return true; +} + void dce110_link_encoder_construct( struct dce110_link_encoder *enc110, const struct encoder_init_data *init_data, @@ -824,6 +855,7 @@ void dce110_link_encoder_construct( enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + enc110->base.analog_engine = init_data->analog_engine; enc110->base.features = *enc_features; @@ -847,6 +879,11 @@ void dce110_link_encoder_construct( SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; + if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII || + enc110->base.connector.id == 
CONNECTOR_ID_SINGLE_LINK_DVII) && + enc110->base.analog_engine != ENGINE_ID_UNKNOWN) + enc110->base.output_signals |= SIGNAL_TYPE_RGB; + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer @@ -885,6 +922,13 @@ void dce110_link_encoder_construct( enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: + if (init_data->analog_engine != ENGINE_ID_UNKNOWN) { + /* The connector is analog-only, ie. VGA */ + enc110->base.preferred_engine = init_data->analog_engine; + enc110->base.output_signals = SIGNAL_TYPE_RGB; + enc110->base.transmitter = TRANSMITTER_UNKNOWN; + break; + } ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } @@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream( is_valid = dce110_link_encoder_validate_dp_output( enc110, &stream->timing); break; + case SIGNAL_TYPE_RGB: + is_valid = dce110_link_encoder_validate_rgb_output( + enc110, &stream->timing); + break; case SIGNAL_TYPE_EDP: case SIGNAL_TYPE_LVDS: is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB; @@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init( cntl.coherent = false; cntl.hpd_sel = enc110->base.hpd_source; + /* The code below is only applicable to encoders with a digital transmitter. */ + if (enc110->base.transmitter == TRANSMITTER_UNKNOWN) + return; + if (enc110->base.connector.id == CONNECTOR_ID_EDP) cntl.signal = SIGNAL_TYPE_EDP; @@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup( /* DP MST */ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5); break; + case SIGNAL_TYPE_RGB: + break; default: ASSERT_CRITICAL(false); /* invalid mode ! 
*/ @@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; + switch (enc->analog_engine) { + case ENGINE_ID_DACA: + REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0); + break; + case ENGINE_ID_DACB: + /* DACB doesn't seem to be present on DCE6+, + * although there are references to it in the register file. + */ + DC_LOG_ERROR("%s DACB is unsupported\n", __func__); + break; + default: + break; + } + + /* The code below only applies to connectors that support digital signals. */ + if (enc->transmitter == TRANSMITTER_UNKNOWN) + return; + if (!dce110_is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ return; @@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct( enc110->base.connector = init_data->connector; enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; + enc110->base.analog_engine = init_data->analog_engine; enc110->base.features = *enc_features; @@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct( SIGNAL_TYPE_EDP | SIGNAL_TYPE_HDMI_TYPE_A; + if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII || + enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + enc110->base.analog_engine != ENGINE_ID_UNKNOWN) + enc110->base.output_signals |= SIGNAL_TYPE_RGB; + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer @@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct( enc110->base.preferred_engine = ENGINE_ID_DIGG; break; default: + if (init_data->analog_engine != ENGINE_ID_UNKNOWN) { + /* The connector is analog-only, ie. 
VGA */ + enc110->base.preferred_engine = init_data->analog_engine; + enc110->base.output_signals = SIGNAL_TYPE_RGB; + enc110->base.transmitter = TRANSMITTER_UNKNOWN; + break; + } ASSERT_CRITICAL(false); enc110->base.preferred_engine = ENGINE_ID_UNKNOWN; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 261c70e01e33..c58b69bc319b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -101,18 +101,21 @@ SRI(DP_SEC_CNTL, DP, id), \ SRI(DP_VID_STREAM_CNTL, DP, id), \ SRI(DP_DPHY_FAST_TRAINING, DP, id), \ - SRI(DP_SEC_CNTL1, DP, id) + SRI(DP_SEC_CNTL1, DP, id), \ + SR(DAC_ENABLE) #endif #define LE_DCE80_REG_LIST(id)\ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ - LE_COMMON_REG_LIST_BASE(id) + LE_COMMON_REG_LIST_BASE(id), \ + SR(DAC_ENABLE) #define LE_DCE100_REG_LIST(id)\ LE_COMMON_REG_LIST_BASE(id), \ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ - SR(DCI_MEM_PWR_STATUS) + SR(DCI_MEM_PWR_STATUS), \ + SR(DAC_ENABLE) #define LE_DCE110_REG_LIST(id)\ LE_COMMON_REG_LIST_BASE(id), \ @@ -181,6 +184,9 @@ struct dce110_link_enc_registers { uint32_t DP_DPHY_BS_SR_SWAP_CNTL; uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; uint32_t DP_SEC_CNTL1; + + /* DAC registers */ + uint32_t DAC_ENABLE; }; struct dce110_link_encoder { @@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output( enum signal_type signal, const struct dc_crtc_timing *crtc_timing); -bool dce110_link_encoder_validate_rgb_output( - const struct dce110_link_encoder *enc110, - const struct dc_crtc_timing *crtc_timing); - bool dce110_link_encoder_validate_dp_output( const struct dce110_link_encoder *enc110, const struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 1130d7619b26..f8996ee2856b 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct( enc110->se_shift = se_shift; enc110->se_mask = se_mask; } + +static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {0}; + +void dce110_analog_stream_encoder_construct( + struct dce110_stream_encoder *enc110, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id) +{ + enc110->base.funcs = &dce110_an_str_enc_funcs; + enc110->base.ctx = ctx; + enc110->base.id = eng_id; + enc110->base.bp = bp; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h index cc5020a8e1e1..068de1392121 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h @@ -708,6 +708,11 @@ void dce110_stream_encoder_construct( const struct dce_stream_encoder_shift *se_shift, const struct dce_stream_encoder_mask *se_mask); +void dce110_analog_stream_encoder_construct( + struct dce110_stream_encoder *enc110, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id); void dce110_se_audio_mute_control( struct stream_encoder *enc, bool mute); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index d37ecfdde4f1..39f5fa73c43e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -61,27 +61,30 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } -bool should_use_dmub_lock(struct dc_link *link) +bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link) { /* ASIC doesn't support DMUB */ - if (!link->ctx->dmub_srv) + if (!dc->ctx->dmub_srv) return false; - if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) - 
return true; + if (link) { - if (link->replay_settings.replay_feature_enabled) - return true; - - /* only use HW lock for PSR1 on single eDP */ - if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { - struct dc_link *edp_links[MAX_NUM_EDP]; - int edp_num; - - dc_get_edp_links(link->dc, edp_links, &edp_num); - - if (edp_num == 1) + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; + + if (link->replay_settings.replay_feature_enabled) + return true; + + /* only use HW lock for PSR1 on single eDP */ + if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { + struct dc_link *edp_links[MAX_NUM_EDP]; + int edp_num; + + dc_get_edp_links(dc, edp_links, &edp_num); + + if (edp_num == 1) + return true; + } } return false; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h index 5a72b168fb4a..9f53d2ea5fa5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h @@ -37,6 +37,14 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv, void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd); -bool should_use_dmub_lock(struct dc_link *link); +/** + * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used. 
+ * + * @dc: pointer to DC object + * @link: optional pointer to the link object to check for enabled link features + * + * Return: true if the inbox1 lock should be used, false otherwise + */ +bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link); #endif /*_DMUB_HW_LOCK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile deleted file mode 100644 index 4c21ce42054c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ /dev/null @@ -1,141 +0,0 @@ -# SPDX-License-Identifier: MIT */ -# -# Copyright 2023 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# Authors: AMD -# -# Makefile for dml2. 
- -dml2_ccflags := $(CC_FLAGS_FPU) -dml2_rcflags := $(CC_FLAGS_NO_FPU) - -ifneq ($(CONFIG_FRAME_WARN),0) - ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) - ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy) - frame_warn_limit := 4096 - else - frame_warn_limit := 3072 - endif - else - frame_warn_limit := 2048 - endif - - ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y) - frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit) - endif -endif - -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/ -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc -subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/ - -CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags) - -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags) 
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags) - -DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \ - dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \ - dml_display_rq_dlg_calc.o - -AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DML2) - -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) 
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) - -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) - -DML21 := src/dml2_top/dml2_top_interfaces.o -DML21 += src/dml2_top/dml2_top_soc15.o 
-DML21 += src/dml2_core/dml2_core_dcn4.o -DML21 += src/dml2_core/dml2_core_utils.o -DML21 += src/dml2_core/dml2_core_factory.o -DML21 += src/dml2_core/dml2_core_dcn4_calcs.o -DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o -DML21 += src/dml2_dpmm/dml2_dpmm_factory.o -DML21 += src/dml2_mcg/dml2_mcg_dcn4.o -DML21 += src/dml2_mcg/dml2_mcg_factory.o -DML21 += src/dml2_pmo/dml2_pmo_dcn3.o -DML21 += src/dml2_pmo/dml2_pmo_factory.o -DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o -DML21 += src/dml2_standalone_libraries/lib_float_math.o -DML21 += dml21_translation_helper.o -DML21 += dml21_wrapper.o -DML21 += dml21_utils.o - -AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21)) - -AMD_DISPLAY_FILES += $(AMD_DAL_DML21) - diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile new file mode 100644 index 000000000000..97e068b6bf6b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: MIT */ +# +# Copyright 2023 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Authors: AMD +# +# Makefile for dml2. + +dml2_ccflags := $(CC_FLAGS_FPU) +dml2_rcflags := $(CC_FLAGS_NO_FPU) + +ifneq ($(CONFIG_FRAME_WARN),0) + ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y) + ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy) + frame_warn_limit := 4096 + else + frame_warn_limit := 3072 + endif + else + frame_warn_limit := 2056 + endif + + ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y) + frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit) + endif +endif + +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0 +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/ +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/ + +CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags) 
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags) + +DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \ + dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \ + dml_display_rq_dlg_calc.o + +AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DML2) + +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) 
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags) + +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) 
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags) + +DML21 := src/dml2_top/dml2_top_interfaces.o +DML21 += src/dml2_top/dml2_top_soc15.o +DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o +DML21 += src/dml2_core/dml2_core_factory.o +DML21 += src/dml2_core/dml2_core_dcn4_calcs.o +DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o +DML21 += src/dml2_dpmm/dml2_dpmm_factory.o +DML21 += src/dml2_mcg/dml2_mcg_dcn4.o +DML21 += src/dml2_mcg/dml2_mcg_factory.o +DML21 += src/dml2_pmo/dml2_pmo_dcn3.o +DML21 += src/dml2_pmo/dml2_pmo_factory.o +DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o +DML21 += src/dml2_standalone_libraries/lib_float_math.o +DML21 += dml21_translation_helper.o +DML21 += dml21_wrapper.o +DML21 += dml21_utils.o + +AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DML21) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h similarity index 93% rename from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h index e450445bc05d..b954c9648fbe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h @@ -53,17 +53,17 @@ typedef const void *const_pvoid; typedef const char *const_pchar; typedef struct rgba_struct { - uint8 a; - uint8 r; - uint8 g; - uint8 b; + uint8 a; + uint8 r; + uint8 g; + uint8 b; } rgba_t; typedef struct { - uint8 blue; - uint8 green; - uint8 red; - uint8 alpha; + uint8 blue; + uint8 green; + uint8 red; + uint8 alpha; } gen_color_t; typedef union { @@ -87,7 +87,7 @@ typedef union { } uintfloat64; 
#ifndef UNREFERENCED_PARAMETER -#define UNREFERENCED_PARAMETER(x) x = x +#define UNREFERENCED_PARAMETER(x) (x = x) #endif #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c index 4b9b2e84d381..c468f492b876 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c @@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe); } + #define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \ { \ dml_uint_t plane_idx; \ @@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow); dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte); dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte); + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h index 
dbeb08466092..3b1d92e7697f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h @@ -274,7 +274,6 @@ enum dml_clk_cfg_policy { dml_use_state_freq = 2 }; - struct soc_state_bounding_box_st { dml_float_t socclk_mhz; dml_float_t dscclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h similarity index 95% rename from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h index 14d389525296..e574c81edf5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h @@ -52,7 +52,7 @@ #define __DML_VBA_DEBUG__ #define __DML_VBA_ENABLE_INLINE_CHECK_ 0 #define __DML_VBA_MIN_VSTARTUP__ 9 //config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); /* Populate stream, plane mappings and other fields in display config. 
*/ - DC_FP_START(); result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); - DC_FP_END(); if (!result) return false; @@ -281,9 +279,7 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); mode_support->dml2_instance = dml_init->dml2_instance; - DC_FP_START(); dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); - DC_FP_END(); dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; DC_FP_START(); is_supported = dml2_check_mode_supported(mode_support); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h index 793e1c038efd..16a4f97bca4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #ifndef __DML_DML_DCN4_SOC_BB__ #define __DML_DML_DCN4_SOC_BB__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h index 91955bbe24b8..8e5a30287220 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h @@ -46,7 +46,6 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; - uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h similarity index 97% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h index e8dc6471c0be..13749c9fcf18 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ 
b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h @@ -49,6 +49,11 @@ enum dml2_source_format_class { dml2_422_packed_12 = 18 }; +enum dml2_sample_positioning { + dml2_interstitial = 0, + dml2_cosited = 1 +}; + enum dml2_rotation_angle { dml2_rotation_0 = 0, dml2_rotation_90 = 1, @@ -222,7 +227,11 @@ struct dml2_composition_cfg { struct { bool enabled; + bool easf_enabled; + bool isharp_enabled; bool upsp_enabled; + enum dml2_sample_positioning upsp_sample_positioning; + unsigned int upsp_vtaps; struct { double h_ratio; double v_ratio; @@ -385,6 +394,7 @@ struct dml2_plane_parameters { // The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override. long reserved_vblank_time_ns; unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay + unsigned int vactive_latency_to_hide_for_pstate_admissibility_us; unsigned int gpuvm_min_page_size_kbytes; unsigned int hostvm_min_page_size_kbytes; @@ -456,6 +466,7 @@ struct dml2_display_cfg { bool enable; bool value; } force_nom_det_size_kbytes; + bool mode_support_check_disable; bool mcache_admissibility_check_disable; bool surface_viewport_size_check_disable; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h similarity index 96% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h index 176f55947664..4a9a0d5a09b7 
100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h @@ -145,6 +145,8 @@ struct dml2_soc_bb { struct dml2_soc_vmin_clock_limits vmin_limit; double lower_bound_bandwidth_dchub; + double fraction_of_urgent_bandwidth_nominal_target; + double fraction_of_urgent_bandwidth_flip_target; unsigned int dprefclk_mhz; unsigned int xtalclk_mhz; unsigned int pcie_refclk_mhz; @@ -170,6 +172,7 @@ struct dml2_soc_bb { struct dml2_ip_capabilities { unsigned int pipe_count; unsigned int otg_count; + unsigned int TDLUT_33cube_count; unsigned int num_dsc; unsigned int max_num_dp2p0_streams; unsigned int max_num_hdmi_frl_outputs; @@ -188,7 +191,9 @@ struct dml2_ip_capabilities { unsigned int subvp_prefetch_end_to_mall_start_us; unsigned int subvp_fw_processing_delay; unsigned int max_vactive_det_fill_delay_us; - + unsigned int ppt_max_allow_delay_ns; + unsigned int temp_read_max_allow_delay_us; + unsigned int dummy_pstate_max_allow_delay_us; /* FAMS2 delays */ struct { unsigned int max_allow_delay_us; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h similarity index 98% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h index 41adb1104d0f..8646ce5f1c01 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h @@ -70,6 +70,8 @@ struct dml2_pmo_options { bool disable_dyn_odm; bool disable_dyn_odm_for_multi_stream; bool disable_dyn_odm_for_stream_with_svp; + struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES]; + unsigned int num_override_strategies_per_list[DML2_MAX_PLANES]; }; struct dml2_options { @@ -193,6 +195,14 @@ struct dml2_mcache_surface_allocation { } informative; }; 
+enum dml2_pstate_type { + dml2_pstate_type_uclk, + dml2_pstate_type_ppt, + dml2_pstate_type_temp_read, + dml2_pstate_type_dummy_pstate, + dml2_pstate_type_count +}; + enum dml2_pstate_method { dml2_pstate_method_na = 0, /* hw exclusive modes */ @@ -310,6 +320,7 @@ struct dml2_mode_support_info { bool NumberOfOTGSupport; bool NumberOfHDMIFRLSupport; bool NumberOfDP2p0Support; + bool NumberOfTDLUT33cubeSupport; bool WritebackScaleRatioAndTapsSupport; bool CursorSupport; bool PitchSupport; @@ -357,6 +368,8 @@ struct dml2_mode_support_info { unsigned int AlignedCPitch[DML2_MAX_PLANES]; bool g6_temp_read_support; bool temp_read_or_ppt_support; + bool qos_bandwidth_support; + bool dcfclk_support; }; // dml2_mode_support_info struct dml2_display_cfg_programming { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c index 6ee37386f672..eba948e187c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c @@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = { .writeback_interface_buffer_size_kbytes = 90, //Number of pipes after DCN Pipe harvesting .max_num_dpp = 4, + .max_num_opp = 4, .max_num_otg = 4, .max_num_wb = 1, .max_dchub_pscl_bw_pix_per_clk = 4, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index bf62d42b3f78..f809c4073b43 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -1303,6 +1303,7 @@ static double TruncToValidBPP( MinDSCBPP = 8; MaxDSCBPP = 16; } else { + if (Output == dml2_hdmi || Output == dml2_hdmifrl) { NonDSCBPP0 = 24; NonDSCBPP1 = 24; @@ -1320,6 +1321,7 @@ static double TruncToValidBPP( MaxDSCBPP = 16; } } + if (Output == dml2_dp2p0) { MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0; } else if (DSCEnable && Output == dml2_dp) { @@ -4047,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode, bool UseDSC, unsigned int NumberOfDSCSlices, unsigned int TotalNumberOfActiveDPP, + unsigned int TotalNumberOfActiveOPP, unsigned int MaxNumDPP, + unsigned int MaxNumOPP, double DISPCLKRequired, unsigned int NumberOfDPPRequired, unsigned int MaxHActiveForDSC, @@ -4063,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode, if (DISPCLKRequired > MaxDispclk) return false; - if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP) + if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP) return false; if (are_odm_segments_symmetrical) { if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle)) @@ -4109,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode( double MaxDispclk, bool DSCEnable, unsigned int TotalNumberOfActiveDPP, + unsigned int TotalNumberOfActiveOPP, unsigned int MaxNumDPP, + unsigned int 
MaxNumOPP, double PixelClock, unsigned int NumberOfDSCSlices, @@ -4179,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode( UseDSC, NumberOfDSCSlices, TotalNumberOfActiveDPP, + TotalNumberOfActiveOPP, MaxNumDPP, + MaxNumOPP, DISPCLKRequired, NumberOfDPPRequired, MaxHActiveForDSC, @@ -8358,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params); mode_lib->ms.TotalNumberOfActiveDPP = 0; + mode_lib->ms.TotalNumberOfActiveOPP = 0; mode_lib->ms.support.TotalAvailablePipesSupport = true; for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { @@ -8393,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz, false, // DSCEnable mode_lib->ms.TotalNumberOfActiveDPP, + mode_lib->ms.TotalNumberOfActiveOPP, mode_lib->ip.max_num_dpp, + mode_lib->ip.max_num_opp, ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000), mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8412,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out mode_lib->ms.max_dispclk_freq_mhz, true, // DSCEnable mode_lib->ms.TotalNumberOfActiveDPP, + mode_lib->ms.TotalNumberOfActiveOPP, mode_lib->ip.max_num_dpp, + mode_lib->ip.max_num_opp, ((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000), mode_lib->ms.support.NumberOfDSCSlices[k], @@ -8516,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; + mode_lib->ms.NoOfOPP[k] = 1; if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 4; + mode_lib->ms.NoOfOPP[k] = 
4; } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 3; + mode_lib->ms.NoOfOPP[k] = 3; } else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 2; + mode_lib->ms.NoOfOPP[k] = 2; } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) { mode_lib->ms.MPCCombine[k] = true; mode_lib->ms.NoOfDPP[k] = 2; - mode_lib->ms.TotalNumberOfActiveDPP++; } else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) { mode_lib->ms.MPCCombine[k] = false; mode_lib->ms.NoOfDPP[k] = 1; @@ -8540,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) { mode_lib->ms.MPCCombine[k] = true; mode_lib->ms.NoOfDPP[k] = 2; - mode_lib->ms.TotalNumberOfActiveDPP++; } } #if defined(__DML_VBA_DEBUG__) @@ -8548,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out #endif } + mode_lib->ms.TotalNumberOfActiveDPP = 0; + mode_lib->ms.TotalNumberOfActiveOPP = 0; + for (k = 0; k < mode_lib->ms.num_active_planes; ++k) { + mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k]; + mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k]; + } if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp) mode_lib->ms.support.TotalAvailablePipesSupport = false; + if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp) + mode_lib->ms.support.TotalAvailablePipesSupport = false; mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0; @@ -12756,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna { const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index]; 
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index]; - const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index]; + const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index]; struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base; union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state; @@ -12771,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* from display configuration */ base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total; base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total; - base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal - + base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch); - base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal - + base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch - stream_descriptor->timing.v_active); base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled; /* from meta */ base_programming->otg_vline_time_ns = - (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0); - base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines; - base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines; - base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines; - base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal - + (unsigned 
int)(stream_pstate_meta->otg_vline_time_us * 1000.0); + base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines; + base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines; + base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines; + base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal - stream_descriptor->timing.v_front_porch - - stream_fams2_meta->method_drr.programming_delay_otg_vlines); - base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines; - base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal; + stream_pstate_meta->method_drr.programming_delay_otg_vlines); + base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines; + base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal; /* from core */ base_programming->config.bits.min_ttu_vblank_usable = true; @@ -12807,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* legacy vactive */ base_programming->type = FAMS2_STREAM_TYPE_VACTIVE; sub_programming->legacy.vactive_det_fill_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_vblank: @@ 
-12819,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna /* legacy vblank */ base_programming->type = FAMS2_STREAM_TYPE_VBLANK; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_fw_drr: /* drr */ base_programming->type = FAMS2_STREAM_TYPE_DRR; sub_programming->drr.programming_delay_otg_vlines = - (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines; sub_programming->drr.nom_stretched_vtotal = - (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal; + (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline; /* drr only clamps to vtotal min for single display */ base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1; sub_programming->drr.only_stretch_if_required = true; @@ -12847,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna (uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0); sub_programming->subvp.vratio_denominator = 1000; sub_programming->subvp.programming_delay_otg_vlines = - 
(uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines; sub_programming->subvp.prefetch_to_mall_otg_vlines = - (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines; + (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines; sub_programming->subvp.phantom_vtotal = - (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal; + (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal; sub_programming->subvp.phantom_vactive = - (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive; + (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive; sub_programming->subvp.config.bits.is_multi_planar = plane_descriptor->surface.plane1.height > 0; sub_programming->subvp.config.bits.is_yuv420 = @@ -12862,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna plane_descriptor->pixel_format == dml2_420_12; base_programming->allow_start_otg_vline = - (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline; + (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline; base_programming->allow_end_otg_vline = - (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline; + (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline; base_programming->config.bits.clamp_vtotal_min = true; break; case dml2_pstate_method_reserved_hw: @@ -13027,7 +13050,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported; out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support; out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport; + out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = 
mode_lib->ms.support.NumberOfTDLUT33cubeSupport; out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport; + out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support; + out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support; for (k = 0; k < out->display_config.num_planes; k++) { diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c similarity index 96% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c index 640087e862f8..cc4f0663c6d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c @@ -15,6 +15,8 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance memset(out, 0, sizeof(struct dml2_core_instance)); + out->project_id = project_id; + switch (project_id) { case dml2_project_dcn4x_stage1: result = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h index ffb8c09f37a5..051c31ec2f0e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h @@ -36,7 +36,9 @@ struct dml2_core_ip_params { unsigned int max_line_buffer_lines; unsigned int writeback_interface_buffer_size_kbytes; unsigned int max_num_dpp; + unsigned int max_num_opp; unsigned int max_num_otg; + unsigned int TDLUT_33cube_count; unsigned int max_num_wb; unsigned int max_dchub_pscl_bw_pix_per_clk; unsigned int max_pscl_lb_bw_pix_per_clk; @@ -46,6 +48,7 @@ struct dml2_core_ip_params { double max_vscl_ratio; unsigned int max_hscl_taps; unsigned int max_vscl_taps; + unsigned int odm_combine_support_mask; unsigned int num_dsc; unsigned int maximum_dsc_bits_per_component; unsigned int maximum_pixels_per_line_per_dsc_unit; @@ -82,7 +85,6 @@ struct dml2_core_ip_params { unsigned int subvp_swath_height_margin_lines; unsigned int subvp_fw_processing_delay_us; unsigned int subvp_pstate_allow_width_us; - // MRQ bool dcn_mrq_present; unsigned int zero_size_buffer_entries; @@ -103,6 +105,8 @@ struct dml2_core_internal_DmlPipe { unsigned int DPPPerSurface; bool ScalerEnabled; bool UPSPEnabled; + unsigned int UPSPVTaps; + enum dml2_sample_positioning UPSPSamplePositioning; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -230,6 +234,7 @@ struct dml2_core_internal_mode_support_info { bool MSOOrODMSplitWithNonDPLink; bool NotEnoughLanesForMSO; bool NumberOfOTGSupport; + bool NumberOfTDLUT33cubeSupport; bool NumberOfHDMIFRLSupport; bool 
NumberOfDP2p0Support; bool WritebackScaleRatioAndTapsSupport; @@ -566,6 +571,7 @@ struct dml2_core_internal_mode_support { enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES]; unsigned int NoOfDPP[DML2_MAX_PLANES]; + unsigned int NoOfOPP[DML2_MAX_PLANES]; bool MPCCombine[DML2_MAX_PLANES]; double dcfclk_deepsleep; double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES]; @@ -576,6 +582,7 @@ struct dml2_core_internal_mode_support { bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES]; bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES]; unsigned int TotalNumberOfActiveDPP; + unsigned int TotalNumberOfActiveOPP; unsigned int TotalNumberOfSingleDPPSurfaces; unsigned int TotalNumberOfDCCActiveDPP; unsigned int Total3dlutActive; @@ -1306,7 +1313,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; - enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; + enum dml2_pstate_method *pstate_switch_modes; // Output bool *PTEBufferSizeNotExceeded; @@ -2308,6 +2315,7 @@ struct dml2_core_calcs_mode_support_ex { const struct dml2_display_cfg *in_display_cfg; const struct dml2_mcg_min_clock_table *min_clk_table; int min_clk_index; + enum dml2_project_id project_id; //unsigned int in_state_index; struct dml2_core_internal_mode_support_info *out_evaluation_info; }; @@ -2320,6 +2328,7 @@ struct dml2_core_calcs_mode_programming_ex { const struct dml2_mcg_min_clock_table *min_clk_table; const struct core_display_cfg_support_info *cfg_support_info; int min_clk_index; + enum dml2_project_id project_id; struct dml2_display_cfg_programming *programming; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c rename to 
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c 
b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h similarity index 97% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h index 02da6f45cbf7..f54fde8fba90 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h @@ -10,4 +10,4 @@ bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out); bool mcg_dcn4_unit_test(void); -#endif +#endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c rename to 
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c similarity index 83% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c index d88b3e0082dd..5769c2638f9a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c @@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) int i = 0; struct dml2_pmo_instance *pmo = in_out->instance; + unsigned int base_list_size = 0; + const struct dml2_pmo_pstate_strategy *base_list = NULL; + unsigned int *expanded_list_size = NULL; + struct dml2_pmo_pstate_strategy *expanded_list = NULL; + pmo->soc_bb = in_out->soc_bb; pmo->ip_caps = in_out->ip_caps; pmo->mpc_combine_limit = 2; @@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out) pmo->options = in_out->options; /* generate permutations of p-state configs from base strategy list */ - for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) { - switch (i) { + for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) { + switch (i+1) { case 1: - DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = 
pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_1_display; + base_list_size = base_strategy_list_1_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_1_display, - base_strategy_list_1_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 2: - DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_2_display; + base_list_size = base_strategy_list_2_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_2_display, - base_strategy_list_2_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 3: - DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_3_display; + base_list_size = base_strategy_list_3_display_size; + } + + expanded_list_size = 
&pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_3_display, - base_strategy_list_3_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; case 4: - DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) { + base_list = pmo->options->override_strategy_lists[i]; + base_list_size = pmo->options->num_override_strategies_per_list[i]; + } else { + base_list = base_strategy_list_4_display; + base_list_size = base_strategy_list_4_display_size; + } + + expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i]; + expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display; - /* populate list */ - pmo_dcn4_fams2_expand_base_pstate_strategies( - base_strategy_list_4_display, - base_strategy_list_4_display_size, - i, - pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display, - &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]); break; } + + DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES); + + /* populate list */ + pmo_dcn4_fams2_expand_base_pstate_strategies( + base_list, + base_list_size, + i + 1, + expanded_list, + expanded_list_size); } return true; @@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo, return synchronizable; } -static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta) +static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta) { - return fams2_meta->contention_delay_otg_vlines + - fams2_meta->method_subvp.programming_delay_otg_vlines + - fams2_meta->method_subvp.phantom_vtotal + - 
fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + - fams2_meta->dram_clk_change_blackout_otg_vlines; + return pstate_meta->contention_delay_otg_vlines + + pstate_meta->method_subvp.programming_delay_otg_vlines + + pstate_meta->method_subvp.phantom_vtotal + + pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + + pstate_meta->blackout_otg_vlines; } static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, @@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo, unsigned int i; for (i = 0; i < DML2_MAX_PLANES; i++) { const struct dml2_stream_parameters *stream_descriptor; - const struct dml2_fams2_meta *stream_fams2_meta; + const struct dml2_pstate_meta *stream_pstate_meta; if (is_bit_set_in_bitfield(mask, i)) { stream_descriptor = &display_config->display_config.stream_descriptors[i]; - stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; + stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i]; if (!stream_descriptor->timing.drr_config.enabled) return false; /* cannot support required vtotal */ - if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) { + if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) { return false; } /* check rr is within bounds */ - if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min || - stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) { + if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min || + stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) { return false; } /* check required stretch is allowed */ if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 && - stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) { + 
stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) { return false; } } @@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, { const struct dml2_stream_parameters *stream_descriptor; const struct dml2_plane_parameters *plane_descriptor; - const struct dml2_fams2_meta *stream_fams2_meta; + const struct dml2_pstate_meta *stream_pstate_meta; unsigned int microschedule_vlines; unsigned int i; unsigned int mcaches_per_plane; @@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, for (i = 0; i < DML2_MAX_PLANES; i++) { if (is_bit_set_in_bitfield(mask, i)) { stream_descriptor = &display_config->display_config.stream_descriptors[i]; - stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i]; + stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i]; if (stream_descriptor->overrides.disable_subvp) { return false; } - microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]); + microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]); /* block if using an interlaced timing */ if (stream_descriptor->timing.interlaced) { @@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo, * 2) refresh rate must be within the allowed bounds */ if (microschedule_vlines >= stream_descriptor->timing.v_active || - (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min || - stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) { + (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min || + stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) { return false; } } @@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct 
display_configuation_with_meta } static void build_method_scheduling_params( - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta, - struct dml2_fams2_meta *stream_fams2_meta) + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta, + struct dml2_pstate_meta *stream_pstate_meta) { - stream_method_fams2_meta->allow_time_us = - (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) * - stream_fams2_meta->otg_vline_time_us; - if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) { + stream_method_pstate_meta->allow_time_us = + (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) * + stream_pstate_meta->otg_vline_time_us; + if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) { /* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/ - stream_method_fams2_meta->disallow_time_us = 0.0; + stream_method_pstate_meta->disallow_time_us = 0.0; } else { - stream_method_fams2_meta->disallow_time_us = - stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us; + stream_method_pstate_meta->disallow_time_us = + stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us; } } -static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( +static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta( struct dml2_pmo_instance *pmo, enum dml2_pstate_method stream_pstate_method, int stream_idx) { - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL; + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL; switch (stream_pstate_method) { case dml2_pstate_method_vactive: case dml2_pstate_method_fw_vactive_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common; + 
stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common; break; case dml2_pstate_method_vblank: case dml2_pstate_method_fw_vblank_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common; break; case dml2_pstate_method_fw_svp: case dml2_pstate_method_fw_svp_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common; break; case dml2_pstate_method_fw_drr: - stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common; + stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common; break; case dml2_pstate_method_reserved_hw: case dml2_pstate_method_reserved_fw: @@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta( case dml2_pstate_method_count: case dml2_pstate_method_na: default: - stream_method_fams2_meta = NULL; + stream_method_pstate_meta = NULL; } - return stream_method_fams2_meta; + return stream_method_pstate_meta; } static bool is_timing_group_schedulable( @@ -1288,10 +1311,10 @@ static bool is_timing_group_schedulable( const struct display_configuation_with_meta *display_cfg, const struct dml2_pmo_pstate_strategy *pstate_strategy, const unsigned int timing_group_idx, - struct dml2_fams2_per_method_common_meta *group_fams2_meta) + struct dml2_pstate_per_method_common_meta *group_pstate_meta) { unsigned int i; - struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta; + struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta; unsigned int base_stream_idx = 0; struct dml2_pmo_scratch *s = &pmo->scratch; @@ -1305,31 +1328,31 @@ static bool 
is_timing_group_schedulable( } /* init allow start and end lines for timing group */ - stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); - if (!stream_method_fams2_meta) + stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx); + if (!stream_method_pstate_meta) return false; - group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline; - group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline; - group_fams2_meta->period_us = stream_method_fams2_meta->period_us; + group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline; + group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline; + group_pstate_meta->period_us = stream_method_pstate_meta->period_us; for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) { if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) { - stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); - if (!stream_method_fams2_meta) + stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i); + if (!stream_method_pstate_meta) continue; - if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) { + if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) { /* set group allow start to larger otg vline */ - group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline; + group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline; } - if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) { + if 
(group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) { /* set group allow end to smaller otg vline */ - group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline; + group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline; } /* check waveform still has positive width */ - if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) { + if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) { /* timing group is not schedulable */ return false; } @@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable( } /* calculate the rest of the meta */ - build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]); + build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]); - return group_fams2_meta->allow_time_us > 0.0 && - group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us; + return group_pstate_meta->allow_time_us > 0.0 && + group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us; } static bool is_config_schedulable( @@ -1354,7 +1377,7 @@ static bool is_config_schedulable( double max_allow_delay_us = 0.0; - memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta)); + memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta)); memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES); /* search for a general solution to the schedule */ @@ -1369,12 +1392,12 @@ static bool is_config_schedulable( for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i; s->pmo_dcn4.sorted_group_gtl_period_index[i] = i; - if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) { + if 
(!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) { /* synchronized timing group was not schedulable */ schedulable = false; break; } - max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us; + max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us; } if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) { @@ -1391,8 +1414,8 @@ static bool is_config_schedulable( bool swapped = false; for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) { - double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us; - double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us; + double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us; + double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us; if (j_disallow_us < jp1_disallow_us) { /* swap as A < B */ swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j], @@ -1410,19 +1433,19 @@ static bool is_config_schedulable( * other display, or when >2 streams continue to halve the remaining allow time. 
*/ for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) { - if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) { + if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) { /* this timing group always allows */ continue; } - double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us; + double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us; for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) { unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j]; /* stream can't overlap itself */ - if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) { + if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) { max_allow_time_us = math_min2( - s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us, - (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2); + s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us, + (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2); if (max_allow_time_us < 0.0) { /* failed exit early */ @@ -1450,8 +1473,8 @@ static bool is_config_schedulable( bool swapped = false; for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) { - double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us; - double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us; + double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us; + double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us; if (j_period_us < jp1_period_us) { /* swap as A < B */ swap(s->pmo_dcn4.sorted_group_gtl_period_index[j], @@ -1470,7 +1493,7 @@ static bool is_config_schedulable( unsigned int sorted_i = 
s->pmo_dcn4.sorted_group_gtl_period_index[i]; unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1]; - if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us || + if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us || (s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) { schedulable = false; break; @@ -1492,18 +1515,18 @@ static bool is_config_schedulable( /* default period_0 > period_1 */ unsigned int lrg_idx = 0; unsigned int sml_idx = 1; - if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) { + if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) { /* period_0 < period_1 */ lrg_idx = 1; sml_idx = 0; } - period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us; - shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio)); - max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us; - max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us; + period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us; + shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio)); + max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us; + max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us; if (shift_per_period > 0.0 && - shift_per_period < 
s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us && + shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us && max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) { schedulable = true; } @@ -1661,7 +1684,7 @@ static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_c return max_vactive_fill_us; } -static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, +static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo, struct display_configuation_with_meta *display_config, int stream_index) { @@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index]; const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index]; const struct dml2_timing_cfg *timing = &stream_descriptor->timing; - struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; /* worst case all other streams require some programming at the same time, 0 if only 1 stream */ unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us + @@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo, (display_config->display_config.num_streams - 1); /* common */ - stream_fams2_meta->valid = true; - stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0; - stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active; - stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 
1000.0 / - (stream_fams2_meta->nom_vtotal * timing->h_total); - stream_fams2_meta->nom_frame_time_us = - (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us; - stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active; + stream_pstate_meta->valid = true; + stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0; + stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active; + stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / + (stream_pstate_meta->nom_vtotal * timing->h_total); + stream_pstate_meta->nom_frame_time_us = + (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us; + stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active; if (stream_descriptor->timing.drr_config.enabled == true) { if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) { - stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / + stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / ((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9); } else { /* assume min of 48Hz */ - stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / + stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz / (48000000.0 * stream_descriptor->timing.h_total) * 1e9); } } else { - stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal; + stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal; } - stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / - (stream_fams2_meta->max_vtotal * timing->h_total); - stream_fams2_meta->max_frame_time_us = - (double)stream_fams2_meta->max_vtotal * 
stream_fams2_meta->otg_vline_time_us; + stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 / + (stream_pstate_meta->max_vtotal * timing->h_total); + stream_pstate_meta->max_frame_time_us = + (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us; - stream_fams2_meta->scheduling_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->contention_delay_otg_vlines = - (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->scheduling_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->contention_delay_otg_vlines = + (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us); /* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */ - stream_fams2_meta->allow_to_target_delay_otg_vlines = - (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1; - stream_fams2_meta->min_allow_width_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->allow_to_target_delay_otg_vlines = + (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1; + 
stream_pstate_meta->min_allow_width_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us); /* this value should account for urgent latency */ - stream_fams2_meta->dram_clk_change_blackout_otg_vlines = + stream_pstate_meta->blackout_otg_vlines = (unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us / - stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->otg_vline_time_us); /* scheduling params should be built based on the worst case for allow_time:disallow_time */ /* vactive */ if (display_config->display_config.num_streams == 1) { /* for single stream, guarantee at least an instant of allow */ - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor( + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor( math_max2(0.0, - timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines)); + timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines)); } else { /* for multi stream, bound to a max fill time defined by IP caps */ - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = - (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us); + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = + (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us); } - stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us; + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * 
stream_pstate_meta->otg_vline_time_us; - if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) { - stream_fams2_meta->method_vactive.common.allow_start_otg_vline = - timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; - stream_fams2_meta->method_vactive.common.allow_end_otg_vline = - stream_fams2_meta->vblank_start - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) { + stream_pstate_meta->method_vactive.common.allow_start_otg_vline = + timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines; + stream_pstate_meta->method_vactive.common.allow_end_otg_vline = + stream_pstate_meta->vblank_start - + stream_pstate_meta->blackout_otg_vlines; } else { - stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0; - stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0; + stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0; + stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0; } - stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta); + stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta); /* vblank */ - stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start; - stream_fams2_meta->method_vblank.common.allow_end_otg_vline = - stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1; - stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta); + 
stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start; + stream_pstate_meta->method_vblank.common.allow_end_otg_vline = + stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1; + stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta); /* subvp */ - stream_fams2_meta->method_subvp.programming_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_subvp.phantom_vactive = - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + + stream_pstate_meta->method_subvp.programming_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_subvp.phantom_vactive = + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + stream_info->phantom_min_v_active; - stream_fams2_meta->method_subvp.phantom_vfp = - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines; + 
stream_pstate_meta->method_subvp.phantom_vfp = + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines; /* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/ - stream_fams2_meta->method_subvp.phantom_vtotal = + stream_pstate_meta->method_subvp.phantom_vtotal = stream_info->phantom_v_startup + - stream_fams2_meta->method_subvp.phantom_vfp + + stream_pstate_meta->method_subvp.phantom_vfp + 1 + - stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines + - stream_fams2_meta->method_subvp.phantom_vactive; - stream_fams2_meta->method_subvp.common.allow_start_otg_vline = + stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines + + stream_pstate_meta->method_subvp.phantom_vactive; + stream_pstate_meta->method_subvp.common.allow_start_otg_vline = stream_descriptor->timing.v_blank_end + - stream_fams2_meta->contention_delay_otg_vlines + - stream_fams2_meta->method_subvp.programming_delay_otg_vlines + - stream_fams2_meta->method_subvp.phantom_vtotal + - stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + - stream_fams2_meta->allow_to_target_delay_otg_vlines; - stream_fams2_meta->method_subvp.common.allow_end_otg_vline = - stream_fams2_meta->vblank_start - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us; - build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta); + stream_pstate_meta->contention_delay_otg_vlines + + stream_pstate_meta->method_subvp.programming_delay_otg_vlines + + stream_pstate_meta->method_subvp.phantom_vtotal + + stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines + + stream_pstate_meta->allow_to_target_delay_otg_vlines; + stream_pstate_meta->method_subvp.common.allow_end_otg_vline = + stream_pstate_meta->vblank_start - + stream_pstate_meta->blackout_otg_vlines; + 
stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us; + build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta); /* drr */ - stream_fams2_meta->method_drr.programming_delay_otg_vlines = - (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us); - stream_fams2_meta->method_drr.common.allow_start_otg_vline = - stream_fams2_meta->vblank_start + - stream_fams2_meta->allow_to_target_delay_otg_vlines; - stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us; + stream_pstate_meta->method_drr.programming_delay_otg_vlines = + (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us); + stream_pstate_meta->method_drr.common.allow_start_otg_vline = + stream_pstate_meta->vblank_start + + stream_pstate_meta->allow_to_target_delay_otg_vlines; + stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us; if (display_config->display_config.num_streams <= 1) { /* only need to stretch vblank for blackout time */ - stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->nom_vtotal + - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + stream_pstate_meta->method_drr.stretched_vtotal = + stream_pstate_meta->nom_vtotal + + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + + stream_pstate_meta->blackout_otg_vlines; } else { /* multi display needs to always be schedulable */ - stream_fams2_meta->method_drr.stretched_vtotal = - stream_fams2_meta->nom_vtotal * 2 + - stream_fams2_meta->allow_to_target_delay_otg_vlines + - stream_fams2_meta->min_allow_width_otg_vlines + - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; + stream_pstate_meta->method_drr.stretched_vtotal = + 
stream_pstate_meta->nom_vtotal * 2 + + stream_pstate_meta->allow_to_target_delay_otg_vlines + + stream_pstate_meta->min_allow_width_otg_vlines + + stream_pstate_meta->blackout_otg_vlines; } - stream_fams2_meta->method_drr.common.allow_end_otg_vline = - stream_fams2_meta->method_drr.stretched_vtotal - - stream_fams2_meta->dram_clk_change_blackout_otg_vlines; - build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta); + stream_pstate_meta->method_drr.common.allow_end_otg_vline = + stream_pstate_meta->method_drr.stretched_vtotal - + stream_pstate_meta->blackout_otg_vlines; + build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta); } static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo, @@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo, int stream_index) { struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index]; - struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index]; stream_svp_meta->valid = true; /* PMO FAMS2 precaulcates these values */ - stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive; - stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp; - stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal; + stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive; + stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp; + stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal; } bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out) @@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp 
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index); /* FAMS2 meta */ - build_fams2_meta_per_stream(pmo, display_config, stream_index); + build_pstate_meta_per_stream(pmo, display_config, stream_index); /* SVP meta */ build_subvp_meta_per_stream(pmo, display_config, stream_index); @@ -2077,7 +2100,7 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me if (!pmo->options->disable_vactive_det_fill_bw_pad) { display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us = - (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); + (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); } } } @@ -2098,7 +2121,7 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit if (!pmo->options->disable_vactive_det_fill_bw_pad) { display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us = - (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); + (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us); } } } @@ -2144,9 +2167,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_ /* copy FAMS2 meta */ if (success) { display_config->stage3.fams2_required = fams2_required; - memcpy(&display_config->stage3.stream_fams2_meta, - &scratch->pmo_dcn4.stream_fams2_meta, - sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES); + memcpy(&display_config->stage3.stream_pstate_meta, + &scratch->pmo_dcn4.stream_pstate_meta, + sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES); } return success; @@ -2188,12 +2211,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp return false; for 
(stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) { - struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index]; + struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index]; if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive || s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) { if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) || - get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) { + get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) { p_state_supported = false; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h similarity index 97% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h index 7218de1824cc..b90f6263cd85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h @@ -10,4 +10,4 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out); -#endif +#endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c 
b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h similarity index 96% rename from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h rename to 
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h index d52aa82283b3..9f562f0c4797 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h @@ -255,7 +255,7 @@ struct dml2_implicit_svp_meta { unsigned long v_front_porch; }; -struct dml2_fams2_per_method_common_meta { +struct dml2_pstate_per_method_common_meta { /* generic params */ unsigned int allow_start_otg_vline; unsigned int allow_end_otg_vline; @@ -265,7 +265,7 @@ struct dml2_fams2_per_method_common_meta { double period_us; }; -struct dml2_fams2_meta { +struct dml2_pstate_meta { bool valid; double otg_vline_time_us; unsigned int scheduling_delay_otg_vlines; @@ -280,14 +280,14 @@ struct dml2_fams2_meta { unsigned int max_vtotal; double min_refresh_rate_hz; double max_frame_time_us; - unsigned int dram_clk_change_blackout_otg_vlines; + unsigned int blackout_otg_vlines; struct { double max_vactive_det_fill_delay_us; unsigned int max_vactive_det_fill_delay_otg_vlines; - struct dml2_fams2_per_method_common_meta common; + struct dml2_pstate_per_method_common_meta common; } method_vactive; struct { - struct dml2_fams2_per_method_common_meta common; + struct dml2_pstate_per_method_common_meta common; } method_vblank; struct { unsigned int programming_delay_otg_vlines; @@ -296,15 +296,24 @@ struct dml2_fams2_meta { unsigned long phantom_vactive; unsigned long phantom_vfp; unsigned long phantom_vtotal; - struct dml2_fams2_per_method_common_meta common; + struct dml2_pstate_per_method_common_meta common; } method_subvp; struct { unsigned int programming_delay_otg_vlines; unsigned int stretched_vtotal; - struct dml2_fams2_per_method_common_meta common; + struct dml2_pstate_per_method_common_meta common; } method_drr; }; +/* mask of synchronized timings by stream index */ +struct dml2_pmo_synchronized_timing_groups { + unsigned int num_timing_groups; + unsigned int 
synchronized_timing_group_masks[DML2_MAX_PLANES]; + bool group_is_drr_enabled[DML2_MAX_PLANES]; + bool group_is_drr_active[DML2_MAX_PLANES]; + double group_line_time_us[DML2_MAX_PLANES]; +}; + struct dml2_optimization_stage3_state { bool performed; bool success; @@ -319,7 +328,7 @@ struct dml2_optimization_stage3_state { // Meta-data for FAMS2 bool fams2_required; - struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES]; + struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES]; int min_clk_index_for_latency; }; @@ -472,6 +481,7 @@ struct dml2_core_scratch { }; struct dml2_core_instance { + enum dml2_project_id project_id; struct dml2_mcg_min_clock_table *minimum_clock_table; struct dml2_core_internal_state_inputs inputs; struct dml2_core_internal_state_intermediates intermediates; @@ -619,6 +629,12 @@ struct dml2_pmo_optimize_for_stutter_in_out { #define PMO_DCN4_MAX_NUM_VARIANTS 2 #define PMO_DCN4_MAX_BASE_STRATEGIES 10 +struct dml2_scheduling_check_locals { + struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES]; + unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES]; + unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES]; +}; + struct dml2_pmo_scratch { union { struct { @@ -648,7 +664,7 @@ struct dml2_pmo_scratch { // Stores all the implicit SVP meta information indexed by stream index of the display // configuration under inspection, built at optimization stage init struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES]; - struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES]; + struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES]; unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE]; unsigned int num_stutter_candidates; @@ -663,7 +679,7 @@ struct dml2_pmo_scratch { double group_line_time_us[DML2_MAX_PLANES]; /* scheduling check locals */ - struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES]; + struct 
dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES]; unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES]; unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES]; double group_phase_offset[DML2_MAX_PLANES]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h index 140ec01545db..55b3e3ca54f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h @@ -23,7 +23,7 @@ * Authors: AMD * */ - + #ifndef __DML2_INTERNAL_TYPES_H__ #define __DML2_INTERNAL_TYPES_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c index 
c59f825cfae9..66040c877d68 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c @@ -24,6 +24,7 @@ * */ + #include "dml2_dc_types.h" #include "dml2_internal_types.h" #include "dml2_utils.h" diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c index 3b866e876bf4..d834cb595afa 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c @@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0; break; + case dml_project_dcn401: out->pct_ideal_fabric_bw_after_urgent = 76; //67; out->max_avg_sdp_bw_use_normal_percent = 75; //80; @@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, p->in_states->state_array[1].dcfclk_mhz = 
1434.0; p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock; break; + + case dml_project_dcn401: p->in_states->num_states = 2; transactions_per_mem_clock = 16; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h similarity index 99% rename from 
drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h index f7d30b47beff..d459f93cf40b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h @@ -31,3 +31,4 @@ */ #include "os_types.h" #include "cmntypes.h" + diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h similarity index 100% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h similarity index 99% rename from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h rename to drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h index 2a2f84e07ca8..7fadbe6d7af4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h +++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h @@ -23,6 +23,7 @@ * Authors: AMD * */ + #ifndef __DML_LOGGING_H__ #define __DML_LOGGING_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c index 01480a04f85e..ce91e5d28956 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base) memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); + + dpp_base->cursor_offload = false; } @@ -484,10 +486,12 @@ void 
dpp1_set_cursor_position( cur_en = 0; /* not visible beyond top edge*/ if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) { - REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + if (!dpp_base->cursor_offload) + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); } + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en; } void dpp1_cnv_set_optional_cursor_attributes( @@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes( struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + } + + dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias; + dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale; } } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h index f466182963f7..b12f34345a58 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -1348,7 +1348,8 @@ struct dcn_dpp_mask { uint32_t CURSOR0_COLOR1; \ uint32_t DPP_CONTROL; \ uint32_t CM_HDR_MULT_COEF; \ - uint32_t CURSOR0_FP_SCALE_BIAS; + uint32_t CURSOR0_FP_SCALE_BIAS; \ + uint32_t OBUF_CONTROL; struct dcn_dpp_registers { DPP_COMMON_REG_VARIABLE_LIST @@ -1450,7 +1451,6 @@ void dpp1_set_degamma( void dpp1_set_degamma_pwl(struct dpp *dpp_base, const struct pwl_params *params); - void dpp_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 09be2a90cc79..ef4a16117181 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ 
b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) } } +void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + dpp_reg_state->recout_start = REG_READ(RECOUT_START); + dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE); + dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO); + dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO); + dpp_reg_state->scl_mode = REG_READ(SCL_MODE); + dpp_reg_state->cm_control = REG_READ(CM_CONTROL); + dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL); + dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL); + dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL); + dpp_reg_state->mpc_size = REG_READ(MPC_SIZE); +} + /*program post scaler scs block in dpp CM*/ void dpp3_program_post_csc( struct dpp *dpp_base, @@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes( } } - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); + if (!dpp_base->cursor_offload) + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); + + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; @@ -578,9 +598,6 @@ static void dpp3_power_on_blnd_lut( dpp_base->ctx->dc->optimized_required = true; dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; } - } else { - REG_SET(CM_MEM_PWR_CTRL, 0, - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 
0 : 1); } } diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index f236824126e9..d4a70b4379ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha( void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); +void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state); + bool dpp3_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c index fa67e54bf94e..8a5aa5e86850 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c @@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = { .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, + .dpp_read_reg_state = dpp30_read_reg_state, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index f7a373a3d70a..977d83bf7741 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv( static struct dpp_funcs dcn35_dpp_funcs = { .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, .dpp_read_state = dpp30_read_state, + .dpp_read_reg_state = dpp30_read_reg_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 36187f890d5d..96c2c853de42 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes, .dpp_dppclk_control = dpp1_dppclk_control, .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_read_reg_state = dpp30_read_reg_state, .set_cursor_matrix = dpp401_set_cursor_matrix, }; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 7aab77b58869..62bf7cea21d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -103,17 +103,21 @@ void dpp401_set_cursor_attributes( } } - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); + if (!dpp_base->cursor_offload) + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); if (color_format == CURSOR_MODE_MONO) { /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); + + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } } dpp_base->att.cur0_ctl.bits.expansion_mode = 0; @@ -132,10 +136,12 @@ void dpp401_set_cursor_position( uint32_t cur_en = pos->enable ? 
1 : 0; if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) { - REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + if (!dpp_base->cursor_offload) + REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en); } + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; + dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en; } void dpp401_set_optional_cursor_attributes( @@ -145,10 +151,17 @@ void dpp401_set_optional_cursor_attributes( struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale); + if (!dpp_base->cursor_offload) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale); + } + + dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias; + dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale; + dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias; + dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale; } } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c index 89f0d999bf35..242f1e6f0d8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c @@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const static const struct dsc_funcs dcn20_dsc_funcs = { .dsc_get_enc_caps = dsc2_get_enc_caps, .dsc_read_state = dsc2_read_state, + .dsc_read_reg_state = 
dsc2_read_reg_state, .dsc_validate_stream = dsc2_validate_stream, .dsc_set_config = dsc2_set_config, .dsc_get_packed_pps = dsc2_get_packed_pps, @@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); } +void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state) +{ + struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc); + + dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL); + dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS); +} bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg) { @@ -407,7 +415,7 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; // Need to find the ceiling value for the slice width - dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; + dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; // TODO: in addition to validating slice height (pic height must be divisible by slice height), // see what happens when the same condition doesn't apply for slice_width/pic_width. 
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h index a9c04fc95bd1..2337c3a97235 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h @@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc, uint8_t *dsc_packed_pps); void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); +void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state); bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c index 6f4f5a3c4861..f9c6377ac66c 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c @@ -32,6 +32,7 @@ static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe); static const struct dsc_funcs dcn35_dsc_funcs = { .dsc_get_enc_caps = dsc2_get_enc_caps, .dsc_read_state = dsc2_read_state, + .dsc_read_reg_state = dsc2_read_reg_state, .dsc_validate_stream = dsc2_validate_stream, .dsc_set_config = dsc2_set_config, .dsc_get_packed_pps = dsc2_get_packed_pps, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 7bd92ae8b13e..c1bdbb38c690 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -26,6 +26,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disconnect = dsc401_disconnect, 
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, + .dsc_read_reg_state = dsc2_read_reg_state }; /* Macro definitios for REG_SET macros*/ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index b0bd1f9425b5..81c83d5fe042 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -41,6 +41,7 @@ struct dsc_config { enum dc_color_depth color_depth; /* Bits per component */ bool is_odm; struct dc_dsc_config dc_dsc_cfg; + uint32_t dsc_padding; }; @@ -65,6 +66,10 @@ struct dcn_dsc_state { uint32_t dsc_opp_source; }; +struct dcn_dsc_reg_state { + uint32_t dsc_top_control; + uint32_t dscc_interrupt_control_status; +}; /* DSC encoder capabilities * They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps. @@ -99,6 +104,7 @@ struct dsc_enc_caps { struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); + void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state); bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c index e7e5f6d4778e..181a93dc46e6 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c @@ -440,33 +440,15 @@ void hubbub3_init_watermarks(struct hubbub *hubbub) REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); } -void hubbub3_get_det_sizes(struct hubbub *hubbub, uint32_t 
*curr_det_sizes, uint32_t *target_det_sizes) +void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - REG_GET_2(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, &curr_det_sizes[0], - DET0_SIZE, &target_det_sizes[0]); - - REG_GET_2(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, &curr_det_sizes[1], - DET1_SIZE, &target_det_sizes[1]); - - REG_GET_2(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, &curr_det_sizes[2], - DET2_SIZE, &target_det_sizes[2]); - - REG_GET_2(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, &curr_det_sizes[3], - DET3_SIZE, &target_det_sizes[3]); - -} - -uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t compbuf_config_error = 0; - - REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, - &compbuf_config_error); - - return compbuf_config_error; + hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL); + hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL); + hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL); + hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL); + hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL); } static const struct hubbub_funcs hubbub30_funcs = { @@ -486,8 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = { .force_pstate_change_control = hubbub3_force_pstate_change_control, .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub3_construct(struct dcn20_hubbub *hubbub3, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h index 49a469969d36..9e14de3ccaee 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h +++ 
b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h @@ -133,10 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub, void hubbub3_init_watermarks(struct hubbub *hubbub); -void hubbub3_get_det_sizes(struct hubbub *hubbub, - uint32_t *curr_det_sizes, - uint32_t *target_det_sizes); - -uint32_t hubbub3_compbuf_config_error(struct hubbub *hubbub); +void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state); #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c index cdb20251a154..d1aaa58b7db3 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c @@ -1071,8 +1071,7 @@ static const struct hubbub_funcs hubbub31_funcs = { .program_compbuf_size = dcn31_program_compbuf_size, .init_crb = dcn31_init_crb, .hubbub_read_state = hubbub2_read_state, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub31_construct(struct dcn20_hubbub *hubbub31, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c index 4d4ca6d77bbd..237331b35378 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c @@ -1037,8 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = { .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .set_request_limit = hubbub32_set_request_limit, .get_mall_en = hubbub32_get_mall_en, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub32_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c 
b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c index a443722a8632..1b7746a6549a 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c @@ -589,8 +589,7 @@ static const struct hubbub_funcs hubbub35_funcs = { .hubbub_read_state = hubbub2_read_state, .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, .dchubbub_init = hubbub35_init, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub35_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c index a36273a52880..d11afd1ce72a 100644 --- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c @@ -1247,8 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = { .program_compbuf_segments = dcn401_program_compbuf_segments, .wait_for_det_update = dcn401_wait_for_det_update, .program_arbiter = dcn401_program_arbiter, - .get_det_sizes = hubbub3_get_det_sizes, - .compbuf_config_error = hubbub3_compbuf_config_error, + .hubbub_read_reg_state = hubbub3_read_reg_state }; void hubbub401_construct(struct dcn20_hubbub *hubbub2, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c index 9b026600b90e..6378e3fd7249 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c @@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp) { memset(&hubp->pos, 0, sizeof(hubp->pos)); memset(&hubp->att, 0, sizeof(hubp->att)); + hubp->cursor_offload = false; } void hubp1_program_surface_config( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h 
b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index cf2eb9793008..f2571076fc50 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -105,7 +105,9 @@ SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\ SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\ SRI(HUBP_CLK_CNTL, HUBP, id),\ - SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id) + SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\ + SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\ + SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id) /* Register address initialization macro for ASICs with VM */ #define HUBP_REG_LIST_DCN_VM(id)\ @@ -251,7 +253,19 @@ uint32_t CURSOR_HOT_SPOT; \ uint32_t CURSOR_DST_OFFSET; \ uint32_t HUBP_CLK_CNTL; \ - uint32_t HUBPRET_READ_LINE_VALUE + uint32_t HUBPRET_READ_LINE_VALUE; \ + uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \ + uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \ + uint32_t HUBPRET_INTERRUPT; \ + uint32_t HUBPRET_MEM_PWR_CTRL; \ + uint32_t HUBPRET_MEM_PWR_STATUS; \ + uint32_t HUBPRET_READ_LINE_CTRL0; \ + uint32_t HUBPRET_READ_LINE_CTRL1; \ + uint32_t HUBPRET_READ_LINE0; \ + uint32_t HUBPRET_READ_LINE1; \ + uint32_t HUBPREQ_MEM_PWR_CTRL; \ + uint32_t HUBPREQ_MEM_PWR_STATUS + #define HUBP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -688,6 +702,123 @@ struct dcn_fl_regs_st { uint32_t lut_fl_mode; uint32_t lut_fl_format; }; +struct dcn_hubp_reg_state { + uint32_t hubp_cntl; + uint32_t mall_config; + uint32_t mall_sub_vp; + uint32_t hubp_req_size_config; + uint32_t hubp_req_size_config_c; + uint32_t vmpg_config; + uint32_t addr_config; + uint32_t pri_viewport_dimension; + uint32_t pri_viewport_dimension_c; + uint32_t pri_viewport_start; + uint32_t pri_viewport_start_c; + uint32_t sec_viewport_dimension; + uint32_t sec_viewport_dimension_c; + uint32_t sec_viewport_start; + uint32_t sec_viewport_start_c; + uint32_t surface_config; + uint32_t tiling_config; + uint32_t clk_cntl; + uint32_t mall_status; + 
uint32_t measure_win_ctrl_dcfclk; + uint32_t measure_win_ctrl_dppclk; + + uint32_t blank_offset_0; + uint32_t blank_offset_1; + uint32_t cursor_settings; + uint32_t dcn_cur0_ttu_cntl0; + uint32_t dcn_cur0_ttu_cntl1; + uint32_t dcn_cur1_ttu_cntl0; + uint32_t dcn_cur1_ttu_cntl1; + uint32_t dcn_dmdat_vm_cntl; + uint32_t dcn_expansion_mode; + uint32_t dcn_global_ttu_cntl; + uint32_t dcn_surf0_ttu_cntl0; + uint32_t dcn_surf0_ttu_cntl1; + uint32_t dcn_surf1_ttu_cntl0; + uint32_t dcn_surf1_ttu_cntl1; + uint32_t dcn_ttu_qos_wm; + uint32_t dcn_vm_mx_l1_tlb_cntl; + uint32_t dcn_vm_system_aperture_high_addr; + uint32_t dcn_vm_system_aperture_low_addr; + uint32_t dcsurf_flip_control; + uint32_t dcsurf_flip_control2; + uint32_t dcsurf_primary_meta_surface_address; + uint32_t dcsurf_primary_meta_surface_address_c; + uint32_t dcsurf_primary_meta_surface_address_high; + uint32_t dcsurf_primary_meta_surface_address_high_c; + uint32_t dcsurf_primary_surface_address; + uint32_t dcsurf_primary_surface_address_c; + uint32_t dcsurf_primary_surface_address_high; + uint32_t dcsurf_primary_surface_address_high_c; + uint32_t dcsurf_secondary_meta_surface_address; + uint32_t dcsurf_secondary_meta_surface_address_c; + uint32_t dcsurf_secondary_meta_surface_address_high; + uint32_t dcsurf_secondary_meta_surface_address_high_c; + uint32_t dcsurf_secondary_surface_address; + uint32_t dcsurf_secondary_surface_address_c; + uint32_t dcsurf_secondary_surface_address_high; + uint32_t dcsurf_secondary_surface_address_high_c; + uint32_t dcsurf_surface_control; + uint32_t dcsurf_surface_earliest_inuse; + uint32_t dcsurf_surface_earliest_inuse_c; + uint32_t dcsurf_surface_earliest_inuse_high; + uint32_t dcsurf_surface_earliest_inuse_high_c; + uint32_t dcsurf_surface_flip_interrupt; + uint32_t dcsurf_surface_inuse; + uint32_t dcsurf_surface_inuse_c; + uint32_t dcsurf_surface_inuse_high; + uint32_t dcsurf_surface_inuse_high_c; + uint32_t dcsurf_surface_pitch; + uint32_t dcsurf_surface_pitch_c; + uint32_t 
dst_after_scaler; + uint32_t dst_dimensions; + uint32_t dst_y_delta_drq_limit; + uint32_t flip_parameters_0; + uint32_t flip_parameters_1; + uint32_t flip_parameters_2; + uint32_t flip_parameters_3; + uint32_t flip_parameters_4; + uint32_t flip_parameters_5; + uint32_t flip_parameters_6; + uint32_t hubpreq_mem_pwr_ctrl; + uint32_t hubpreq_mem_pwr_status; + uint32_t nom_parameters_0; + uint32_t nom_parameters_1; + uint32_t nom_parameters_2; + uint32_t nom_parameters_3; + uint32_t nom_parameters_4; + uint32_t nom_parameters_5; + uint32_t nom_parameters_6; + uint32_t nom_parameters_7; + uint32_t per_line_delivery; + uint32_t per_line_delivery_pre; + uint32_t prefetch_settings; + uint32_t prefetch_settings_c; + uint32_t ref_freq_to_pix_freq; + uint32_t uclk_pstate_force; + uint32_t vblank_parameters_0; + uint32_t vblank_parameters_1; + uint32_t vblank_parameters_2; + uint32_t vblank_parameters_3; + uint32_t vblank_parameters_4; + uint32_t vblank_parameters_5; + uint32_t vblank_parameters_6; + uint32_t vmid_settings_0; + + uint32_t hubpret_control; + uint32_t hubpret_interrupt; + uint32_t hubpret_mem_pwr_ctrl; + uint32_t hubpret_mem_pwr_status; + uint32_t hubpret_read_line_ctrl0; + uint32_t hubpret_read_line_ctrl1; + uint32_t hubpret_read_line_status; + uint32_t hubpret_read_line_value; + uint32_t hubpret_read_line0; + uint32_t hubpret_read_line1; +}; struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; @@ -718,7 +849,6 @@ struct dcn_hubp_state { uint32_t hubp_cntl; uint32_t flip_control; }; - struct dcn10_hubp { struct hubp base; struct dcn_hubp_state state; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c index 91259b896e03..92288de4cc10 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c @@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes( hubp->curs_attr = *attr; - 
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, - CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); - REG_UPDATE(CURSOR_SURFACE_ADDRESS, - CURSOR_SURFACE_ADDRESS, attr->address.low_part); + if (!hubp->cursor_offload) { + REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, + CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); + REG_UPDATE(CURSOR_SURFACE_ADDRESS, + CURSOR_SURFACE_ADDRESS, attr->address.low_part); - REG_UPDATE_2(CURSOR_SIZE, - CURSOR_WIDTH, attr->width, - CURSOR_HEIGHT, attr->height); + REG_UPDATE_2(CURSOR_SIZE, + CURSOR_WIDTH, attr->width, + CURSOR_HEIGHT, attr->height); - REG_UPDATE_4(CURSOR_CONTROL, - CURSOR_MODE, attr->color_format, - CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, - CURSOR_PITCH, hw_pitch, - CURSOR_LINES_PER_CHUNK, lpc); + REG_UPDATE_4(CURSOR_CONTROL, + CURSOR_MODE, attr->color_format, + CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, + CURSOR_PITCH, hw_pitch, + CURSOR_LINES_PER_CHUNK, lpc); - REG_SET_2(CURSOR_SETTINGS, 0, - /* no shift of the cursor HDL schedule */ - CURSOR0_DST_Y_OFFSET, 0, - /* used to shift the cursor chunk request deadline */ - CURSOR0_CHUNK_HDL_ADJUST, 3); + REG_SET_2(CURSOR_SETTINGS, 0, + /* no shift of the cursor HDL schedule */ + CURSOR0_DST_Y_OFFSET, 0, + /* used to shift the cursor chunk request deadline */ + CURSOR0_CHUNK_HDL_ADJUST, 3); + } hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part; hubp->att.SURFACE_ADDR = attr->address.low_part; @@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position( cur_en = 0; /* not visible beyond top edge*/ if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) { - if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) + bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0; + + if (cur_en && cursor_not_programmed) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, cur_en); + if (!hubp->cursor_offload) + REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); 
} - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, - CURSOR_Y_POSITION, pos->y); + if (!hubp->cursor_offload) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, pos->x, + CURSOR_Y_POSITION, pos->y); - REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, pos->x_hotspot, - CURSOR_HOT_SPOT_Y, pos->y_hotspot); + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); + + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + } - REG_SET(CURSOR_DST_OFFSET, 0, - CURSOR_DST_X_OFFSET, dst_x_offset); /* TODO Handle surface pixel formats other than 4:4:4 */ /* Cursor Position Register Config */ hubp->pos.cur_ctl.bits.cur_enable = cur_en; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h index f325db555102..7062e6653062 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h @@ -145,7 +145,8 @@ uint32_t FLIP_PARAMETERS_2;\ uint32_t DCN_CUR1_TTU_CNTL0;\ uint32_t DCN_CUR1_TTU_CNTL1;\ - uint32_t VMID_SETTINGS_0 + uint32_t VMID_SETTINGS_0;\ + uint32_t DST_Y_DELTA_DRQ_LIMIT /*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ @@ -176,7 +177,10 @@ uint32_t HUBP_3DLUT_CONTROL;\ uint32_t HUBP_3DLUT_DLG_PARAM;\ uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\ - uint32_t DCHUBP_MCACHEID_CONFIG + uint32_t DCHUBP_MCACHEID_CONFIG;\ + uint32_t DCHUBP_MALL_SUB_VP;\ + uint32_t DCHUBP_ADDR_CONFIG;\ + uint32_t HUBP_MALL_STATUS #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \ diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c index e2740482e1cf..08ea0a1b9e7f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c @@ -73,8 +73,6 @@ * On any mode switch, if the new reg 
values are smaller than the current values, * then update the regs with the new values. * - * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142 - * */ void apply_DEDCN21_142_wa_for_hostvm_deadline( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c index 556214b2227d..0cc6f4558989 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c @@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp) } +void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL); + reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG); + reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP); + reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG); + reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C); + reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG); + reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG); + reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION); + reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C); + reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START); + reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C); + reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION); + reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C); + reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START); + reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C); + reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG); + reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG); + reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL); + reg_state->mall_status = REG_READ(HUBP_MALL_STATUS); + 
reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK); + reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK); + + reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0); + reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1); + reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS); + reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0); + reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1); + reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0); + reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1); + reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL); + reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE); + reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL); + reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0); + reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1); + reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0); + reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1); + reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM); + reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL); + reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR); + reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR); + reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL); + reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2); + reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS); + reg_state->dcsurf_primary_meta_surface_address_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C); + reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS); + 
reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C); + reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS); + reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C); + reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS); + reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C); + reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH); + reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C); + reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL); + reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE); + reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C); + reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH); + reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C); + reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT); + reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE); + reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C); + reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH); + reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C); + reg_state->dcsurf_surface_pitch = 
REG_READ(DCSURF_SURFACE_PITCH); + reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C); + reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER); + reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS); + reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT); + reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0); + reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1); + reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2); + reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3); + reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4); + reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5); + reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6); + reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL); + reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS); + reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0); + reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1); + reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2); + reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3); + reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4); + reg_state->nom_parameters_5 = REG_READ(NOM_PARAMETERS_5); + reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6); + reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7); + reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY); + reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE); + reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS); + reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C); + reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ); + reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE); + reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0); + reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1); + reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2); + reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3); + 
reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4); + reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5); + reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6); + reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0); + reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL); + reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT); + reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL); + reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS); + reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0); + reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1); + reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS); + reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE); + reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0); + reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1); +} + void hubp3_setup( struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st *dlg_attr, @@ -505,30 +625,6 @@ void hubp3_init(struct hubp *hubp) hubp_reset(hubp); } -uint32_t hubp3_get_current_read_line(struct hubp *hubp) -{ - uint32_t read_line = 0; - struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - - REG_GET(HUBPRET_READ_LINE_VALUE, - PIPE_READ_LINE, - &read_line); - - return read_line; -} - -unsigned int hubp3_get_underflow_status(struct hubp *hubp) -{ - uint32_t hubp_underflow = 0; - struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - - REG_GET(DCHUBP_CNTL, - HUBP_UNDERFLOW_STATUS, - &hubp_underflow); - - return hubp_underflow; -} - static struct hubp_funcs dcn30_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, @@ -558,8 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = { .hubp_soft_reset = hubp1_soft_reset, .hubp_set_flip_int = hubp1_set_flip_int, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - 
.hubp_get_current_read_line = hubp3_get_current_read_line, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp3_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h index 842f4eb72cc8..c767e9f4f9b3 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h @@ -296,6 +296,8 @@ void hubp3_dmdata_set_attributes( void hubp3_read_state(struct hubp *hubp); +void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state); + void hubp3_init(struct hubp *hubp); void hubp3_clear_tiling(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c index 47101847c2b7..189045f85039 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c @@ -110,9 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state, }; bool hubp31_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c index a5f23bb2a76a..a781085b046b 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c @@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes( uint32_t cursor_width = ((attr->width + 63) / 64) * 64; uint32_t cursor_height = attr->height; uint32_t cursor_size = cursor_width * cursor_height; - - hubp->curs_attr = *attr; - - REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, - 
CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); - REG_UPDATE(CURSOR_SURFACE_ADDRESS, - CURSOR_SURFACE_ADDRESS, attr->address.low_part); - - REG_UPDATE_2(CURSOR_SIZE, - CURSOR_WIDTH, attr->width, - CURSOR_HEIGHT, attr->height); - - REG_UPDATE_4(CURSOR_CONTROL, - CURSOR_MODE, attr->color_format, - CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, - CURSOR_PITCH, hw_pitch, - CURSOR_LINES_PER_CHUNK, lpc); - - REG_SET_2(CURSOR_SETTINGS, 0, - /* no shift of the cursor HDL schedule */ - CURSOR0_DST_Y_OFFSET, 0, - /* used to shift the cursor chunk request deadline */ - CURSOR0_CHUNK_HDL_ADJUST, 3); + bool use_mall_for_cursor; switch (attr->color_format) { case CURSOR_MODE_MONO: @@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes( cursor_size *= 8; break; } + use_mall_for_cursor = cursor_size > 16384 ? 1 : 0; - if (cursor_size > 16384) - REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true); - else - REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false); + hubp->curs_attr = *attr; + + if (!hubp->cursor_offload) { + REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH, + CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part); + REG_UPDATE(CURSOR_SURFACE_ADDRESS, + CURSOR_SURFACE_ADDRESS, attr->address.low_part); + + REG_UPDATE_2(CURSOR_SIZE, + CURSOR_WIDTH, attr->width, + CURSOR_HEIGHT, attr->height); + + REG_UPDATE_4(CURSOR_CONTROL, + CURSOR_MODE, attr->color_format, + CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION, + CURSOR_PITCH, hw_pitch, + CURSOR_LINES_PER_CHUNK, lpc); + + REG_SET_2(CURSOR_SETTINGS, 0, + /* no shift of the cursor HDL schedule */ + CURSOR0_DST_Y_OFFSET, 0, + /* used to shift the cursor chunk request deadline */ + CURSOR0_CHUNK_HDL_ADJUST, 3); + + REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor); + } + hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part; + hubp->att.SURFACE_ADDR = attr->address.low_part; + hubp->att.size.bits.width = attr->width; + hubp->att.size.bits.height = 
attr->height; + hubp->att.cur_ctl.bits.mode = attr->color_format; + + hubp->cur_rect.w = attr->width; + hubp->cur_rect.h = attr->height; + + hubp->att.cur_ctl.bits.pitch = hw_pitch; + hubp->att.cur_ctl.bits.line_per_chunk = lpc; + hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION; + hubp->att.settings.bits.dst_y_offset = 0; + hubp->att.settings.bits.chunk_hdl_adjust = 3; + hubp->use_mall_for_cursor = use_mall_for_cursor; } void hubp32_init(struct hubp *hubp) { @@ -206,9 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = { .hubp_update_mall_sel = hubp32_update_mall_sel, .hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp32_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c index b140808f21af..79c583e258c7 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c @@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = { .dmdata_load = hubp2_dmdata_load, .dmdata_status_done = hubp2_dmdata_status_done, .hubp_read_state = hubp3_read_state, + .hubp_read_reg_state = hubp3_read_reg_state, .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp35_init, @@ -218,9 +219,6 @@ static struct hubp_funcs dcn35_hubp_funcs = { .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank_value, .hubp_clear_tiling = hubp3_clear_tiling, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = 
hubp31_get_det_config_error, }; bool hubp35_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index 0fcbc6a35be6..f01eae50d02f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -783,21 +783,23 @@ void hubp401_cursor_set_position( if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, cur_en); + if (!hubp->cursor_offload) + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, cur_en); } - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, x_pos, - CURSOR_Y_POSITION, y_pos); + if (!hubp->cursor_offload) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, x_pos, + CURSOR_Y_POSITION, y_pos); - REG_SET_2(CURSOR_HOT_SPOT, 0, - CURSOR_HOT_SPOT_X, pos->x_hotspot, - CURSOR_HOT_SPOT_Y, pos->y_hotspot); - - REG_SET(CURSOR_DST_OFFSET, 0, - CURSOR_DST_X_OFFSET, dst_x_offset); + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, pos->x_hotspot, + CURSOR_HOT_SPOT_Y, pos->y_hotspot); + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + } /* Cursor Position Register Config */ hubp->pos.cur_ctl.bits.cur_enable = cur_en; hubp->pos.position.bits.x_pos = pos->x; @@ -1071,9 +1073,7 @@ static struct hubp_funcs dcn401_hubp_funcs = { .hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done, .hubp_clear_tiling = hubp401_clear_tiling, .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config, - .hubp_get_underflow_status = hubp3_get_underflow_status, - .hubp_get_current_read_line = hubp3_get_current_read_line, - .hubp_get_det_config_error = hubp31_get_det_config_error, + .hubp_read_reg_state = hubp3_read_reg_state }; bool hubp401_construct( diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index fdabbeec8ffa..4570b8016de5 
100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -31,7 +31,7 @@ #include "dcn30/dcn30_hubp.h" #include "dcn31/dcn31_hubp.h" #include "dcn32/dcn32_hubp.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" #define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\ SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 24184b4eb352..3005115c8505 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) } } +static void +dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc_link *link = pipe_ctx->stream->link; + struct dc_bios *bios = link->ctx->dc_bios; + struct bp_encoder_control encoder_control = {0}; + + encoder_control.action = enable ? 
ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE; + encoder_control.engine_id = link->link_enc->analog_engine; + encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10; + + bios->funcs->encoder_control(bios, &encoder_control); +} + void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = @@ -689,6 +703,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) early_control = lane_count; tg->funcs->set_early_control(tg, early_control); + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + dce110_dac_encoder_control(pipe_ctx, true); } static enum bp_result link_transmitter_control( @@ -1176,7 +1193,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.stream_enc); - dc->hwss.disable_audio_stream(pipe_ctx); + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + dc->hwss.disable_audio_stream(pipe_ctx); link_hwss->reset_stream_encoder(pipe_ctx); @@ -1196,6 +1214,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + dce110_dac_encoder_control(pipe_ctx, false); } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -1581,6 +1602,51 @@ static enum dc_status dce110_enable_stream_timing( return DC_OK; } +static void +dce110_select_crtc_source(struct pipe_ctx *pipe_ctx) +{ + struct dc_link *link = pipe_ctx->stream->link; + struct dc_bios *bios = link->ctx->dc_bios; + struct bp_crtc_source_select crtc_source_select = {0}; + enum engine_id engine_id = link->link_enc->preferred_engine; + uint8_t bit_depth; + + if (dc_is_rgb_signal(pipe_ctx->stream->signal)) + engine_id = link->link_enc->analog_engine; + + switch (pipe_ctx->stream->timing.display_color_depth) { + case COLOR_DEPTH_UNDEFINED: + bit_depth = 0; + break; + case COLOR_DEPTH_666: + bit_depth = 6; + break; + 
default: + case COLOR_DEPTH_888: + bit_depth = 8; + break; + case COLOR_DEPTH_101010: + bit_depth = 10; + break; + case COLOR_DEPTH_121212: + bit_depth = 12; + break; + case COLOR_DEPTH_141414: + bit_depth = 14; + break; + case COLOR_DEPTH_161616: + bit_depth = 16; + break; + } + + crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst; + crtc_source_select.bit_depth = bit_depth; + crtc_source_select.engine_id = engine_id; + crtc_source_select.sink_signal = pipe_ctx->stream->signal; + + bios->funcs->select_crtc_source(bios, &crtc_source_select); +} + enum dc_status dce110_apply_single_controller_ctx_to_hw( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -1600,6 +1666,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( hws->funcs.disable_stream_gating(dc, pipe_ctx); } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) { + dce110_select_crtc_source(pipe_ctx); + } + if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output = {0}; @@ -1679,7 +1749,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.tg->funcs->set_static_screen_control( pipe_ctx->stream_res.tg, event_triggers, 2); - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) + if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && + !dc_is_rgb_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); @@ -1913,6 +1984,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) bool can_apply_edp_fast_boot = false; bool can_apply_seamless_boot = false; bool keep_edp_vdd_on = false; + bool should_clean_dsc_block = true; struct dc_bios *dcb = dc->ctx->dc_bios; DC_LOGGER_INIT(); @@ -2005,9 +2077,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) power_down_all_hw_blocks(dc); /* DSC could be enabled on eDP during VBIOS post. 
- * To clean up dsc blocks if eDP is in link but not active. + * To clean up dsc blocks if all eDP dpms_off is true. */ - if (edp_link_with_sink && (edp_stream_num == 0)) + for (i = 0; i < edp_stream_num; i++) { + if (!edp_streams[i]->dpms_off) { + should_clean_dsc_block = false; + } + } + + if (should_clean_dsc_block) clean_up_dsc_blocks(dc); disable_vga_and_power_gate_all_controllers(dc); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index e9fe97f0c4ea..fa62e40a9858 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -2245,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) if (lock) delay_cursor_until_vupdate(dc, pipe); - if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { + if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -3090,6 +3090,9 @@ static void dcn10_update_dchubp_dpp( } if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + if (dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 9477c9f9e196..6bd905905984 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1449,7 +1449,7 @@ void dcn20_pipe_control_lock( !flip_immediate) dcn20_setup_gsl_group_as_lock(dc, pipe, false); - if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) { + if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags 
inst_flags = { 0 }; @@ -1793,6 +1793,9 @@ void dcn20_update_dchubp_dpp( if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + if (dc->hwss.abort_cursor_offload_update) + dc->hwss.abort_cursor_offload_update(dc, pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); dc->hwss.set_cursor_position(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index e47ed5571dfd..81bcadf5e57e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -53,7 +53,8 @@ #include "link_service.h" #include "dc_state_priv.h" - +#define TO_DCN_DCCG(dccg)\ + container_of(dccg, struct dcn_dccg, base) #define DC_LOGGER_INIT(logger) @@ -1235,44 +1236,47 @@ void dcn30_get_underflow_debug_data(const struct dc *dc, { struct hubbub *hubbub = dc->res_pool->hubbub; - if (tg) { - uint32_t v_blank_start = 0, v_blank_end = 0; - - out_data->otg_inst = tg->inst; - - tg->funcs->get_scanoutpos(tg, - &v_blank_start, - &v_blank_end, - &out_data->h_position, - &out_data->v_position); - - out_data->otg_frame_count = tg->funcs->get_frame_count(tg); - - out_data->otg_underflow = tg->funcs->is_optc_underflow_occurred(tg); + if (hubbub) { + if (hubbub->funcs->hubbub_read_reg_state) { + hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state); + } } for (int i = 0; i < MAX_PIPES; i++) { struct hubp *hubp = dc->res_pool->hubps[i]; + struct dpp *dpp = dc->res_pool->dpps[i]; + struct output_pixel_processor *opp = dc->res_pool->opps[i]; + struct display_stream_compressor *dsc = dc->res_pool->dscs[i]; + struct mpc *mpc = dc->res_pool->mpc; + struct timing_generator *optc = dc->res_pool->timing_generators[i]; + struct dccg *dccg = dc->res_pool->dccg; - if (hubp) { - if 
(hubp->funcs->hubp_get_underflow_status) - out_data->hubps[i].hubp_underflow = hubp->funcs->hubp_get_underflow_status(hubp); + if (hubp) + if (hubp->funcs->hubp_read_reg_state) + hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]); - if (hubp->funcs->hubp_in_blank) - out_data->hubps[i].hubp_in_blank = hubp->funcs->hubp_in_blank(hubp); + if (dpp) + if (dpp->funcs->dpp_read_reg_state) + dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]); - if (hubp->funcs->hubp_get_current_read_line) - out_data->hubps[i].hubp_readline = hubp->funcs->hubp_get_current_read_line(hubp); + if (opp) + if (opp->funcs->opp_read_reg_state) + opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]); - if (hubp->funcs->hubp_get_det_config_error) - out_data->hubps[i].det_config_error = hubp->funcs->hubp_get_det_config_error(hubp); - } + if (dsc) + if (dsc->funcs->dsc_read_reg_state) + dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]); + + if (mpc) + if (mpc->funcs->mpc_read_reg_state) + mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]); + + if (optc) + if (optc->funcs->optc_read_reg_state) + optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]); + + if (dccg) + if (dccg->funcs->dccg_read_reg_state) + dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]); } - - if (hubbub->funcs->get_det_sizes) - hubbub->funcs->get_det_sizes(hubbub, out_data->curr_det_sizes, out_data->target_det_sizes); - - if (hubbub->funcs->compbuf_config_error) - out_data->compbuf_config_error = hubbub->funcs->compbuf_config_error(hubbub); - } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index b822f2dffff0..d1ecdb92b072 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -710,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx, panel_cntl->inst, 
panel_cntl->pwrseq_inst); - dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); + if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX) + dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index f925f669f2a4..4ee6ed610de0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index f39292952702..bf19ba65d09a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -1061,6 +1061,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (should_use_dto_dscclk) dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 05011061822c..7aa0f452e8f7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -364,6 
+364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); @@ -816,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context) { struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dccg *dccg = dc->res_pool->dccg; - /* enable DCFCLK current DCHUB */ pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true); @@ -825,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, /* initialize HUBP on power up */ pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp); /*make sure DPPCLK is on*/ - dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true); dpp->funcs->dpp_dppclk_control(dpp, false, true); /* make sure OPP_PIPE_CLOCK_EN = 1 */ pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control( @@ -859,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dccg *dccg = dc->res_pool->dccg; dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx); @@ -878,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) hubp->funcs->hubp_clk_cntl(hubp, false); dpp->funcs->dpp_dppclk_control(dpp, false, false); - dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false); hubp->power_gated = true; @@ -1592,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc) if (dc->hwss.hw_block_power_up) dc->hwss.hw_block_power_up(dc, &pg_update_state); } + +void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + if 
(!dc_dmub_srv_is_cursor_offload_enabled(dc)) + return; + + /* + * Insert a blank update to modify the write index and set pipe_mask to 0. + * + * While the DMU is interlocked with driver full pipe programming via + * the DMU HW lock, if the cursor update begins to execute after a full + * pipe programming occurs there are two possible issues: + * + * 1. Outdated cursor information is programmed, replacing the current update + * 2. The cursor update in firmware holds the cursor lock, preventing + * the current update from being latched atomically in the same frame + * as the rest of the update. + * + * This blank update, treated as a no-op, will allow the firmware to skip + * the programming. + */ + + if (dc->hwss.begin_cursor_offload_update) + dc->hwss.begin_cursor_offload_update(dc, pipe); + + if (dc->hwss.commit_cursor_offload_update) + dc->hwss.commit_cursor_offload_update(dc, pipe); +} + +void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx; + + if (pipe->plane_res.hubp) + pipe->plane_res.hubp->cursor_offload = true; + + if (pipe->plane_res.dpp) + pipe->plane_res.dpp->cursor_offload = true; +} + +void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + 
uint32_t stream_idx, write_idx, payload_idx; + + if (pipe->plane_res.hubp) + pipe->plane_res.hubp->cursor_offload = false; + + if (pipe->plane_res.dpp) + pipe->plane_res.dpp->cursor_offload = false; + + if (!top_pipe) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1] + .data.cursor_offload_v1.offload_streams[stream_idx]; + + shared_stream->last_write_idx = write_idx; + + cs->offload_streams[stream_idx].write_idx = write_idx; + cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx; +} + +void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + const struct hubp *hubp = pipe->plane_res.hubp; + const struct dpp *dpp = pipe->plane_res.dpp; + volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p; + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe || !hubp || !dpp) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30; + + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR; + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = 
hubp->pos.position.bits.y_pos; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot; + p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk; + + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode; + p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en; + p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000; + p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF; + p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias; + p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale; + + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset; + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust; + + cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx); +} + +void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream) +{ + dc_dmub_srv_control_cursor_offload(dc, context, stream, true); +} + +void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe) +{ + dc_dmub_srv_program_cursor_now(dc, pipe); +} diff --git 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 0b1d6f608edd..1ff41dba556c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx); void dcn35_hardware_release(struct dc *dc); +void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe); +void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream); +void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index f2f16a0bdb4f..5a66c9db2670 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .abort_cursor_offload_update = dcn35_abort_cursor_offload_update, + .begin_cursor_offload_update = dcn35_begin_cursor_offload_update, + .commit_cursor_offload_update = dcn35_commit_cursor_offload_update, + .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe, + .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update, + .program_cursor_offload_now = dcn35_program_cursor_offload_now, .setup_periodic_interrupt = 
dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 7c276c319086..f02edc9371b0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -26,9 +26,11 @@ #include "clk_mgr.h" #include "dsc.h" #include "link_service.h" +#include "custom_float.h" #include "dce/dmub_hw_lock_mgr.h" #include "dcn10/dcn10_cm_common.h" +#include "dcn10/dcn10_hubbub.h" #include "dcn20/dcn20_optc.h" #include "dcn30/dcn30_cm_common.h" #include "dcn32/dcn32_hwseq.h" @@ -36,6 +38,7 @@ #include "dcn401/dcn401_resource.h" #include "dc_state_priv.h" #include "link_enc_cfg.h" +#include "../hw_sequencer.h" #define DC_LOGGER_INIT(logger) @@ -200,6 +203,9 @@ void dcn401_init_hw(struct dc *dc) */ struct dc_link *link = dc->links[i]; + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + continue; + link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ @@ -1404,9 +1410,9 @@ void dcn401_prepare_bandwidth(struct dc *dc, } if (dc->debug.fams2_config.bits.enable) { - dcn401_fams2_global_control_lock(dc, context, true); + dcn401_dmub_hw_control_lock(dc, context, true); dcn401_fams2_update_config(dc, context, false); - dcn401_fams2_global_control_lock(dc, context, false); + dcn401_dmub_hw_control_lock(dc, context, false); } if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) { @@ -1425,9 +1431,9 @@ void dcn401_optimize_bandwidth( /* enable fams2 if needed */ if (dc->debug.fams2_config.bits.enable) { - dcn401_fams2_global_control_lock(dc, context, true); + dcn401_dmub_hw_control_lock(dc, context, true); dcn401_fams2_update_config(dc, context, true); - dcn401_fams2_global_control_lock(dc, context, false); + dcn401_dmub_hw_control_lock(dc, context, false); } /* program dchubbub watermarks 
*/ @@ -1466,14 +1472,17 @@ void dcn401_optimize_bandwidth( } } -void dcn401_fams2_global_control_lock(struct dc *dc, +void dcn401_dmub_hw_control_lock(struct dc *dc, struct dc_state *context, bool lock) { /* use always for now */ union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; - if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable) + if (!dc->ctx || !dc->ctx->dmub_srv) + return; + + if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc)) return; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; @@ -1483,12 +1492,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc, dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); } -void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params) +void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params) { - struct dc *dc = params->fams2_global_control_lock_fast_params.dc; - bool lock = params->fams2_global_control_lock_fast_params.lock; + struct dc *dc = params->dmub_hw_control_lock_fast_params.dc; + bool lock = params->dmub_hw_control_lock_fast_params.lock; - if (params->fams2_global_control_lock_fast_params.is_required) { + if (params->dmub_hw_control_lock_fast_params.is_required) { union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; @@ -1595,6 +1604,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context, dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true); } +static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state) +{ + struct pipe_ctx *old_pipe; + struct pipe_ctx *new_pipe; + struct pipe_ctx *old_opp_heads[MAX_PIPES]; + struct pipe_ctx *old_otg_master; + int old_opp_head_count = 0; + int i; + + old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx]; + + if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) { + 
old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master, + &dc->current_state->res_ctx, + old_opp_heads); + } else { + old_otg_master = NULL; + } + + /* Process new DSC configuration if DSC is enabled */ + if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) { + struct dc_stream_state *stream = otg_master->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + int last_dsc_calc = 0; + bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) && + stream->timing.pix_clk_100hz > 480000; + + /* Count ODM pipes */ + for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt; + + /* Step 1: Set DTO DSCCLK for main DSC if needed */ + if (should_use_dto_dscclk) { + hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg, + otg_master->stream_res.dsc->inst, num_slices_h); + } + + /* Step 2: Calculate and set DSC config for main DSC */ + last_dsc_calc = *seq_state->num_steps; + hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt); + + /* Step 3: Enable main DSC block */ + hwss_add_dsc_enable_with_opp(seq_state, otg_master); + + /* Step 4: Configure and enable ODM DSC blocks */ + for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + if (!odm_pipe->stream_res.dsc) + continue; + + /* Set DTO DSCCLK for ODM DSC if needed */ + if (should_use_dto_dscclk) { + hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg, + odm_pipe->stream_res.dsc->inst, num_slices_h); + } + + /* Calculate and set DSC config for ODM DSC */ + last_dsc_calc = *seq_state->num_steps; + hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt); + + /* Enable ODM DSC block */ + hwss_add_dsc_enable_with_opp(seq_state, odm_pipe); + } + + /* Step 5: Configure DSC in timing generator */ + hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, + 
&seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true); + } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) { + /* Disable DSC in OPTC */ + hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false); + + hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc); + } + + /* Disable DSC for old pipes that no longer need it */ + if (old_otg_master && old_otg_master->stream_res.dsc) { + for (i = 0; i < old_opp_head_count; i++) { + old_pipe = old_opp_heads[i]; + new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx]; + + /* If old pipe had DSC but new pipe doesn't, disable the old DSC */ + if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) { + /* Then disconnect DSC block */ + hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc); + } + } + } +} + +void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state) +{ + struct pipe_ctx *opp_heads[MAX_PIPES]; + int opp_inst[MAX_PIPES] = {0}; + int opp_head_count; + int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false); + int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true); + int i; + + opp_head_count = resource_get_opp_heads_for_otg_master( + otg_master, &context->res_ctx, opp_heads); + + for (i = 0; i < opp_head_count; i++) + opp_inst[i] = opp_heads[i]->stream_res.opp->inst; + + /* Add ODM combine/bypass operation to sequence */ + if (opp_head_count > 1) { + hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst, + opp_head_count, odm_slice_width, last_odm_slice_width); + } else { + hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing); + } + + /* Add OPP operations to sequence */ + for (i = 0; i < opp_head_count; i++) { + /* Add OPP pipe clock control operation */ + hwss_add_opp_pipe_clock_control(seq_state, 
opp_heads[i]->stream_res.opp, true); + + /* Add OPP program left edge extra pixel operation */ + hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp, + opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER)); + } + + /* Add DSC update operations to sequence */ + dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state); + + /* Add blank pixel data operation if needed */ + if (!resource_is_pipe_type(otg_master, DPP_PIPE)) { + if (dc->hwseq->funcs.blank_pixel_data_sequence) + dc->hwseq->funcs.blank_pixel_data_sequence( + dc, otg_master, true, seq_state); + } +} + void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings) { @@ -2083,6 +2229,157 @@ void dcn401_program_pipe( } } +/* + * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe + * + * This function creates a sequence-based version of the original dcn401_program_pipe + * function. Instead of directly calling hardware programming functions, it appends + * sequence steps to the provided block_sequence array that can later be executed + * as part of hwss_execute_sequence. 
+ * + */ +void dcn401_program_pipe_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + + /* Only need to unblank on top pipe */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) { + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.odm || + pipe_ctx->stream->update_flags.bits.abm_level) { + if (dc->hwseq->funcs.blank_pixel_data_sequence) + dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx, + !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible, + seq_state); + } + } + + /* Only update TG on top pipe */ + if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe + && !pipe_ctx->prev_odm_pipe) { + + /* Step 1: Program global sync */ + hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg, + dcn401_calculate_vready_offset_for_group(pipe_ctx), + (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines, + (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels, + (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels, + (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines); + + /* Step 2: Wait for VACTIVE state (if not phantom pipe) */ + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) + hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); + + /* Step 3: Set VTG params */ + hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); + + /* Step 4: Setup vupdate interrupt (if available) */ + if (hws->funcs.setup_vupdate_interrupt) + dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state); + } + + if (pipe_ctx->update_flags.bits.odm) { + if (hws->funcs.update_odm_sequence) + hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state); + } + + if (pipe_ctx->update_flags.bits.enable) { + if (dc->hwss.enable_plane_sequence) + dc->hwss.enable_plane_sequence(dc, 
pipe_ctx, context, seq_state);
+	}
+
+	if (pipe_ctx->update_flags.bits.det_size) {
+		if (dc->res_pool->hubbub->funcs->program_det_size) {
+			hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
+				pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+		}
+
+		if (dc->res_pool->hubbub->funcs->program_det_segments) {
+			hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
+				pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+		}
+	}
+
+	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+		pipe_ctx->plane_state->update_flags.raw ||
+		pipe_ctx->stream->update_flags.raw)) {
+
+		if (dc->hwss.update_dchubp_dpp_sequence)
+			dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
+	}
+
+	if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+		pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
+
+		hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
+	}
+
+	if (pipe_ctx->plane_state &&
+		(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+		pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+		pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+		pipe_ctx->update_flags.bits.enable)) {
+
+		hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
+	}
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
+	 * only do gamma programming for powering on, internal memcmp to avoid
+	 * updating on slave planes
+	 */
+	if (pipe_ctx->update_flags.bits.enable ||
+		pipe_ctx->update_flags.bits.plane_changed ||
+		pipe_ctx->stream->update_flags.bits.out_tf) {
+		hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
+	}
+
+	/* If the pipe has been enabled or has a different opp, we
+	 * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+	 * causes a different pipe to be chosen to odm combine with. 
+ */ + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->update_flags.bits.opp_changed) { + + hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, + pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal); + + hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp, + &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping); + } + + /* Set ABM pipe after other pipe configurations done */ + if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) { + if (pipe_ctx->stream_res.abm) { + hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx); + + hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level); + } + } + + if (pipe_ctx->update_flags.bits.test_pattern_changed) { + struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp; + + hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_opp, + pipe_ctx->stream_res.test_pattern_params.test_pattern, + pipe_ctx->stream_res.test_pattern_params.color_space, + pipe_ctx->stream_res.test_pattern_params.color_depth, + (struct tg_color){0}, + false, + pipe_ctx->stream_res.test_pattern_params.width, + pipe_ctx->stream_res.test_pattern_params.height, + pipe_ctx->stream_res.test_pattern_params.offset); + } + +} + void dcn401_program_front_end_for_ctx( struct dc *dc, struct dc_state *context) @@ -2160,7 +2457,6 @@ void dcn401_program_front_end_for_ctx( && context->res_ctx.pipe_ctx[i].stream) hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); - /* Disconnect mpcc */ for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable @@ -2239,11 +2535,11 @@ void dcn401_program_front_end_for_ctx( /* Avoid underflow by check of pipe line read when adding 2nd plane. 
*/ if (hws->wa.wait_hubpret_read_start_during_mpo_transition && - !pipe->top_pipe && - pipe->stream && - pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && - dc->current_state->stream_status[0].plane_count == 1 && - context->stream_status[0].plane_count > 1) { + !pipe->top_pipe && + pipe->stream && + pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start && + dc->current_state->stream_status[0].plane_count == 1 && + context->stream_status[0].plane_count > 1) { pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp); } } @@ -2355,7 +2651,6 @@ void dcn401_post_unlock_program_front_end( */ if (hwseq->funcs.update_force_pstate) dc->hwseq->funcs.update_force_pstate(dc, context); - /* Only program the MALL registers after all the main and phantom pipes * are done programming. */ @@ -2669,3 +2964,1082 @@ void dcn401_plane_atomic_power_down(struct dc *dc, if (hws->funcs.dpp_root_clock_control) hws->funcs.dpp_root_clock_control(hws, dpp->inst, false); } + +void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe) +{ + volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1; + const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe); + const struct hubp *hubp = pipe->plane_res.hubp; + const struct dpp *dpp = pipe->plane_res.dpp; + volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p; + uint32_t stream_idx, write_idx, payload_idx; + + if (!top_pipe || !hubp || !dpp) + return; + + stream_idx = top_pipe->pipe_idx; + write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */ + payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads); + + p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401; + + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR; + p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH; + p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width; + 
p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos; + p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot; + p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot; + p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch; + p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk; + + p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode; + p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en; + p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000; + p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF; + + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y = + dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y = + dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB = + dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb; + p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB = + dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb; + + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset; + p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = 
hubp->att.settings.bits.chunk_hdl_adjust; + p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor; + + cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx); +} + +void dcn401_plane_atomic_power_down_sequence(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t org_ip_request_cntl = 0; + + DC_LOGGER_INIT(dc->ctx->logger); + + /* Check and set DC_IP_REQUEST_CNTL if needed */ + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + hwss_add_dc_ip_request_cntl(seq_state, dc, true); + } + + /* DPP power gating control */ + hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false); + + /* HUBP power gating control */ + hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false); + + /* HUBP reset */ + hwss_add_hubp_reset(seq_state, hubp); + + /* DPP reset */ + hwss_add_dpp_reset(seq_state, dpp); + + /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */ + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) + hwss_add_dc_ip_request_cntl(seq_state, dc, false); + + DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst); + + /* DPP root clock control */ + hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false); +} + +/* trigger HW to start disconnect plane from stream on the next vsync using block sequence */ +void dcn401_plane_atomic_disconnect_sequence(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + int dpp_id = pipe_ctx->plane_res.dpp->inst; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove = NULL; + struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + + mpc_tree_params = &(opp->mpc_tree_params); + mpcc_to_remove = 
mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id); + + /*Already reset*/ + if (mpcc_to_remove == NULL) + return; + + /* Step 1: Remove MPCC from MPC tree */ + hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove); + + // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, + // so don't wait for MPCC_IDLE in the programming sequence + if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) { + /* Step 2: Set MPCC disconnect pending flag */ + hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true); + } + + /* Step 3: Set optimized required flag */ + hwss_add_dc_set_optimized_required(seq_state, dc, true); + + /* Step 4: Disconnect HUBP if function exists */ + if (hubp->funcs->hubp_disconnect) + hwss_add_hubp_disconnect(seq_state, hubp); + + /* Step 5: Verify pstate change high if debug sanity checks are enabled */ + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); +} + +void dcn401_blank_pixel_data_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state) +{ + struct tg_color black_color = {0}; + struct stream_resource *stream_res = &pipe_ctx->stream_res; + struct dc_stream_state *stream = pipe_ctx->stream; + enum dc_color_space color_space = stream->output_color_space; + enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; + enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + struct pipe_ctx *odm_pipe; + struct rect odm_slice_src; + + if (stream->link->test_pattern_enabled) + return; + + /* get opp dpg blank color */ + color_space_to_black_color(dc, color_space, &black_color); + + if (blank) { + /* Set ABM immediate disable */ + hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx); + + if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) { + test_pattern = 
CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + } + } else { + test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; + } + + odm_pipe = pipe_ctx; + + /* Set display pattern generator for all ODM pipes */ + while (odm_pipe->next_odm_pipe) { + odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_pipe->stream_res.opp, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + black_color, + true, + odm_slice_src.width, + odm_slice_src.height, + odm_slice_src.x); + + odm_pipe = odm_pipe->next_odm_pipe; + } + + /* Set display pattern generator for final ODM pipe */ + odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe); + + hwss_add_opp_set_disp_pattern_generator(seq_state, + odm_pipe->stream_res.opp, + test_pattern, + test_pattern_color_space, + stream->timing.display_color_depth, + black_color, + true, + odm_slice_src.width, + odm_slice_src.height, + odm_slice_src.x); + + /* Handle ABM level setting when not blanking */ + if (!blank) { + if (stream_res->abm) { + /* Set pipe for ABM */ + hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx); + + /* Set ABM level */ + hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level); + } + } +} + +void dcn401_program_all_writeback_pipes_in_tree_sequence( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + int i_wb, i_pipe; + + if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb) + return; + + /* For each writeback pipe */ + for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) { + /* Get direct pointer to writeback info */ + struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb]; + int mpcc_inst = -1; + + if (wb_info->wb_enabled) { + /* Get the MPCC instance for writeback_source_plane */ + for (i_pipe = 0; i_pipe < 
dc->res_pool->pipe_count; i_pipe++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe]; + + if (!pipe_ctx->plane_state) + continue; + + if (pipe_ctx->plane_state == wb_info->writeback_source_plane) { + mpcc_inst = pipe_ctx->plane_res.mpcc_inst; + break; + } + } + + if (mpcc_inst == -1) { + /* Disable writeback pipe and disconnect from MPCC + * if source plane has been removed + */ + dcn401_disable_writeback_sequence(dc, wb_info, seq_state); + continue; + } + + ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb); + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + + if (dwb->funcs->is_enabled(dwb)) { + /* Writeback pipe already enabled, only need to update */ + dcn401_update_writeback_sequence(dc, wb_info, context, seq_state); + } else { + /* Enable writeback pipe and connect to MPCC */ + dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state); + } + } else { + /* Disable writeback pipe and disconnect from MPCC */ + dcn401_disable_writeback_sequence(dc, wb_info, seq_state); + } + } +} + +void dcn401_enable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + int mpcc_inst, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Update DWBC with new parameters */ + hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params); + + /* Configure MCIF_WB buffer settings */ + hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); + + /* Configure MCIF_WB arbitration */ + hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + + /* Enable MCIF_WB */ + hwss_add_mcif_wb_enable(seq_state, mcif_wb); + + /* 
Set DWB MUX to connect writeback to MPCC */ + hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst); + + /* Enable DWBC */ + hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params); +} + +void dcn401_disable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Disable DWBC */ + hwss_add_dwbc_disable(seq_state, dwb); + + /* Disable DWB MUX */ + hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst); + + /* Disable MCIF_WB */ + hwss_add_mcif_wb_disable(seq_state, mcif_wb); +} + +void dcn401_update_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dwbc *dwb; + struct mcif_wb *mcif_wb; + + if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb) + return; + + dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; + mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; + + /* Update writeback pipe */ + hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params); + + /* Update MCIF_WB buffer settings if needed */ + hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); +} + +static int find_free_gsl_group(const struct dc *dc) +{ + if (dc->res_pool->gsl_groups.gsl_0 == 0) + return 1; + if (dc->res_pool->gsl_groups.gsl_1 == 0) + return 2; + if (dc->res_pool->gsl_groups.gsl_2 == 0) + return 3; + + return 0; +} + +void dcn401_setup_gsl_group_as_lock_sequence( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable, + struct block_sequence_state *seq_state) +{ + struct gsl_params gsl; + int group_idx; + + 
memset(&gsl, 0, sizeof(struct gsl_params)); + + if (enable) { + /* return if group already assigned since GSL was set up + * for vsync flip, we would unassign so it can't be "left over" + */ + if (pipe_ctx->stream_res.gsl_group > 0) + return; + + group_idx = find_free_gsl_group(dc); + ASSERT(group_idx != 0); + pipe_ctx->stream_res.gsl_group = group_idx; + + /* set gsl group reg field and mark resource used */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 1; + dc->res_pool->gsl_groups.gsl_0 = 1; + break; + case 2: + gsl.gsl1_en = 1; + dc->res_pool->gsl_groups.gsl_1 = 1; + break; + case 3: + gsl.gsl2_en = 1; + dc->res_pool->gsl_groups.gsl_2 = 1; + break; + default: + BREAK_TO_DEBUGGER(); + return; // invalid case + } + gsl.gsl_master_en = 1; + } else { + group_idx = pipe_ctx->stream_res.gsl_group; + if (group_idx == 0) + return; // if not in use, just return + + pipe_ctx->stream_res.gsl_group = 0; + + /* unset gsl group reg field and mark resource free */ + switch (group_idx) { + case 1: + gsl.gsl0_en = 0; + dc->res_pool->gsl_groups.gsl_0 = 0; + break; + case 2: + gsl.gsl1_en = 0; + dc->res_pool->gsl_groups.gsl_1 = 0; + break; + case 3: + gsl.gsl2_en = 0; + dc->res_pool->gsl_groups.gsl_2 = 0; + break; + default: + BREAK_TO_DEBUGGER(); + return; + } + gsl.gsl_master_en = 0; + } + + hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl); + hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); +} + +void dcn401_disable_plane_sequence( + struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM; + struct timing_generator *tg = is_phantom ? 
pipe_ctx->stream_res.tg : NULL; + + if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) + return; + + /* Wait for MPCC disconnect */ + if (dc->hwss.wait_for_mpcc_disconnect_sequence) + dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state); + + /* In flip immediate with pipe splitting case GSL is used for synchronization + * so we must disable it when the plane is disabled. + */ + if (pipe_ctx->stream_res.gsl_group != 0) + dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state); + + /* Update HUBP mall sel */ + if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel) + hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false); + + /* Set flip control GSL */ + hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false); + + /* HUBP clock control */ + hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false); + + /* DPP clock control */ + hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false); + + /* Plane atomic power down */ + if (dc->hwseq->funcs.plane_atomic_power_down_sequence) + dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp, + pipe_ctx->plane_res.hubp, seq_state); + + pipe_ctx->stream = NULL; + memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res)); + memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res)); + pipe_ctx->top_pipe = NULL; + pipe_ctx->bottom_pipe = NULL; + pipe_ctx->prev_odm_pipe = NULL; + pipe_ctx->next_odm_pipe = NULL; + pipe_ctx->plane_state = NULL; + + /* Turn back off the phantom OTG after the phantom plane is fully disabled */ + if (is_phantom && tg && tg->funcs->disable_phantom_crtc) + hwss_add_disable_phantom_crtc(seq_state, tg); +} + +void dcn401_post_unlock_reset_opp_sequence( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state) +{ + struct display_stream_compressor *dsc = opp_head->stream_res.dsc; + 
struct dccg *dccg = dc->res_pool->dccg;
+
+	/* Wait for all DPP pipes in current mpc blending tree to complete double
+	 * buffered disconnection before resetting OPP
+	 */
+	if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+		dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
+
+	if (dsc) {
+		bool *is_ungated = NULL;
+		/* Check DSC power gate status */
+		if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
+			hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
+
+		/* Seamless update specific where we will postpone non
+		 * double buffered DSCCLK disable logic in post unlock
+		 * sequence after DSC is disconnected from OPP but not
+		 * yet power gated.
+		 */
+
+		/* DSC wait disconnect pending clear */
+		hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
+
+		/* DSC disable */
+		hwss_add_dsc_disable(seq_state, dsc, is_ungated);
+
+		/* Set reference DSCCLK */
+		if (dccg && dccg->funcs->set_ref_dscclk)
+			hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
+	}
+}
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
+{
+	struct dce_hwseq *hws = dc->hwseq;
+
+	if (REG(DC_IP_REQUEST_CNTL))
+		REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 
1 : 0); +} + +void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t org_ip_request_cntl = 0; + + if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp) + return; + + if (REG(DC_IP_REQUEST_CNTL)) + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + + /* Step 1: DPP root clock control - enable clock */ + if (hws->funcs.dpp_root_clock_control) + hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true); + + /* Step 2: Enable DC IP request (if needed) */ + if (hws->funcs.dc_ip_request_cntl) + hwss_add_dc_ip_request_cntl(seq_state, dc, true); + + /* Step 3: DPP power gating control - power on */ + if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control) + hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true); + + /* Step 4: HUBP power gating control - power on */ + if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control) + hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true); + + /* Step 5: Disable DC IP request (restore state) */ + if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl) + hwss_add_dc_ip_request_cntl(seq_state, dc, false); + + /* Step 6: HUBP clock control - enable DCFCLK */ + if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl) + hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true); + + /* Step 7: HUBP initialization */ + if (pipe_ctx->plane_res.hubp->funcs->hubp_init) + hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp); + + /* Step 8: OPP pipe clock control - enable */ + if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control) + hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true); + + /* Step 9: VM system aperture settings */ + if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) { + 
hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0, + dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr); + } + + /* Step 10: Flip interrupt setup */ + if (!pipe_ctx->top_pipe + && pipe_ctx->plane_state + && pipe_ctx->plane_state->flip_int_enabled + && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) { + hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp); + } +} + +void dcn401_update_dchubp_dpp_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state) +{ + struct dce_hwseq *hws = dc->hwseq; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; + struct dccg *dccg = dc->res_pool->dccg; + bool viewport_changed = false; + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); + + if (!hubp || !dpp || !plane_state) + return; + + /* Step 1: DPP DPPCLK control */ + if (pipe_ctx->update_flags.bits.dppclk) + hwss_add_dpp_dppclk_control(seq_state, dpp, false, true); + + /* Step 2: DCCG update DPP DTO */ + if (pipe_ctx->update_flags.bits.enable) + hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz); + + /* Step 3: HUBP VTG selection */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst); + + /* Step 4: HUBP setup (choose setup2 or setup) */ + if (hubp->funcs->hubp_setup2) { + hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs, + &pipe_ctx->global_sync, &pipe_ctx->stream->timing); + } else if (hubp->funcs->hubp_setup) { + hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param); + } + } + + /* Step 5: Set unbounded requesting */ + if (pipe_ctx->update_flags.bits.unbounded_req && 
hubp->funcs->set_unbounded_requesting) + hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req); + + /* Step 6: HUBP interdependent setup */ + if (pipe_ctx->update_flags.bits.hubp_interdependent) { + if (hubp->funcs->hubp_setup_interdependent2) + hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs); + else if (hubp->funcs->hubp_setup_interdependent) + hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs); + } + + /* Step 7: DPP setup - input CSC and format setup */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + hwss_add_dpp_setup_dpp(seq_state, pipe_ctx); + + /* Step 8: DPP cursor matrix setup */ + if (dpp->funcs->set_cursor_matrix) { + hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space, + &plane_state->cursor_csc_color_matrix); + } + + /* Step 9: DPP program bias and scale */ + if (dpp->funcs->dpp_program_bias_and_scale) + hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx); + } + + /* Step 10: MPCC updates */ + if (pipe_ctx->update_flags.bits.mpcc || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.global_alpha_change || + plane_state->update_flags.bits.per_pixel_alpha_change) { + + /* Check if update_mpcc_sequence is implemented and prefer it over single MPC_UPDATE_MPCC step */ + if (hws->funcs.update_mpcc_sequence) + hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state); + } + + /* Step 11: DPP scaler setup */ + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { 
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP); + hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } + + /* Step 12: HUBP viewport programming */ + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.position_change) || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { + hwss_add_hubp_mem_program_viewport(seq_state, hubp, + &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c); + viewport_changed = true; + } + + /* Step 13: HUBP program mcache if available */ + if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate) + hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs); + + /* Step 14: Cursor attribute setup */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && + pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + + hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx); + + /* Step 15: Cursor position setup */ + hwss_add_set_cursor_position(seq_state, dc, pipe_ctx); + + /* Step 16: Cursor SDR white level */ + if (dc->hwss.set_cursor_sdr_white_level) + hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx); + } + + /* Step 17: Gamut remap and output CSC */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->stream->update_flags.bits.gamut_remap || + plane_state->update_flags.bits.gamut_remap_change || + pipe_ctx->stream->update_flags.bits.out_csc) { + + /* Gamut remap */ + hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx); + + /* Output CSC */ + 
hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id); + } + + /* Step 18: HUBP surface configuration */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hwss_add_hubp_program_surface_config(seq_state, hubp, + plane_state->format, &plane_state->tiling_info, size, + plane_state->rotation, &plane_state->dcc, + plane_state->horizontal_mirror, 0); + hubp->power_gated = false; + } + + /* Step 19: Update plane address (with SubVP support) */ + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.plane_changed || + plane_state->update_flags.bits.addr_update) { + + /* SubVP save surface address if needed */ + if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) { + hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv, + &pipe_ctx->plane_state->address, pipe_ctx->subvp_index); + } + + /* Update plane address */ + hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx); + } + + /* Step 20: HUBP set blank - enable plane */ + if (pipe_ctx->update_flags.bits.enable) + hwss_add_hubp_set_blank(seq_state, hubp, false); + + /* Step 21: Phantom HUBP post enable */ + if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable) + hwss_add_phantom_hubp_post_enable(seq_state, hubp); +} + +void 
dcn401_update_mpcc_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct mpcc_blnd_cfg blnd_cfg = {0}; + bool per_pixel_alpha; + int mpcc_id; + struct mpcc *new_mpcc; + struct mpc *mpc = dc->res_pool->mpc; + struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); + + if (!hubp || !pipe_ctx->plane_state) + return; + + per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; + + /* Initialize blend configuration */ + blnd_cfg.overlap_only = false; + blnd_cfg.global_gain = 0xff; + + if (per_pixel_alpha) { + blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha; + if (pipe_ctx->plane_state->global_alpha) { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN; + blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value; + } else { + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA; + } + } else { + blnd_cfg.pre_multiplied_alpha = false; + blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA; + } + + if (pipe_ctx->plane_state->global_alpha) + blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value; + else + blnd_cfg.global_alpha = 0xff; + + blnd_cfg.background_color_bpc = 4; + blnd_cfg.bottom_gain_mode = 0; + blnd_cfg.top_gain = 0x1f000; + blnd_cfg.bottom_inside_gain = 0x1f000; + blnd_cfg.bottom_outside_gain = 0x1f000; + + if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA) + blnd_cfg.pre_multiplied_alpha = false; + + /* MPCC instance is equal to HUBP instance */ + mpcc_id = hubp->inst; + + /* Step 1: Update blending if no full update needed */ + if (!pipe_ctx->plane_state->update_flags.bits.full_update && + !pipe_ctx->update_flags.bits.mpcc) { + + /* Update blending configuration */ + hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id); + + /* Update visual confirm color */ + hwss_add_mpc_update_visual_confirm(seq_state, dc, 
pipe_ctx, mpcc_id); + return; + } + + /* Step 2: Get existing MPCC for DPP */ + new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); + + /* Step 3: Remove MPCC if being used */ + if (new_mpcc != NULL) { + hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc); + } else { + /* Step 4: Assert MPCC idle (debug only) */ + if (dc->debug.sanity_checks) + hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id); + } + + /* Step 5: Insert new plane into MPC tree */ + hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id); + + /* Step 6: Update visual confirm color */ + hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id); + + /* Step 7: Set HUBP OPP and MPCC IDs */ + hubp->opp_id = pipe_ctx->stream_res.opp->inst; + hubp->mpcc_id = mpcc_id; +} + +static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst) +{ + int i; + + for (i = 0; i < res_pool->pipe_count; i++) { + if (res_pool->hubps[i]->inst == mpcc_inst) + return res_pool->hubps[i]; + } + ASSERT(false); + return NULL; +} + +void dcn401_wait_for_mpcc_disconnect_sequence( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + int mpcc_inst; + + if (dc->debug.sanity_checks) + dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); + + if (!pipe_ctx->stream_res.opp) + return; + + for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { + if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) { + struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst); + + if (pipe_ctx->stream_res.tg && + pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) { + hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst); + } + pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + if (hubp) + hwss_add_hubp_set_blank(seq_state, hubp, true); + } + } + + if (dc->debug.sanity_checks) + 
dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state); +} + +void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); + + if (start_line < 0) + start_line = 0; + + if (tg->funcs->setup_vertical_interrupt2) + hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line); +} + +void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state) +{ + struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult; + uint32_t hw_mult = 0x1f000; // 1.0 default multiplier + struct custom_float_format fmt; + + fmt.exponenta_bits = 6; + fmt.mantissa_bits = 12; + fmt.sign = true; + + if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0 + convert_to_custom_float_format(multiplier, &fmt, &hw_mult); + + hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult); +} + +void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state) +{ + int i; + unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context); + bool cache_cursor = false; + + // Don't force p-state disallow -- can't block dummy p-state + + // Update MALL_SEL register for each pipe (break down update_mall_sel call) + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = pipe->plane_res.hubp; + + if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { + int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; + + switch (hubp->curs_attr.color_format) { + case CURSOR_MODE_MONO: + cursor_size /= 2; + break; + case CURSOR_MODE_COLOR_1BIT_AND: + case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA: + case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA: + 
cursor_size *= 4; + break; + + case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED: + case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED: + default: + cursor_size *= 8; + break; + } + + if (cursor_size > 16384) + cache_cursor = true; + + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { + hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false); + } else { + // MALL not supported with Stereo3D + uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways && + pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED && + pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO && + !pipe->plane_state->address.tmz_surface) ? 2 : 0; + hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor); + } + } + } + + // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct hubp *hubp = pipe->plane_res.hubp; + + if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) { + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) + hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true); + } + } +} + +void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc, + struct block_sequence_state *seq_state) +{ + struct hubbub *hubbub = dc->res_pool->hubbub; + + if (!hubbub->funcs->verify_allow_pstate_change_high) + return; + + if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) { + /* Attempt hardware workaround force recovery */ + dcn401_hw_wa_force_recovery_sequence(dc, seq_state); + } +} + +bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc, + struct block_sequence_state *seq_state) +{ + struct hubp *hubp; + unsigned int i; + + if (!dc->debug.recovery_enabled) + return false; + + /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if 
(pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) + hwss_add_hubp_set_blank_en(seq_state, hubp, true); + } + } + + /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */ + hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true); + + /* Step 3: Set HUBP_DISABLE=1 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->hubp_disable_control) + hwss_add_hubp_disable_control(seq_state, hubp, true); + } + } + + /* Step 4: Set HUBP_DISABLE=0 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->hubp_disable_control) + hwss_add_hubp_disable_control(seq_state, hubp, false); + } + } + + /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */ + hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false); + + /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL && hubp->funcs->set_hubp_blank_en) + hwss_add_hubp_set_blank_en(seq_state, hubp, false); + } + } + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index 2621b7725267..f78162ab859b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -9,6 +9,7 @@ #include "dc.h" #include "dc_stream.h" #include "hw_sequencer_private.h" +#include "hwss/hw_sequencer.h" #include 
"dcn401/dcn401_dccg.h" struct dc; @@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth( struct dc *dc, struct dc_state *context); -void dcn401_fams2_global_control_lock(struct dc *dc, +void dcn401_dmub_hw_control_lock(struct dc *dc, struct dc_state *context, bool lock); void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable); -void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params); +void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params); void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings); void dcn401_hardware_release(struct dc *dc); void dcn401_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); +void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context, + struct pipe_ctx *otg_master, struct block_sequence_state *seq_state); void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy); void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master); void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock); @@ -97,6 +100,11 @@ void dcn401_program_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); +void dcn401_program_pipe_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx); void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context); void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context); @@ -109,5 +117,97 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); +void dcn401_plane_atomic_power_down_sequence(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state 
*seq_state); +void dcn401_plane_atomic_disconnect_sequence(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); +void dcn401_blank_pixel_data_sequence( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state); void dcn401_initialize_min_clocks(struct dc *dc); +void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe); + +void dcn401_program_all_writeback_pipes_in_tree_sequence( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_enable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + int mpcc_inst, + struct block_sequence_state *seq_state); + +void dcn401_disable_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct block_sequence_state *seq_state); + +void dcn401_update_writeback_sequence( + struct dc *dc, + struct dc_writeback_info *wb_info, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_setup_gsl_group_as_lock_sequence( + const struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool enable, + struct block_sequence_state *seq_state); + +void dcn401_disable_plane_sequence( + struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_post_unlock_reset_opp_sequence( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state); + +void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable); + +void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_update_dchubp_dpp_sequence(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_update_mpcc_sequence(struct dc *dc, 
+ struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_wait_for_mpcc_disconnect_sequence( + struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); + +void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state); + +void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc, + struct block_sequence_state *seq_state); + +bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc, + struct block_sequence_state *seq_state); + #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c index d6e11b7e4fce..162096ce0bdf 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c @@ -9,6 +9,7 @@ #include "dcn30/dcn30_hwseq.h" #include "dcn31/dcn31_hwseq.h" #include "dcn32/dcn32_hwseq.h" +#include "dcn35/dcn35_hwseq.h" #include "dcn401/dcn401_hwseq.h" #include "dcn401_init.h" @@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .enable_audio_stream = dce110_enable_audio_stream, .disable_audio_stream = dce110_disable_audio_stream, .disable_plane = dcn20_disable_plane, + .disable_plane_sequence = dcn401_disable_plane_sequence, .pipe_control_lock = dcn20_pipe_control_lock, .interdependent_update_lock = dcn401_interdependent_update_lock, .cursor_lock = dcn10_cursor_lock, @@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = 
dcn10_wait_for_mpcc_disconnect, + .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence, .edp_backlight_control = dce110_edp_backlight_control, .edp_power_control = dce110_edp_power_control, .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready, @@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .set_cursor_position = dcn401_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .abort_cursor_offload_update = dcn35_abort_cursor_offload_update, + .begin_cursor_offload_update = dcn35_begin_cursor_offload_update, + .commit_cursor_offload_update = dcn35_commit_cursor_offload_update, + .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe, + .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update, + .program_cursor_offload_now = dcn35_program_cursor_offload_now, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .set_clock = dcn10_set_clock, .get_clock = dcn10_get_clock, @@ -95,55 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = { .apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom, .wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation, .is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless, - .fams2_global_control_lock = dcn401_fams2_global_control_lock, + .dmub_hw_control_lock = dcn401_dmub_hw_control_lock, .fams2_update_config = dcn401_fams2_update_config, - .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast, + .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast, .program_outstanding_updates = dcn401_program_outstanding_updates, .wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates, .detect_pipe_changes = dcn401_detect_pipe_changes, .enable_plane = dcn20_enable_plane, + .enable_plane_sequence = dcn401_enable_plane_sequence, .update_dchubp_dpp = dcn20_update_dchubp_dpp, + 
.update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence, .post_unlock_reset_opp = dcn20_post_unlock_reset_opp, + .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence, .get_underflow_debug_data = dcn30_get_underflow_debug_data, }; static const struct hwseq_private_funcs dcn401_private_funcs = { .init_pipes = dcn10_init_pipes, .plane_atomic_disconnect = dcn10_plane_atomic_disconnect, + .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence, .update_mpcc = dcn20_update_mpcc, + .update_mpcc_sequence = dcn401_update_mpcc_sequence, .set_input_transfer_func = dcn32_set_input_transfer_func, .set_output_transfer_func = dcn401_set_output_transfer_func, .power_down = dce110_power_down, .enable_display_power_gating = dcn10_dummy_display_power_gating, .blank_pixel_data = dcn20_blank_pixel_data, + .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence, .reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap, .enable_stream_timing = dcn401_enable_stream_timing, .edp_backlight_control = dce110_edp_backlight_control, .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt, + .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence, .did_underflow_occur = dcn10_did_underflow_occur, .init_blank = dcn32_init_blank, .disable_vga = dcn20_disable_vga, .bios_golden_init = dcn10_bios_golden_init, .plane_atomic_disable = dcn20_plane_atomic_disable, .plane_atomic_power_down = dcn401_plane_atomic_power_down, + .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence, .enable_power_gating_plane = dcn32_enable_power_gating_plane, .hubp_pg_control = dcn32_hubp_pg_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, + .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence, .update_odm = dcn401_update_odm, + .update_odm_sequence = dcn401_update_odm_sequence, .dsc_pg_control = dcn32_dsc_pg_control, .dsc_pg_status = 
dcn32_dsc_pg_status, .set_hdr_multiplier = dcn10_set_hdr_multiplier, + .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence, .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high, + .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence, .wait_for_blank_complete = dcn20_wait_for_blank_complete, .dccg_init = dcn20_dccg_init, .set_mcm_luts = dcn401_set_mcm_luts, .program_mall_pipe_config = dcn32_program_mall_pipe_config, + .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence, .update_mall_sel = dcn32_update_mall_sel, .calculate_dccg_k1_k2_values = NULL, .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, .reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe, .populate_mcm_luts = NULL, .perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock, + .program_pipe_sequence = dcn401_program_pipe_sequence, + .dc_ip_request_cntl = dcn401_dc_ip_request_cntl, }; void dcn401_hw_sequencer_init_functions(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 1723bbcf2c46..3772b4aa11cc 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -31,6 +31,8 @@ #include "inc/hw/opp.h" #include "inc/hw/link_encoder.h" #include "inc/core_status.h" +#include "inc/hw/hw_shared.h" +#include "dsc/dsc.h" struct pipe_ctx; struct dc_state; @@ -48,6 +50,8 @@ struct dc_dmub_cmd; struct pg_block_update; struct drr_params; struct dc_underflow_debug_data; +struct dsc_optc_config; +struct vm_system_aperture_param; struct subvp_pipe_control_lock_fast_params { struct dc *dc; @@ -62,7 +66,7 @@ struct pipe_control_lock_params { }; struct set_flip_control_gsl_params { - struct pipe_ctx *pipe_ctx; + struct hubp *hubp; bool flip_immediate; }; @@ -148,12 +152,582 @@ struct wait_for_dcc_meta_propagation_params { const struct pipe_ctx 
*top_pipe_to_program; }; -struct fams2_global_control_lock_fast_params { +struct dmub_hw_control_lock_fast_params { struct dc *dc; bool is_required; bool lock; }; +struct program_surface_config_params { + struct hubp *hubp; + enum surface_pixel_format format; + struct dc_tiling_info *tiling_info; + struct plane_size plane_size; + enum dc_rotation_angle rotation; + struct dc_plane_dcc_param *dcc; + bool horizontal_mirror; + int compat_level; +}; + +struct program_mcache_id_and_split_coordinate { + struct hubp *hubp; + struct dml2_hubp_pipe_mcache_regs *mcache_regs; +}; + +struct program_cursor_update_now_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct hubp_wait_pipe_read_start_params { + struct hubp *hubp; +}; + +struct apply_update_flags_for_phantom_params { + struct pipe_ctx *pipe_ctx; +}; + +struct update_phantom_vp_position_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct set_odm_combine_params { + struct timing_generator *tg; + int opp_inst[MAX_PIPES]; + int opp_head_count; + int odm_slice_width; + int last_odm_slice_width; +}; + +struct set_odm_bypass_params { + struct timing_generator *tg; + const struct dc_crtc_timing *timing; +}; + +struct opp_pipe_clock_control_params { + struct output_pixel_processor *opp; + bool enable; +}; + +struct opp_program_left_edge_extra_pixel_params { + struct output_pixel_processor *opp; + enum dc_pixel_encoding pixel_encoding; + bool is_otg_master; +}; + +struct dccg_set_dto_dscclk_params { + struct dccg *dccg; + int inst; + int num_slices_h; +}; + +struct dsc_set_config_params { + struct display_stream_compressor *dsc; + struct dsc_config *dsc_cfg; + struct dsc_optc_config *dsc_optc_cfg; +}; + +struct dsc_enable_params { + struct display_stream_compressor *dsc; + int opp_inst; +}; + +struct tg_set_dsc_config_params { + struct timing_generator *tg; + struct dsc_optc_config *dsc_optc_cfg; + bool enable; +}; + +struct dsc_disconnect_params { + struct 
display_stream_compressor *dsc; +}; + +struct dsc_read_state_params { + struct display_stream_compressor *dsc; + struct dcn_dsc_state *dsc_state; +}; + +struct dsc_calculate_and_set_config_params { + struct pipe_ctx *pipe_ctx; + struct dsc_optc_config dsc_optc_cfg; + bool enable; + int opp_cnt; +}; + +struct dsc_enable_with_opp_params { + struct pipe_ctx *pipe_ctx; +}; + +struct program_tg_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct tg_program_global_sync_params { + struct timing_generator *tg; + int vready_offset; + unsigned int vstartup_lines; + unsigned int vupdate_offset_pixels; + unsigned int vupdate_vupdate_width_pixels; + unsigned int pstate_keepout_start_lines; +}; + +struct tg_wait_for_state_params { + struct timing_generator *tg; + enum crtc_state state; +}; + +struct tg_set_vtg_params_params { + struct timing_generator *tg; + struct dc_crtc_timing *timing; + bool program_fp2; +}; + +struct tg_set_gsl_params { + struct timing_generator *tg; + struct gsl_params gsl; +}; + +struct tg_set_gsl_source_select_params { + struct timing_generator *tg; + int group_idx; + uint32_t gsl_ready_signal; +}; + +struct setup_vupdate_interrupt_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct tg_setup_vertical_interrupt2_params { + struct timing_generator *tg; + int start_line; +}; + +struct dpp_set_hdr_multiplier_params { + struct dpp *dpp; + uint32_t hw_mult; +}; + +struct program_det_size_params { + struct hubbub *hubbub; + unsigned int hubp_inst; + unsigned int det_buffer_size_kb; +}; + +struct program_det_segments_params { + struct hubbub *hubbub; + unsigned int hubp_inst; + unsigned int det_size; +}; + +struct update_dchubp_dpp_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + struct dc_state *context; +}; + +struct opp_set_dyn_expansion_params { + struct output_pixel_processor *opp; + enum dc_color_space color_space; + enum dc_color_depth color_depth; + enum signal_type signal; +}; + 
+struct opp_program_fmt_params { + struct output_pixel_processor *opp; + struct bit_depth_reduction_params *fmt_bit_depth; + struct clamping_and_pixel_encoding_params *clamping; +}; + +struct opp_program_bit_depth_reduction_params { + struct output_pixel_processor *opp; + bool use_default_params; + struct pipe_ctx *pipe_ctx; +}; + +struct opp_set_disp_pattern_generator_params { + struct output_pixel_processor *opp; + enum controller_dp_test_pattern test_pattern; + enum controller_dp_color_space color_space; + enum dc_color_depth color_depth; + struct tg_color solid_color; + bool use_solid_color; + int width; + int height; + int offset; +}; + +struct set_abm_pipe_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_abm_level_params { + struct abm *abm; + unsigned int abm_level; +}; + +struct set_abm_immediate_disable_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_disp_pattern_generator_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + enum controller_dp_test_pattern test_pattern; + enum controller_dp_color_space color_space; + enum dc_color_depth color_depth; + const struct tg_color *solid_color; + int width; + int height; + int offset; +}; + +struct mpc_update_blending_params { + struct mpc *mpc; + struct mpcc_blnd_cfg blnd_cfg; + int mpcc_id; +}; + +struct mpc_assert_idle_mpcc_params { + struct mpc *mpc; + int mpcc_id; +}; + +struct mpc_insert_plane_params { + struct mpc *mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc_blnd_cfg blnd_cfg; + struct mpcc_sm_cfg *sm_cfg; + struct mpcc *insert_above_mpcc; + int dpp_id; + int mpcc_id; +}; + +struct mpc_remove_mpcc_params { + struct mpc *mpc; + struct mpc_tree *mpc_tree_params; + struct mpcc *mpcc_to_remove; +}; + +struct opp_set_mpcc_disconnect_pending_params { + struct output_pixel_processor *opp; + int mpcc_inst; + bool pending; +}; + +struct dc_set_optimized_required_params { + struct dc *dc; + bool optimized_required; +}; + +struct hubp_disconnect_params { 
+ struct hubp *hubp; +}; + +struct hubbub_force_pstate_change_control_params { + struct hubbub *hubbub; + bool enable; + bool wait; +}; + +struct tg_enable_crtc_params { + struct timing_generator *tg; +}; + +struct hubp_wait_flip_pending_params { + struct hubp *hubp; + unsigned int timeout_us; + unsigned int polling_interval_us; +}; + +struct tg_wait_double_buffer_pending_params { + struct timing_generator *tg; + unsigned int timeout_us; + unsigned int polling_interval_us; +}; + +struct update_force_pstate_params { + struct dc *dc; + struct dc_state *context; +}; + +struct hubbub_apply_dedcn21_147_wa_params { + struct hubbub *hubbub; +}; + +struct hubbub_allow_self_refresh_control_params { + struct hubbub *hubbub; + bool allow; + bool *disallow_self_refresh_applied; +}; + +struct tg_get_frame_count_params { + struct timing_generator *tg; + unsigned int *frame_count; +}; + +struct mpc_set_dwb_mux_params { + struct mpc *mpc; + int dwb_id; + int mpcc_id; +}; + +struct mpc_disable_dwb_mux_params { + struct mpc *mpc; + unsigned int dwb_id; +}; + +struct mcif_wb_config_buf_params { + struct mcif_wb *mcif_wb; + struct mcif_buf_params *mcif_buf_params; + unsigned int dest_height; +}; + +struct mcif_wb_config_arb_params { + struct mcif_wb *mcif_wb; + struct mcif_arb_params *mcif_arb_params; +}; + +struct mcif_wb_enable_params { + struct mcif_wb *mcif_wb; +}; + +struct mcif_wb_disable_params { + struct mcif_wb *mcif_wb; +}; + +struct dwbc_enable_params { + struct dwbc *dwb; + struct dc_dwb_params *dwb_params; +}; + +struct dwbc_disable_params { + struct dwbc *dwb; +}; + +struct dwbc_update_params { + struct dwbc *dwb; + struct dc_dwb_params *dwb_params; +}; + +struct hubp_update_mall_sel_params { + struct hubp *hubp; + uint32_t mall_sel; + bool cache_cursor; +}; + +struct hubp_prepare_subvp_buffering_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_set_blank_en_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_disable_control_params { + struct 
hubp *hubp; + bool disable; +}; + +struct hubbub_soft_reset_params { + struct hubbub *hubbub; + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset); + bool reset; +}; + +struct hubp_clk_cntl_params { + struct hubp *hubp; + bool enable; +}; + +struct hubp_init_params { + struct hubp *hubp; +}; + +struct hubp_set_vm_system_aperture_settings_params { + struct hubp *hubp; + //struct vm_system_aperture_param apt; + PHYSICAL_ADDRESS_LOC sys_default; + PHYSICAL_ADDRESS_LOC sys_low; + PHYSICAL_ADDRESS_LOC sys_high; +}; + +struct hubp_set_flip_int_params { + struct hubp *hubp; +}; + +struct dpp_dppclk_control_params { + struct dpp *dpp; + bool dppclk_div; + bool enable; +}; + +struct disable_phantom_crtc_params { + struct timing_generator *tg; +}; + +struct dpp_pg_control_params { + struct dce_hwseq *hws; + unsigned int dpp_inst; + bool power_on; +}; + +struct hubp_pg_control_params { + struct dce_hwseq *hws; + unsigned int hubp_inst; + bool power_on; +}; + +struct hubp_reset_params { + struct hubp *hubp; +}; + +struct dpp_reset_params { + struct dpp *dpp; +}; + +struct dpp_root_clock_control_params { + struct dce_hwseq *hws; + unsigned int dpp_inst; + bool clock_on; +}; + +struct dc_ip_request_cntl_params { + struct dc *dc; + bool enable; +}; + +struct dsc_pg_status_params { + struct dce_hwseq *hws; + int dsc_inst; + bool is_ungated; +}; + +struct dsc_wait_disconnect_pending_clear_params { + struct display_stream_compressor *dsc; + bool *is_ungated; +}; + +struct dsc_disable_params { + struct display_stream_compressor *dsc; + bool *is_ungated; +}; + +struct dccg_set_ref_dscclk_params { + struct dccg *dccg; + int dsc_inst; + bool *is_ungated; +}; + +struct dccg_update_dpp_dto_params { + struct dccg *dccg; + int dpp_inst; + int dppclk_khz; +}; + +struct hubp_vtg_sel_params { + struct hubp *hubp; + uint32_t otg_inst; +}; + +struct hubp_setup2_params { + struct hubp *hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs; + union dml2_global_sync_programming 
*global_sync; + struct dc_crtc_timing *timing; +}; + +struct hubp_setup_params { + struct hubp *hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs; + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest; +}; + +struct hubp_set_unbounded_requesting_params { + struct hubp *hubp; + bool unbounded_req; +}; + +struct hubp_setup_interdependent2_params { + struct hubp *hubp; + struct dml2_dchub_per_pipe_register_set *hubp_regs; +}; + +struct hubp_setup_interdependent_params { + struct hubp *hubp; + struct _vcs_dpi_display_dlg_regs_st *dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs; +}; + +struct dpp_set_cursor_matrix_params { + struct dpp *dpp; + enum dc_color_space color_space; + struct dc_csc_transform *cursor_csc_color_matrix; +}; + +struct mpc_update_mpcc_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct dpp_set_scaler_params { + struct dpp *dpp; + const struct scaler_data *scl_data; +}; + +struct hubp_mem_program_viewport_params { + struct hubp *hubp; + const struct rect *viewport; + const struct rect *viewport_c; +}; + +struct hubp_program_mcache_id_and_split_coordinate_params { + struct hubp *hubp; + struct mcache_regs_struct *mcache_regs; +}; + +struct set_cursor_attribute_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_position_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct set_cursor_sdr_white_level_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; +}; + +struct program_output_csc_params { + struct dc *dc; + struct pipe_ctx *pipe_ctx; + enum dc_color_space colorspace; + uint16_t *matrix; + int opp_id; +}; + +struct hubp_set_blank_params { + struct hubp *hubp; + bool blank; +}; + +struct phantom_hubp_post_enable_params { + struct hubp *hubp; +}; + union block_sequence_params { struct update_plane_addr_params update_plane_addr_params; struct subvp_pipe_control_lock_fast_params 
subvp_pipe_control_lock_fast_params; @@ -173,7 +747,107 @@ union block_sequence_params { struct set_ocsc_default_params set_ocsc_default_params; struct subvp_save_surf_addr subvp_save_surf_addr; struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params; - struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params; + struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params; + struct program_surface_config_params program_surface_config_params; + struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate; + struct program_cursor_update_now_params program_cursor_update_now_params; + struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params; + struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params; + struct update_phantom_vp_position_params update_phantom_vp_position_params; + struct set_odm_combine_params set_odm_combine_params; + struct set_odm_bypass_params set_odm_bypass_params; + struct opp_pipe_clock_control_params opp_pipe_clock_control_params; + struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params; + struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params; + struct dsc_set_config_params dsc_set_config_params; + struct dsc_enable_params dsc_enable_params; + struct tg_set_dsc_config_params tg_set_dsc_config_params; + struct dsc_disconnect_params dsc_disconnect_params; + struct dsc_read_state_params dsc_read_state_params; + struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params; + struct dsc_enable_with_opp_params dsc_enable_with_opp_params; + struct program_tg_params program_tg_params; + struct tg_program_global_sync_params tg_program_global_sync_params; + struct tg_wait_for_state_params tg_wait_for_state_params; + struct tg_set_vtg_params_params tg_set_vtg_params_params; + struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params; + struct 
dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params; + struct tg_set_gsl_params tg_set_gsl_params; + struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params; + struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params; + struct program_det_size_params program_det_size_params; + struct program_det_segments_params program_det_segments_params; + struct update_dchubp_dpp_params update_dchubp_dpp_params; + struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params; + struct opp_program_fmt_params opp_program_fmt_params; + struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params; + struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params; + struct set_abm_pipe_params set_abm_pipe_params; + struct set_abm_level_params set_abm_level_params; + struct set_abm_immediate_disable_params set_abm_immediate_disable_params; + struct set_disp_pattern_generator_params set_disp_pattern_generator_params; + struct mpc_remove_mpcc_params mpc_remove_mpcc_params; + struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params; + struct dc_set_optimized_required_params dc_set_optimized_required_params; + struct hubp_disconnect_params hubp_disconnect_params; + struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params; + struct tg_enable_crtc_params tg_enable_crtc_params; + struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params; + struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params; + struct update_force_pstate_params update_force_pstate_params; + struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params; + struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params; + struct tg_get_frame_count_params tg_get_frame_count_params; + struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params; + struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params; + struct 
mcif_wb_config_buf_params mcif_wb_config_buf_params; + struct mcif_wb_config_arb_params mcif_wb_config_arb_params; + struct mcif_wb_enable_params mcif_wb_enable_params; + struct mcif_wb_disable_params mcif_wb_disable_params; + struct dwbc_enable_params dwbc_enable_params; + struct dwbc_disable_params dwbc_disable_params; + struct dwbc_update_params dwbc_update_params; + struct hubp_update_mall_sel_params hubp_update_mall_sel_params; + struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params; + struct hubp_set_blank_en_params hubp_set_blank_en_params; + struct hubp_disable_control_params hubp_disable_control_params; + struct hubbub_soft_reset_params hubbub_soft_reset_params; + struct hubp_clk_cntl_params hubp_clk_cntl_params; + struct hubp_init_params hubp_init_params; + struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params; + struct hubp_set_flip_int_params hubp_set_flip_int_params; + struct dpp_dppclk_control_params dpp_dppclk_control_params; + struct disable_phantom_crtc_params disable_phantom_crtc_params; + struct dpp_pg_control_params dpp_pg_control_params; + struct hubp_pg_control_params hubp_pg_control_params; + struct hubp_reset_params hubp_reset_params; + struct dpp_reset_params dpp_reset_params; + struct dpp_root_clock_control_params dpp_root_clock_control_params; + struct dc_ip_request_cntl_params dc_ip_request_cntl_params; + struct dsc_pg_status_params dsc_pg_status_params; + struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params; + struct dsc_disable_params dsc_disable_params; + struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params; + struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params; + struct hubp_vtg_sel_params hubp_vtg_sel_params; + struct hubp_setup2_params hubp_setup2_params; + struct hubp_setup_params hubp_setup_params; + struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params; + struct 
hubp_setup_interdependent2_params hubp_setup_interdependent2_params; + struct hubp_setup_interdependent_params hubp_setup_interdependent_params; + struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params; + struct mpc_update_mpcc_params mpc_update_mpcc_params; + struct mpc_update_blending_params mpc_update_blending_params; + struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params; + struct mpc_insert_plane_params mpc_insert_plane_params; + struct dpp_set_scaler_params dpp_set_scaler_params; + struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params; + struct set_cursor_attribute_params set_cursor_attribute_params; + struct set_cursor_position_params set_cursor_position_params; + struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params; + struct program_output_csc_params program_output_csc_params; + struct hubp_set_blank_params hubp_set_blank_params; + struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params; }; enum block_sequence_func { @@ -189,13 +863,110 @@ enum block_sequence_func { DPP_SETUP_DPP, DPP_PROGRAM_BIAS_AND_SCALE, DPP_SET_OUTPUT_TRANSFER_FUNC, + DPP_SET_HDR_MULTIPLIER, MPC_UPDATE_VISUAL_CONFIRM, MPC_POWER_ON_MPC_MEM_PWR, MPC_SET_OUTPUT_CSC, MPC_SET_OCSC_DEFAULT, DMUB_SUBVP_SAVE_SURF_ADDR, HUBP_WAIT_FOR_DCC_META_PROP, - DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST, + DMUB_HW_CONTROL_LOCK_FAST, + HUBP_PROGRAM_SURFACE_CONFIG, + HUBP_PROGRAM_MCACHE_ID, + PROGRAM_CURSOR_UPDATE_NOW, + HUBP_WAIT_PIPE_READ_START, + HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM, + HWS_UPDATE_PHANTOM_VP_POSITION, + OPTC_SET_ODM_COMBINE, + OPTC_SET_ODM_BYPASS, + OPP_PIPE_CLOCK_CONTROL, + OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL, + DCCG_SET_DTO_DSCCLK, + DSC_SET_CONFIG, + DSC_ENABLE, + TG_SET_DSC_CONFIG, + DSC_DISCONNECT, + DSC_READ_STATE, + DSC_CALCULATE_AND_SET_CONFIG, + DSC_ENABLE_WITH_OPP, + TG_PROGRAM_GLOBAL_SYNC, + TG_WAIT_FOR_STATE, + TG_SET_VTG_PARAMS, + TG_SETUP_VERTICAL_INTERRUPT2, + HUBP_PROGRAM_DET_SIZE, + 
HUBP_PROGRAM_DET_SEGMENTS, + OPP_SET_DYN_EXPANSION, + OPP_PROGRAM_FMT, + OPP_PROGRAM_BIT_DEPTH_REDUCTION, + OPP_SET_DISP_PATTERN_GENERATOR, + ABM_SET_PIPE, + ABM_SET_LEVEL, + ABM_SET_IMMEDIATE_DISABLE, + MPC_REMOVE_MPCC, + OPP_SET_MPCC_DISCONNECT_PENDING, + DC_SET_OPTIMIZED_REQUIRED, + HUBP_DISCONNECT, + HUBBUB_FORCE_PSTATE_CHANGE_CONTROL, + TG_ENABLE_CRTC, + TG_SET_GSL, + TG_SET_GSL_SOURCE_SELECT, + HUBP_WAIT_FLIP_PENDING, + TG_WAIT_DOUBLE_BUFFER_PENDING, + UPDATE_FORCE_PSTATE, + PROGRAM_MALL_PIPE_CONFIG, + HUBBUB_APPLY_DEDCN21_147_WA, + HUBBUB_ALLOW_SELF_REFRESH_CONTROL, + TG_GET_FRAME_COUNT, + MPC_SET_DWB_MUX, + MPC_DISABLE_DWB_MUX, + MCIF_WB_CONFIG_BUF, + MCIF_WB_CONFIG_ARB, + MCIF_WB_ENABLE, + MCIF_WB_DISABLE, + DWBC_ENABLE, + DWBC_DISABLE, + DWBC_UPDATE, + HUBP_UPDATE_MALL_SEL, + HUBP_PREPARE_SUBVP_BUFFERING, + HUBP_SET_BLANK_EN, + HUBP_DISABLE_CONTROL, + HUBBUB_SOFT_RESET, + HUBP_CLK_CNTL, + HUBP_INIT, + HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS, + HUBP_SET_FLIP_INT, + DPP_DPPCLK_CONTROL, + DISABLE_PHANTOM_CRTC, + DSC_PG_STATUS, + DSC_WAIT_DISCONNECT_PENDING_CLEAR, + DSC_DISABLE, + DCCG_SET_REF_DSCCLK, + DPP_PG_CONTROL, + HUBP_PG_CONTROL, + HUBP_RESET, + DPP_RESET, + DPP_ROOT_CLOCK_CONTROL, + DC_IP_REQUEST_CNTL, + DCCG_UPDATE_DPP_DTO, + HUBP_VTG_SEL, + HUBP_SETUP2, + HUBP_SETUP, + HUBP_SET_UNBOUNDED_REQUESTING, + HUBP_SETUP_INTERDEPENDENT2, + HUBP_SETUP_INTERDEPENDENT, + DPP_SET_CURSOR_MATRIX, + MPC_UPDATE_BLENDING, + MPC_ASSERT_IDLE_MPCC, + MPC_INSERT_PLANE, + DPP_SET_SCALER, + HUBP_MEM_PROGRAM_VIEWPORT, + SET_CURSOR_ATTRIBUTE, + SET_CURSOR_POSITION, + SET_CURSOR_SDR_WHITE_LEVEL, + PROGRAM_OUTPUT_CSC, + HUBP_SET_LEGACY_TILING_COMPAT_LEVEL, + HUBP_SET_BLANK, + PHANTOM_HUBP_POST_ENABLE, /* This must be the last value in this enum, add new ones above */ HWSS_BLOCK_SEQUENCE_FUNC_COUNT }; @@ -205,6 +976,11 @@ struct block_sequence { enum block_sequence_func func; }; +struct block_sequence_state { + struct block_sequence *steps; + unsigned int *num_steps; +}; + 
#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES) struct hw_sequencer_funcs { @@ -222,6 +998,8 @@ struct hw_sequencer_funcs { enum dc_status (*apply_ctx_to_hw)(struct dc *dc, struct dc_state *context); void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); + void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); void (*apply_ctx_for_surface)(struct dc *dc, const struct dc_stream_state *stream, @@ -239,6 +1017,10 @@ struct hw_sequencer_funcs { void (*wait_for_mpcc_disconnect)(struct dc *dc, struct resource_pool *res_pool, struct pipe_ctx *pipe_ctx); + void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc, + struct resource_pool *res_pool, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*edp_backlight_control)( struct dc_link *link, bool enable); @@ -310,6 +1092,13 @@ struct hw_sequencer_funcs { void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); + void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe); + void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe); + void (*notify_cursor_offload_drr_update)(struct dc *dc, struct dc_state *context, + const struct dc_stream_state *stream); + void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe); /* Colour Related */ void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx); @@ -452,13 +1241,13 @@ struct hw_sequencer_funcs { const struct dc_state *new_ctx); void (*wait_for_dcc_meta_propagation)(const struct dc *dc, const 
struct pipe_ctx *top_pipe_to_program); - void (*fams2_global_control_lock)(struct dc *dc, + void (*dmub_hw_control_lock)(struct dc *dc, struct dc_state *context, bool lock); void (*fams2_update_config)(struct dc *dc, struct dc_state *context, bool enable); - void (*fams2_global_control_lock_fast)(union block_sequence_params *params); + void (*dmub_hw_control_lock_fast)(union block_sequence_params *params); void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); void (*program_outstanding_updates)(struct dc *dc, struct dc_state *context); @@ -471,11 +1260,23 @@ struct hw_sequencer_funcs { void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*enable_plane_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void (*update_dchubp_dpp)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*update_dchubp_dpp_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); void (*post_unlock_reset_opp)(struct dc *dc, struct pipe_ctx *opp_head); + void (*post_unlock_reset_opp_sequence)( + struct dc *dc, + struct pipe_ctx *opp_head, + struct block_sequence_state *seq_state); void (*get_underflow_debug_data)(const struct dc *dc, struct timing_generator *tg, struct dc_underflow_debug_data *out_data); @@ -588,4 +1389,624 @@ void hwss_set_ocsc_default(union block_sequence_params *params); void hwss_subvp_save_surf_addr(union block_sequence_params *params); +void hwss_program_surface_config(union block_sequence_params *params); + +void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params); + +void hwss_set_odm_combine(union block_sequence_params *params); + +void hwss_set_odm_bypass(union block_sequence_params *params); + +void hwss_opp_pipe_clock_control(union block_sequence_params *params); + +void 
hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params); + +void hwss_blank_pixel_data(union block_sequence_params *params); + +void hwss_dccg_set_dto_dscclk(union block_sequence_params *params); + +void hwss_dsc_set_config(union block_sequence_params *params); + +void hwss_dsc_enable(union block_sequence_params *params); + +void hwss_tg_set_dsc_config(union block_sequence_params *params); + +void hwss_dsc_disconnect(union block_sequence_params *params); + +void hwss_dsc_read_state(union block_sequence_params *params); + +void hwss_dsc_calculate_and_set_config(union block_sequence_params *params); + +void hwss_dsc_enable_with_opp(union block_sequence_params *params); + +void hwss_program_tg(union block_sequence_params *params); + +void hwss_tg_program_global_sync(union block_sequence_params *params); + +void hwss_tg_wait_for_state(union block_sequence_params *params); + +void hwss_tg_set_vtg_params(union block_sequence_params *params); + +void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params); + +void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params); + +void hwss_program_det_size(union block_sequence_params *params); + +void hwss_program_det_segments(union block_sequence_params *params); + +void hwss_opp_set_dyn_expansion(union block_sequence_params *params); + +void hwss_opp_program_fmt(union block_sequence_params *params); + +void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params); + +void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params); + +void hwss_set_abm_pipe(union block_sequence_params *params); + +void hwss_set_abm_level(union block_sequence_params *params); + +void hwss_set_abm_immediate_disable(union block_sequence_params *params); + +void hwss_mpc_remove_mpcc(union block_sequence_params *params); + +void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params); + +void hwss_dc_set_optimized_required(union block_sequence_params 
*params); + +void hwss_hubp_disconnect(union block_sequence_params *params); + +void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params); + +void hwss_tg_enable_crtc(union block_sequence_params *params); + +void hwss_tg_set_gsl(union block_sequence_params *params); + +void hwss_tg_set_gsl_source_select(union block_sequence_params *params); + +void hwss_hubp_wait_flip_pending(union block_sequence_params *params); + +void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params); + +void hwss_update_force_pstate(union block_sequence_params *params); + +void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params); + +void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params); + +void hwss_tg_get_frame_count(union block_sequence_params *params); + +void hwss_mpc_set_dwb_mux(union block_sequence_params *params); + +void hwss_mpc_disable_dwb_mux(union block_sequence_params *params); + +void hwss_mcif_wb_config_buf(union block_sequence_params *params); + +void hwss_mcif_wb_config_arb(union block_sequence_params *params); + +void hwss_mcif_wb_enable(union block_sequence_params *params); + +void hwss_mcif_wb_disable(union block_sequence_params *params); + +void hwss_dwbc_enable(union block_sequence_params *params); + +void hwss_dwbc_disable(union block_sequence_params *params); + +void hwss_dwbc_update(union block_sequence_params *params); + +void hwss_hubp_update_mall_sel(union block_sequence_params *params); + +void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params); + +void hwss_hubp_set_blank_en(union block_sequence_params *params); + +void hwss_hubp_disable_control(union block_sequence_params *params); + +void hwss_hubbub_soft_reset(union block_sequence_params *params); + +void hwss_hubp_clk_cntl(union block_sequence_params *params); + +void hwss_hubp_init(union block_sequence_params *params); + +void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params 
*params); + +void hwss_hubp_set_flip_int(union block_sequence_params *params); + +void hwss_dpp_dppclk_control(union block_sequence_params *params); + +void hwss_disable_phantom_crtc(union block_sequence_params *params); + +void hwss_dsc_pg_status(union block_sequence_params *params); + +void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params); + +void hwss_dsc_disable(union block_sequence_params *params); + +void hwss_dccg_set_ref_dscclk(union block_sequence_params *params); + +void hwss_dpp_pg_control(union block_sequence_params *params); + +void hwss_hubp_pg_control(union block_sequence_params *params); + +void hwss_hubp_reset(union block_sequence_params *params); + +void hwss_dpp_reset(union block_sequence_params *params); + +void hwss_dpp_root_clock_control(union block_sequence_params *params); + +void hwss_dc_ip_request_cntl(union block_sequence_params *params); + +void hwss_dccg_update_dpp_dto(union block_sequence_params *params); + +void hwss_hubp_vtg_sel(union block_sequence_params *params); + +void hwss_hubp_setup2(union block_sequence_params *params); + +void hwss_hubp_setup(union block_sequence_params *params); + +void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params); + +void hwss_hubp_setup_interdependent2(union block_sequence_params *params); + +void hwss_hubp_setup_interdependent(union block_sequence_params *params); + +void hwss_dpp_set_cursor_matrix(union block_sequence_params *params); + +void hwss_mpc_update_mpcc(union block_sequence_params *params); + +void hwss_mpc_update_blending(union block_sequence_params *params); + +void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params); + +void hwss_mpc_insert_plane(union block_sequence_params *params); + +void hwss_dpp_set_scaler(union block_sequence_params *params); + +void hwss_hubp_mem_program_viewport(union block_sequence_params *params); + +void hwss_set_cursor_attribute(union block_sequence_params *params); + +void 
hwss_set_cursor_position(union block_sequence_params *params); + +void hwss_set_cursor_sdr_white_level(union block_sequence_params *params); + +void hwss_program_output_csc(union block_sequence_params *params); + +void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params); + +void hwss_hubp_set_blank(union block_sequence_params *params); + +void hwss_phantom_hubp_post_enable(union block_sequence_params *params); + +void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock); + +void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state, + struct hubp *hubp, bool flip_immediate); + +void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer); + +void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state); + +void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream); + +void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id); + +void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state, + struct mpc *mpc, int mpcc_id, bool power_on); + +void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state, + 
struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode); + +void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state, + struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum mpc_output_csc_mode ocsc_mode); + +void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state, + struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); + +void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state, + struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index); + +void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *top_pipe_to_program); + +void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state, + struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); + +void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state, + struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count, + int odm_slice_width, int last_odm_slice_width); + +void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state, + struct timing_generator *optc, struct dc_crtc_timing *timing); + +void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int vready_offset, + unsigned int vstartup_lines, + unsigned int vupdate_offset_pixels, + unsigned int vupdate_vupdate_width_pixels, + unsigned int pstate_keepout_start_lines); + +void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state, + struct timing_generator *tg, enum crtc_state state); + +void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state, + struct timing_generator *tg, 
struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); + +void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state, + struct timing_generator *tg, int start_line); + +void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state, + struct dpp *dpp, uint32_t hw_mult); + +void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state, + struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb); + +void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state, + struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs); + +void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, bool enable, bool wait); + +void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state, + struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size); + +void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, enum dc_color_space color_sp, + enum dc_color_depth color_dpth, enum signal_type signal); + +void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth, + struct clamping_and_pixel_encoding_params *clamping); + +void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_abm_set_level(struct block_sequence_state *seq_state, + struct abm *abm, uint32_t abm_level); + +void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg); + +void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state, + struct hubp *hubp, unsigned int timeout_us, unsigned int polling_interval_us); + +void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state, + struct timing_generator *tg, unsigned int timeout_us, unsigned int 
polling_interval_us); + +void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, int inst, int num_slices_h); + +void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt); + +void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove); + +void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, int mpcc_inst, bool pending); + +void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc); + +void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state, + struct dc *dc, bool optimized_required); + +void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state, + struct dc *dc, struct pipe_ctx *pipe_ctx); + +void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum controller_dp_test_pattern test_pattern, + enum controller_dp_color_space color_space, + enum dc_color_depth color_depth, + struct tg_color solid_color, + bool use_solid_color, + int width, + int height, + int offset); + +void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool use_default_params, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state, + struct dc *dc, + bool enable); + +void hwss_add_dwbc_update(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params); + +void hwss_add_mcif_wb_config_buf(struct 
block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_buf_params *mcif_buf_params, + unsigned int dest_height); + +void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb, + struct mcif_arb_params *mcif_arb_params); + +void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb); + +void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state, + struct mcif_wb *mcif_wb); + +void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + int dwb_id, + int mpcc_id); + +void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state, + struct mpc *mpc, + unsigned int dwb_id); + +void hwss_add_dwbc_enable(struct block_sequence_state *seq_state, + struct dwbc *dwb, + struct dc_dwb_params *dwb_params); + +void hwss_add_dwbc_disable(struct block_sequence_state *seq_state, + struct dwbc *dwb); + +void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state, + struct timing_generator *tg, + struct gsl_params gsl); + +void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state, + struct timing_generator *tg, + int group_idx, + uint32_t gsl_ready_signal); + +void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t mall_sel, + bool cache_cursor); + +void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool enable); + +void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool disable); + +void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset), + bool reset); + +void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool 
enable); + +void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state, + struct dpp *dpp, + bool dppclk_div, + bool enable); + +void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state, + struct timing_generator *tg); + +void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + int dsc_inst, + bool is_ungated); + +void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated); + +void hwss_add_dsc_disable(struct block_sequence_state *seq_state, + struct display_stream_compressor *dsc, + bool *is_ungated); + +void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dsc_inst, + bool *is_ungated); + +void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on); + +void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool power_on); + +void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state, + struct dce_hwseq *hws, + unsigned int hubp_inst, + bool power_on); + +void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool blank); + +void hwss_add_hubp_init(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_hubp_reset(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_dpp_reset(struct block_sequence_state *seq_state, + struct dpp *dpp); + +void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + bool enable); + +void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint64_t sys_default, + uint64_t sys_low, + uint64_t sys_high); + +void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state, + 
struct hubp *hubp); + +void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state, + struct dccg *dccg, + int dpp_inst, + int dppclk_khz); + +void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state, + struct hubp *hubp, + uint32_t otg_inst); + +void hwss_add_hubp_setup2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs, + union dml2_global_sync_programming *global_sync, + struct dc_crtc_timing *timing); + +void hwss_add_hubp_setup(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs, + struct _vcs_dpi_display_rq_regs_st *rq_regs, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); + +void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state, + struct hubp *hubp, + bool unbounded_req); + +void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct dml2_dchub_per_pipe_register_set *hubp_regs); + +void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state, + struct hubp *hubp, + struct _vcs_dpi_display_dlg_regs_st *dlg_regs, + struct _vcs_dpi_display_ttu_regs_st *ttu_regs); +void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state, + struct hubp *hubp, + enum surface_pixel_format format, + struct dc_tiling_info *tiling_info, + struct plane_size plane_size, + enum dc_rotation_angle rotation, + struct dc_plane_dcc_param *dcc, + bool horizontal_mirror, + int compat_level); + +void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state, + struct pipe_ctx *pipe_ctx); + +void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state, + struct dpp *dpp, + enum dc_color_space color_space, + struct dc_csc_transform *cursor_csc_color_matrix); + +void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct 
mpcc_blnd_cfg blnd_cfg, + int mpcc_id); + +void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state, + struct mpc *mpc, + int mpcc_id); + +void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state, + struct mpc *mpc, + struct mpc_tree *mpc_tree_params, + struct mpcc_blnd_cfg blnd_cfg, + struct mpcc_sm_cfg *sm_cfg, + struct mpcc *insert_above_mpcc, + int dpp_id, + int mpcc_id); + +void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state, + struct dpp *dpp, + const struct scaler_data *scl_data); + +void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state, + struct hubp *hubp, + const struct rect *viewport, + const struct rect *viewport_c); + +void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_position(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx); + +void hwss_add_program_output_csc(struct block_sequence_state *seq_state, + struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); + +void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state, + struct hubp *hubp); + +void hwss_add_update_force_pstate(struct block_sequence_state *seq_state, + struct dc *dc, + struct dc_state *context); + +void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state, + struct hubbub *hubbub); + +void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state, + struct hubbub *hubbub, + bool allow, + bool *disallow_self_refresh_applied); + +void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state, + struct timing_generator *tg, + unsigned int *frame_count); + +void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state, 
+ struct timing_generator *tg, + struct dsc_optc_config *dsc_optc_cfg, + bool enable); + +void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state, + struct output_pixel_processor *opp, + enum dc_pixel_encoding pixel_encoding, + bool is_otg_master); + #endif /* __DC_HW_SEQUENCER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 1e2d247fbbac..406db231bc72 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -27,6 +27,7 @@ #define __DC_HW_SEQUENCER_PRIVATE_H__ #include "dc_types.h" +#include "hw_sequencer.h" enum pipe_gating_control { PIPE_GATING_CONTROL_DISABLE = 0, @@ -80,7 +81,13 @@ struct hwseq_private_funcs { void (*plane_atomic_disconnect)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx); + void (*plane_atomic_disconnect_sequence)(struct dc *dc, + struct dc_state *state, + struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); bool (*set_input_transfer_func)(struct dc *dc, struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); @@ -97,6 +104,10 @@ struct hwseq_private_funcs { void (*blank_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank); + void (*blank_pixel_data_sequence)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + bool blank, + struct block_sequence_state *seq_state); enum dc_status (*enable_stream_timing)( struct pipe_ctx *pipe_ctx, struct dc_state *context, @@ -105,6 +116,8 @@ struct hwseq_private_funcs { bool enable); void (*setup_vupdate_interrupt)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx, + struct block_sequence_state 
*seq_state); bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*init_blank)(struct dc *dc, struct timing_generator *tg); void (*disable_vga)(struct dce_hwseq *hws); @@ -112,6 +125,10 @@ struct hwseq_private_funcs { void (*plane_atomic_power_down)(struct dc *dc, struct dpp *dpp, struct hubp *hubp); + void (*plane_atomic_power_down_sequence)(struct dc *dc, + struct dpp *dpp, + struct hubp *hubp, + struct block_sequence_state *seq_state); void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx); void (*enable_power_gating_plane)(struct dce_hwseq *hws, bool enable); @@ -140,15 +157,31 @@ struct hwseq_private_funcs { unsigned int dsc_inst); void (*update_odm)(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx); + void (*update_odm_sequence)(struct dc *dc, struct dc_state *context, + struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state); void (*program_all_writeback_pipes_in_tree)(struct dc *dc, const struct dc_stream_state *stream, struct dc_state *context); + void (*program_all_writeback_pipes_in_tree_sequence)( + struct dc *dc, + const struct dc_stream_state *stream, + struct dc_state *context, + struct block_sequence_state *seq_state); bool (*s0i3_golden_init_wa)(struct dc *dc); void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx); + void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx, + struct block_sequence_state *seq_state); void (*verify_allow_pstate_change_high)(struct dc *dc); + void (*verify_allow_pstate_change_high_sequence)(struct dc *dc, + struct block_sequence_state *seq_state); void (*program_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); + void (*program_pipe_sequence)( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context, + struct block_sequence_state *seq_state); bool (*wait_for_blank_complete)(struct output_pixel_processor *opp); void (*dccg_init)(struct dce_hwseq *hws); bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx, @@ 
-163,6 +196,8 @@ struct hwseq_private_funcs { void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_state *context); void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context); + void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context, + struct block_sequence_state *seq_state); void (*update_force_pstate)(struct dc *dc, struct dc_state *context); void (*update_mall_sel)(struct dc *dc, struct dc_state *context); unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx, @@ -186,6 +221,7 @@ struct hwseq_private_funcs { void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx); void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only); void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*dc_ip_request_cntl)(struct dc *dc, bool enable); }; struct dce_hwseq { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index d11893f8c916..5ed2cd344804 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -58,8 +58,8 @@ #include "transform.h" #include "dpp.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" -#include "dml2/dml21/inc/dml_top_types.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_types.h" struct resource_pool; struct dc_state; @@ -274,7 +274,7 @@ struct resource_pool { /* An array for accessing the link encoder objects that have been created. * Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA */ - struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS]; + struct link_encoder *link_encoders[MAX_LINK_ENCODERS]; /* Number of DIG link encoder objects created - i.e. number of valid * entries in link_encoders array. 
*/ @@ -514,7 +514,7 @@ struct pipe_ctx { struct link_enc_cfg_context { enum link_enc_cfg_mode mode; struct link_enc_assignment link_enc_assignments[MAX_PIPES]; - enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS]; + enum engine_id link_enc_avail[MAX_LINK_ENCODERS]; struct link_enc_assignment transient_assignments[MAX_PIPES]; }; @@ -526,8 +526,8 @@ struct resource_context { uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; struct link_enc_cfg_context link_enc_cfg_ctx; - unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS]; - int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS]; + unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS]; + int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS]; bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h index 45645f9fd86c..7ce2f417f86a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h @@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp { } size; union reg_cursor_settings_cfg { struct { - uint32_t dst_y_offset: 8; - uint32_t chunk_hdl_adjust: 2; - uint32_t reserved: 22; + uint32_t dst_y_offset: 8; + uint32_t chunk_hdl_adjust: 2; + uint32_t reserved: 22; } bits; uint32_t raw; } settings; @@ -83,12 +83,34 @@ union reg_cur0_control_cfg { } bits; uint32_t raw; }; + struct cursor_position_cache_dpp { union reg_cur0_control_cfg cur0_ctl; }; struct cursor_attribute_cache_dpp { union reg_cur0_control_cfg cur0_ctl; + union reg_cur0_fp_scale_bias { + struct { + uint32_t fp_bias: 16; + uint32_t fp_scale: 16; + } bits; + uint32_t raw; + } fp_scale_bias; + union reg_cur0_fp_scale_bias_g_y { + struct { + uint32_t fp_bias_g_y: 16; + uint32_t fp_scale_g_y: 16; + } bits; + uint32_t raw; + } 
fp_scale_bias_g_y; + union reg_cur0_fp_scale_bias_rb_crcb { + struct { + uint32_t fp_bias_rb_crcb: 16; + uint32_t fp_scale_rb_crcb: 16; + } bits; + uint32_t raw; + } fp_scale_bias_rb_crcb; }; struct cursor_attributes_cfg { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 61c4d2a7db1c..500a601e99b5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -71,6 +71,125 @@ enum pixel_rate_div { PIXEL_RATE_DIV_NA = 0xF }; +struct dcn_dccg_reg_state { + uint32_t dc_mem_global_pwr_req_cntl; + uint32_t dccg_audio_dtbclk_dto_modulo; + uint32_t dccg_audio_dtbclk_dto_phase; + uint32_t dccg_audio_dto_source; + uint32_t dccg_audio_dto0_module; + uint32_t dccg_audio_dto0_phase; + uint32_t dccg_audio_dto1_module; + uint32_t dccg_audio_dto1_phase; + uint32_t dccg_cac_status; + uint32_t dccg_cac_status2; + uint32_t dccg_disp_cntl_reg; + uint32_t dccg_ds_cntl; + uint32_t dccg_ds_dto_incr; + uint32_t dccg_ds_dto_modulo; + uint32_t dccg_ds_hw_cal_interval; + uint32_t dccg_gate_disable_cntl; + uint32_t dccg_gate_disable_cntl2; + uint32_t dccg_gate_disable_cntl3; + uint32_t dccg_gate_disable_cntl4; + uint32_t dccg_gate_disable_cntl5; + uint32_t dccg_gate_disable_cntl6; + uint32_t dccg_global_fgcg_rep_cntl; + uint32_t dccg_gtc_cntl; + uint32_t dccg_gtc_current; + uint32_t dccg_gtc_dto_incr; + uint32_t dccg_gtc_dto_modulo; + uint32_t dccg_perfmon_cntl; + uint32_t dccg_perfmon_cntl2; + uint32_t dccg_soft_reset; + uint32_t dccg_test_clk_sel; + uint32_t dccg_vsync_cnt_ctrl; + uint32_t dccg_vsync_cnt_int_ctrl; + uint32_t dccg_vsync_otg0_latch_value; + uint32_t dccg_vsync_otg1_latch_value; + uint32_t dccg_vsync_otg2_latch_value; + uint32_t dccg_vsync_otg3_latch_value; + uint32_t dccg_vsync_otg4_latch_value; + uint32_t dccg_vsync_otg5_latch_value; + uint32_t dispclk_cgtt_blk_ctrl_reg; + uint32_t dispclk_freq_change_cntl; + uint32_t dp_dto_dbuf_en; + uint32_t dp_dto0_modulo; + 
uint32_t dp_dto0_phase; + uint32_t dp_dto1_modulo; + uint32_t dp_dto1_phase; + uint32_t dp_dto2_modulo; + uint32_t dp_dto2_phase; + uint32_t dp_dto3_modulo; + uint32_t dp_dto3_phase; + uint32_t dpiaclk_540m_dto_modulo; + uint32_t dpiaclk_540m_dto_phase; + uint32_t dpiaclk_810m_dto_modulo; + uint32_t dpiaclk_810m_dto_phase; + uint32_t dpiaclk_dto_cntl; + uint32_t dpiasymclk_cntl; + uint32_t dppclk_cgtt_blk_ctrl_reg; + uint32_t dppclk_ctrl; + uint32_t dppclk_dto_ctrl; + uint32_t dppclk0_dto_param; + uint32_t dppclk1_dto_param; + uint32_t dppclk2_dto_param; + uint32_t dppclk3_dto_param; + uint32_t dprefclk_cgtt_blk_ctrl_reg; + uint32_t dprefclk_cntl; + uint32_t dpstreamclk_cntl; + uint32_t dscclk_dto_ctrl; + uint32_t dscclk0_dto_param; + uint32_t dscclk1_dto_param; + uint32_t dscclk2_dto_param; + uint32_t dscclk3_dto_param; + uint32_t dtbclk_dto_dbuf_en; + uint32_t dtbclk_dto0_modulo; + uint32_t dtbclk_dto0_phase; + uint32_t dtbclk_dto1_modulo; + uint32_t dtbclk_dto1_phase; + uint32_t dtbclk_dto2_modulo; + uint32_t dtbclk_dto2_phase; + uint32_t dtbclk_dto3_modulo; + uint32_t dtbclk_dto3_phase; + uint32_t dtbclk_p_cntl; + uint32_t force_symclk_disable; + uint32_t hdmicharclk0_clock_cntl; + uint32_t hdmistreamclk_cntl; + uint32_t hdmistreamclk0_dto_param; + uint32_t microsecond_time_base_div; + uint32_t millisecond_time_base_div; + uint32_t otg_pixel_rate_div; + uint32_t otg0_phypll_pixel_rate_cntl; + uint32_t otg0_pixel_rate_cntl; + uint32_t otg1_phypll_pixel_rate_cntl; + uint32_t otg1_pixel_rate_cntl; + uint32_t otg2_phypll_pixel_rate_cntl; + uint32_t otg2_pixel_rate_cntl; + uint32_t otg3_phypll_pixel_rate_cntl; + uint32_t otg3_pixel_rate_cntl; + uint32_t phyasymclk_clock_cntl; + uint32_t phybsymclk_clock_cntl; + uint32_t phycsymclk_clock_cntl; + uint32_t phydsymclk_clock_cntl; + uint32_t phyesymclk_clock_cntl; + uint32_t phyplla_pixclk_resync_cntl; + uint32_t phypllb_pixclk_resync_cntl; + uint32_t phypllc_pixclk_resync_cntl; + uint32_t phyplld_pixclk_resync_cntl; + 
uint32_t phyplle_pixclk_resync_cntl; + uint32_t refclk_cgtt_blk_ctrl_reg; + uint32_t socclk_cgtt_blk_ctrl_reg; + uint32_t symclk_cgtt_blk_ctrl_reg; + uint32_t symclk_psp_cntl; + uint32_t symclk32_le_cntl; + uint32_t symclk32_se_cntl; + uint32_t symclka_clock_enable; + uint32_t symclkb_clock_enable; + uint32_t symclkc_clock_enable; + uint32_t symclkd_clock_enable; + uint32_t symclke_clock_enable; +}; + struct dccg { struct dc_context *ctx; const struct dccg_funcs *funcs; @@ -81,7 +200,6 @@ struct dccg { //int audio_dtbclk_khz;/* TODO needs to be removed */ //int ref_dtbclk_khz;/* TODO needs to be removed */ }; - struct dtbclk_dto_params { const struct dc_crtc_timing *timing; int otg_inst; @@ -214,6 +332,7 @@ struct dccg_funcs { void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); + void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state); }; #endif //__DAL_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 843a18287c83..dafc8490efb5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -137,6 +137,14 @@ struct dcn_hubbub_state { uint32_t dram_state_cntl; }; +struct dcn_hubbub_reg_state { + uint32_t det0_ctrl; + uint32_t det1_ctrl; + uint32_t det2_ctrl; + uint32_t det3_ctrl; + uint32_t compbuf_ctrl; +}; + struct hubbub_system_latencies { uint32_t max_latency_ns; uint32_t avg_latency_ns; @@ -216,6 +224,8 @@ struct hubbub_funcs { void (*init_watermarks)(struct hubbub *hubbub); + void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state); + /** * @program_det_size: * @@ -242,17 +252,37 @@ struct hubbub_funcs { void (*program_compbuf_segments)(struct hubbub 
*hubbub, unsigned compbuf_size_seg, bool safe_to_increase); void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst); bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower); - void (*get_det_sizes)(struct hubbub *hubbub, uint32_t *curr_det_sizes, uint32_t *target_det_sizes); - uint32_t (*compbuf_config_error)(struct hubbub *hubbub); - struct hubbub_perfmon_funcs{ - void (*start_system_latency_measurement)(struct hubbub *hubbub); - void (*get_system_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, struct hubbub_system_latencies *latencies); - void (*start_in_order_bandwidth_measurement)(struct hubbub *hubbub); - void (*get_in_order_bandwidth_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *bandwidth_mbps); - void (*start_urgent_ramp_latency_measurement)(struct hubbub *hubbub, const struct hubbub_urgent_latency_params *params); - void (*get_urgent_ramp_latency_result)(struct hubbub *hubbub, uint32_t refclk_mhz, uint32_t *latency_ns); + struct hubbub_perfmon_funcs { void (*reset)(struct hubbub *hubbub); + void (*start_measuring_max_memory_latency_ns)( + struct hubbub *hubbub); + uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *sample_count); + void (*start_measuring_average_memory_latency_ns)( + struct hubbub *hubbub); + uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *sample_count); + void (*start_measuring_urgent_ramp_latency_ns)( + struct hubbub *hubbub, + const struct hubbub_urgent_latency_params *params); + uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub, + uint32_t refclk_mhz); + void (*start_measuring_unbounded_bandwidth_mbps)( + struct hubbub *hubbub); + uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t *duration_ns); + void (*start_measuring_average_bandwidth_mbps)( + struct hubbub *hubbub); + uint32_t 
(*get_average_bandwidth_mbps)(struct hubbub *hubbub, + uint32_t refclk_mhz, uint32_t min_duration_ns, + uint32_t *duration_ns); } perfmon; + + struct hubbub_qos_funcs { + void (*force_display_nominal_profile)(struct hubbub *hubbub); + void (*force_display_urgent_profile)(struct hubbub *hubbub); + void (*reset_display_qos_profile)(struct hubbub *hubbub); + } qos; }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 1b7c085dc2cc..d88b57d4f512 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -65,7 +65,6 @@ union defer_reg_writes { } bits; uint32_t raw; }; - struct dpp { const struct dpp_funcs *funcs; struct dc_context *ctx; @@ -84,6 +83,7 @@ struct dpp { struct pwl_params shaper_params; bool cm_bypass_mode; + bool cursor_offload; struct cursor_position_cache_dpp pos; struct cursor_attribute_cache_dpp att; @@ -202,6 +202,19 @@ struct dcn_dpp_state { uint32_t gamcor_mode; }; +struct dcn_dpp_reg_state { + uint32_t recout_start; + uint32_t recout_size; + uint32_t scl_horz_filter_scale_ratio; + uint32_t scl_vert_filter_scale_ratio; + uint32_t scl_mode; + uint32_t cm_control; + uint32_t dpp_control; + uint32_t dscl_control; + uint32_t obuf_control; + uint32_t mpc_size; +}; + struct CM_bias_params { uint32_t cm_bias_cr_r; uint32_t cm_bias_y_g; @@ -225,6 +238,8 @@ struct dpp_funcs { void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s); + void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state); + void (*dpp_reset)(struct dpp *dpp); void (*dpp_set_scaler)(struct dpp *dpp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 2b874d2cc61c..a79019365af8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -41,8 +41,8 @@ #include "mem_input.h" #include "cursor_reg_cache.h" -#include 
"dml2/dml21/inc/dml_top_dchub_registers.h" -#include "dml2/dml21/inc/dml_top_types.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_types.h" #define OPP_ID_INVALID 0xf #define MAX_TTU 0xffffff @@ -126,11 +126,13 @@ struct hubp { int mpcc_id; struct dc_cursor_attributes curs_attr; struct dc_cursor_position curs_pos; + bool cursor_offload; bool power_gated; struct cursor_position_cache_hubp pos; struct cursor_attribute_cache_hubp att; struct cursor_rect cur_rect; + bool use_mall_for_cursor; }; struct surface_flip_registers { @@ -236,6 +238,7 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); void (*hubp_read_state)(struct hubp *hubp); + void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state); void (*hubp_clear_underflow)(struct hubp *hubp); void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 41c76ba9ba56..5e2813e9ae2f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -44,7 +44,37 @@ */ #define MAX_PIPES 6 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) -#define MAX_LINKS (MAX_PIPES * 2 +2) + +#define MAX_DPIA 6 +#define MAX_CONNECTOR 6 +#define MAX_VIRTUAL_LINKS 4 + +#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) + +/** + * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders + * + * Digital encoders are ENGINE_ID_DIGA...G, there are at most 7, + * although not every GPU may have that many. + */ +#define MAX_DIG_LINK_ENCODERS 7 + +/** + * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders + * + * Analog encoders are ENGINE_ID_DACA/B, there are at most 2, + * although not every GPU may have that many. 
Modern GPUs typically + * don't have analog encoders. + */ +#define MAX_DAC_LINK_ENCODERS 2 + +/** + * define MAX_LINK_ENCODERS - maximum number link encoders in total + * + * This includes both analog and digital encoders. + */ +#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS) + #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 08c16ba52a51..df512920a9fa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -47,6 +47,7 @@ struct encoder_init_data { enum hpd_source_id hpd_source; /* TODO: in DAL2, here was pointer to EventManagerInterface */ struct graphics_object_id encoder; + enum engine_id analog_engine; struct dc_context *ctx; enum transmitter transmitter; }; @@ -83,6 +84,7 @@ struct link_encoder { struct graphics_object_id connector; uint32_t output_signals; enum engine_id preferred_engine; + enum engine_id analog_engine; struct encoder_feature_support features; enum transmitter transmitter; enum hpd_source_id hpd_source; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 42fbc70f7056..d468bc85566a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -29,7 +29,7 @@ #include "include/grph_object_id.h" #include "dml/display_mode_structs.h" -#include "dml2/dml21/inc/dml_top_dchub_registers.h" +#include "dml2_0/dml21/inc/dml_top_dchub_registers.h" struct dchub_init_data; struct cstate_pstate_watermarks_st { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 22960ee03dee..a8d1abe20f62 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -350,6 +350,15 @@ 
struct mpcc_state { struct mpc_rmcm_regs rmcm_regs; }; +struct dcn_mpc_reg_state { + uint32_t mpcc_bot_sel; + uint32_t mpcc_control; + uint32_t mpcc_status; + uint32_t mpcc_top_sel; + uint32_t mpcc_opp_id; + uint32_t mpcc_ogam_control; +}; + /** * struct mpc_funcs - funcs */ @@ -373,6 +382,24 @@ struct mpc_funcs { struct mpc *mpc, int mpcc_inst, struct mpcc_state *s); + /** + * @mpc_read_reg_state: + * + * Read MPC register state for debugging underflow purposes. + * + * Parameters: + * + * - [in] mpc - MPC context + * - [out] reg_state - MPC register state structure + * + * Return: + * + * void + */ + void (*mpc_read_reg_state)( + struct mpc *mpc, + int mpcc_inst, + struct dcn_mpc_reg_state *mpc_reg_state); /** * @insert_plane: diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 747679cb4944..e1428a83ecbc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -297,6 +297,16 @@ struct oppbuf_params { uint32_t num_segment_padded_pixels; }; +struct dcn_opp_reg_state { + uint32_t dpg_control; + uint32_t fmt_control; + uint32_t oppbuf_control; + uint32_t opp_pipe_control; + uint32_t opp_pipe_crc_control; + uint32_t opp_abm_control; + uint32_t dscrm_dsc_forward_config; +}; + struct opp_funcs { @@ -368,6 +378,9 @@ struct opp_funcs { struct output_pixel_processor *opp, enum dc_pixel_encoding pixel_encoding, bool is_primary); + + void (*opp_read_reg_state)( + struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index f2de2cf23859..da7bf59c4b9d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -175,6 +175,135 @@ struct dcn_otg_state { uint32_t otg_double_buffer_control; }; +struct dcn_optc_reg_state { + uint32_t 
optc_bytes_per_pixel; + uint32_t optc_data_format_control; + uint32_t optc_data_source_select; + uint32_t optc_input_clock_control; + uint32_t optc_input_global_control; + uint32_t optc_input_spare_register; + uint32_t optc_memory_config; + uint32_t optc_rsmu_underflow; + uint32_t optc_underflow_threshold; + uint32_t optc_width_control; + + uint32_t otg_3d_structure_control; + uint32_t otg_clock_control; + uint32_t otg_control; + uint32_t otg_count_control; + uint32_t otg_count_reset; + uint32_t otg_crc_cntl; + uint32_t otg_crc_sig_blue_control_mask; + uint32_t otg_crc_sig_red_green_mask; + uint32_t otg_crc0_data_b; + uint32_t otg_crc0_data_rg; + uint32_t otg_crc0_windowa_x_control; + uint32_t otg_crc0_windowa_x_control_readback; + uint32_t otg_crc0_windowa_y_control; + uint32_t otg_crc0_windowa_y_control_readback; + uint32_t otg_crc0_windowb_x_control; + uint32_t otg_crc0_windowb_x_control_readback; + uint32_t otg_crc0_windowb_y_control; + uint32_t otg_crc0_windowb_y_control_readback; + uint32_t otg_crc1_data_b; + uint32_t otg_crc1_data_rg; + uint32_t otg_crc1_windowa_x_control; + uint32_t otg_crc1_windowa_x_control_readback; + uint32_t otg_crc1_windowa_y_control; + uint32_t otg_crc1_windowa_y_control_readback; + uint32_t otg_crc1_windowb_x_control; + uint32_t otg_crc1_windowb_x_control_readback; + uint32_t otg_crc1_windowb_y_control; + uint32_t otg_crc1_windowb_y_control_readback; + uint32_t otg_crc2_data_b; + uint32_t otg_crc2_data_rg; + uint32_t otg_crc3_data_b; + uint32_t otg_crc3_data_rg; + uint32_t otg_dlpc_control; + uint32_t otg_double_buffer_control; + uint32_t otg_drr_control2; + uint32_t otg_drr_control; + uint32_t otg_drr_timing_int_status; + uint32_t otg_drr_trigger_window; + uint32_t otg_drr_v_total_change; + uint32_t otg_drr_v_total_reach_range; + uint32_t otg_dsc_start_position; + uint32_t otg_force_count_now_cntl; + uint32_t otg_global_control0; + uint32_t otg_global_control1; + uint32_t otg_global_control2; + uint32_t otg_global_control3; + 
uint32_t otg_global_control4; + uint32_t otg_global_sync_status; + uint32_t otg_gsl_control; + uint32_t otg_gsl_vsync_gap; + uint32_t otg_gsl_window_x; + uint32_t otg_gsl_window_y; + uint32_t otg_h_blank_start_end; + uint32_t otg_h_sync_a; + uint32_t otg_h_sync_a_cntl; + uint32_t otg_h_timing_cntl; + uint32_t otg_h_total; + uint32_t otg_interlace_control; + uint32_t otg_interlace_status; + uint32_t otg_interrupt_control; + uint32_t otg_long_vblank_status; + uint32_t otg_m_const_dto0; + uint32_t otg_m_const_dto1; + uint32_t otg_manual_force_vsync_next_line; + uint32_t otg_master_en; + uint32_t otg_master_update_lock; + uint32_t otg_master_update_mode; + uint32_t otg_nom_vert_position; + uint32_t otg_pipe_update_status; + uint32_t otg_pixel_data_readback0; + uint32_t otg_pixel_data_readback1; + uint32_t otg_request_control; + uint32_t otg_snapshot_control; + uint32_t otg_snapshot_frame; + uint32_t otg_snapshot_position; + uint32_t otg_snapshot_status; + uint32_t otg_spare_register; + uint32_t otg_static_screen_control; + uint32_t otg_status; + uint32_t otg_status_frame_count; + uint32_t otg_status_hv_count; + uint32_t otg_status_position; + uint32_t otg_status_vf_count; + uint32_t otg_stereo_control; + uint32_t otg_stereo_force_next_eye; + uint32_t otg_stereo_status; + uint32_t otg_trig_manual_control; + uint32_t otg_triga_cntl; + uint32_t otg_triga_manual_trig; + uint32_t otg_trigb_cntl; + uint32_t otg_trigb_manual_trig; + uint32_t otg_update_lock; + uint32_t otg_v_blank_start_end; + uint32_t otg_v_count_stop_control; + uint32_t otg_v_count_stop_control2; + uint32_t otg_v_sync_a; + uint32_t otg_v_sync_a_cntl; + uint32_t otg_v_total; + uint32_t otg_v_total_control; + uint32_t otg_v_total_int_status; + uint32_t otg_v_total_max; + uint32_t otg_v_total_mid; + uint32_t otg_v_total_min; + uint32_t otg_vert_sync_control; + uint32_t otg_vertical_interrupt0_control; + uint32_t otg_vertical_interrupt0_position; + uint32_t otg_vertical_interrupt1_control; + uint32_t 
otg_vertical_interrupt1_position; + uint32_t otg_vertical_interrupt2_control; + uint32_t otg_vertical_interrupt2_position; + uint32_t otg_vready_param; + uint32_t otg_vstartup_param; + uint32_t otg_vsync_nom_int_status; + uint32_t otg_vupdate_keepout; + uint32_t otg_vupdate_param; +}; + /** * struct timing_generator - Entry point to Output Timing Generator feature. */ @@ -381,6 +510,7 @@ struct timing_generator_funcs { void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable); bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked); void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s); + void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 4e26a16a8743..79746d931471 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -49,6 +49,7 @@ struct resource_caps { int num_video_plane; int num_audio; int num_stream_encoder; + int num_analog_stream_encoder; int num_pll; int num_dwb; int num_ddc; diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 9e33bf937a69..1045c268672e 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link, struct audio_output audio_output[MAX_PIPES]; struct dc_stream_state *streams_on_link[MAX_PIPES]; int num_streams_on_link = 0; + struct dc *dc = (struct dc *)link->dc; needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) != link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); @@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct 
dc_link *link, if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) { stream_update.stream = streams_on_link[i]; stream_update.dpms_off = &dpms_off; - dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update); + dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update); } } } @@ -876,7 +877,7 @@ bool dp_set_test_pattern( return false; if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { - if (should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -924,7 +925,7 @@ bool dp_set_test_pattern( CRTC_STATE_VACTIVE); if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { - if (should_use_dmub_lock(pipe_ctx->stream->link)) { + if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c index 892907991f91..befa67b2b2ae 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) return; } - link_enc->funcs->connect_dig_be_to_fe(link_enc, - pipe_ctx->stream_res.stream_enc->id, true); + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + link_enc->funcs->connect_dig_be_to_fe(link_enc, + pipe_ctx->stream_res.stream_enc->id, true); if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); @@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) if 
(stream_enc->funcs->enable_stream) stream_enc->funcs->enable_stream(stream_enc, pipe_ctx->stream->signal, false); - link_enc->funcs->connect_dig_be_to_fe( - link_enc, - pipe_ctx->stream_res.stream_enc->id, - false); + + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence( pipe_ctx->stream->link, @@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; - if (!dc_is_virtual_signal(stream->signal)) + if (!dc_is_virtual_signal(stream->signal) && + !dc_is_rgb_signal(stream->signal)) stream_encoder->funcs->setup_stereo_sync( stream_encoder, pipe_ctx->stream_res.tg->inst, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 85303167a553..c417780f37bc 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service, uint8_t slave_address = HDMI_SCDC_ADDRESS; uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; + if (ddc_service->link->local_sink && + !ddc_service->link->local_sink->edid_caps.scdc_present) + return; + link_query_ddc_data(ddc_service, slave_address, &offset, sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, sizeof(sink->scdc_caps.manufacturer_OUI.byte)); @@ -858,6 +862,79 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, verify_link_capability_non_destructive(link); } +/** + * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable. 
+ * + * Evaluates an 8-byte EDID header to check if it's good enough + * for the purpose of determining whether a display is connected + * without reading the full EDID. + */ +static bool link_detect_evaluate_edid_header(uint8_t edid_header[8]) +{ + int edid_header_score = 0; + int i; + + for (i = 0; i < 8; ++i) + edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff); + + return edid_header_score >= 6; +} + +/** + * link_detect_ddc_probe() - Probe the DDC to see if a display is connected. + * + * Detect whether a display is connected to DDC without reading full EDID. + * Reads only the EDID header (the first 8 bytes of EDID) from DDC and + * evaluates whether that matches. + */ +static bool link_detect_ddc_probe(struct dc_link *link) +{ + if (!link->ddc) + return false; + + uint8_t edid_header[8] = {0}; + bool ddc_probed = i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header)); + + if (!ddc_probed) + return false; + + if (!link_detect_evaluate_edid_header(edid_header)) + return false; + + return true; +} + +/** + * link_detect_dac_load_detect() - Performs DAC load detection. + * + * Load detection can be used to detect the presence of an + * analog display when we can't read DDC. This causes a visible + * glitch so it should be used sparingly. 
+ */ +static bool link_detect_dac_load_detect(struct dc_link *link) +{ + struct dc_bios *bios = link->ctx->dc_bios; + struct link_encoder *link_enc = link->link_enc; + enum engine_id engine_id = link_enc->preferred_engine; + enum dal_device_type device_type = DEVICE_TYPE_CRT; + enum bp_result bp_result; + uint32_t enum_id; + + switch (engine_id) { + case ENGINE_ID_DACB: + enum_id = 2; + break; + case ENGINE_ID_DACA: + default: + engine_id = ENGINE_ID_DACA; + enum_id = 1; + break; + } + + bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id); + return bp_result == BP_RESULT_OK; +} + /* * detect_link_and_local_sink() - Detect if a sink is attached to a given link * @@ -942,6 +1019,12 @@ static bool detect_link_and_local_sink(struct dc_link *link, break; } + case SIGNAL_TYPE_RGB: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_RGB; + break; + } + case SIGNAL_TYPE_LVDS: { sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; sink_caps.signal = SIGNAL_TYPE_LVDS; @@ -1066,7 +1149,30 @@ static bool detect_link_and_local_sink(struct dc_link *link, DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); break; case EDID_NO_RESPONSE: + /* Analog connectors without EDID: + * - old monitor that actually doesn't have EDID + * - cheap DVI-A cable or adapter that doesn't connect DDC + */ + if (dc_connector_supports_analog(link->link_id.id)) { + /* If we didn't do DAC load detection yet, do it now + * to verify there really is a display connected. 
+ */ + if (link->type != dc_connection_dac_load && + !link_detect_dac_load_detect(link)) { + if (prev_sink) + dc_sink_release(prev_sink); + link_disconnect_sink(link); + return false; + } + + DC_LOG_INFO("%s detected analog display without EDID\n", __func__); + link->type = dc_connection_dac_load; + sink->edid_caps.analog = true; + break; + } + DC_LOG_ERROR("No EDID read.\n"); + /* * Abort detection for non-DP connectors if we have * no EDID @@ -1103,6 +1209,8 @@ static bool detect_link_and_local_sink(struct dc_link *link, break; } + sink->edid_caps.analog &= dc_connector_supports_analog(link->link_id.id); + // Check if edid is the same if ((prev_sink) && (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) @@ -1133,9 +1241,17 @@ static bool detect_link_and_local_sink(struct dc_link *link, sink = prev_sink; prev_sink = NULL; } - query_hdcp_capability(sink->sink_signal, link); + + if (!sink->edid_caps.analog) + query_hdcp_capability(sink->sink_signal, link); } + /* DVI-I connector connected to analog display. */ + if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII || + link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) && + sink->edid_caps.analog) + sink->sink_signal = SIGNAL_TYPE_RGB; + /* HDMI-DVI Dongle */ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && !sink->edid_caps.edid_hdmi) @@ -1232,6 +1348,28 @@ static bool detect_link_and_local_sink(struct dc_link *link, return true; } +/** + * link_detect_analog() - Determines if an analog sink is connected. + */ +static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type) +{ + /* Don't care about connectors that don't support an analog signal. 
*/ + ASSERT(dc_connector_supports_analog(link->link_id.id)); + + if (link_detect_ddc_probe(link)) { + *type = dc_connection_single; + return true; + } + + if (link_detect_dac_load_detect(link)) { + *type = dc_connection_dac_load; + return true; + } + + *type = dc_connection_none; + return true; +} + /* * link_detect_connection_type() - Determine if there is a sink connected * @@ -1248,6 +1386,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type * return true; } + /* Ignore the HPD pin (if any) for analog connectors. + * Instead rely on DDC and DAC. + * + * - VGA connectors don't have any HPD at all. + * - Some DVI-A cables don't connect the HPD pin. + * - Some DVI-A cables pull up the HPD pin. + * (So it's high even when no display is connected.) + */ + if (dc_connector_supports_analog(link->link_id.id)) + return link_detect_analog(link, type); + if (link->connector_signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ if (!link->dc->config.edp_no_power_sequencing) diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 83419e1a9036..4ddcdc222913 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -841,6 +841,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (should_use_dto_dscclk) dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); @@ -970,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi dsc_cfg.color_depth = stream->timing.display_color_depth; dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); @@ -2256,6 +2258,9 @@ static enum dc_status enable_link( enable_link_lvds(pipe_ctx); status = DC_OK; break; + case SIGNAL_TYPE_RGB: + status = DC_OK; + break; case SIGNAL_TYPE_VIRTUAL: status = enable_link_virtual(pipe_ctx); break; @@ -2369,7 +2374,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) set_avmute(pipe_ctx, true); } - dc->hwss.disable_audio_stream(pipe_ctx); + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + dc->hwss.disable_audio_stream(pipe_ctx); update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); @@ -2654,7 +2660,8 @@ void link_set_dpms_on( enable_stream_features(pipe_ctx); update_psp_stream_config(pipe_ctx, false); - dc->hwss.enable_audio_stream(pipe_ctx); + if (!dc_is_rgb_signal(pipe_ctx->stream->signal)) + dc->hwss.enable_audio_stream(pipe_ctx); if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { set_avmute(pipe_ctx, false); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 31a73867cd4c..7989baf3843c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -451,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link) return channel; } +static enum engine_id find_analog_engine(struct dc_link *link) +{ + struct dc_bios *bp = link->ctx->dc_bios; + struct graphics_object_id encoder = {0}; + enum bp_result bp_result = BP_RESULT_OK; + int i; + + for (i = 0; i < 3; i++) { + bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder); + + if (bp_result != BP_RESULT_OK) + return ENGINE_ID_UNKNOWN; + + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case 
ENCODER_ID_INTERNAL_KLDSCP_DAC1: + return ENGINE_ID_DACA; + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return ENGINE_ID_DACB; + } + } + + return ENGINE_ID_UNKNOWN; +} + +static bool transmitter_supported(const enum transmitter transmitter) +{ + return transmitter != TRANSMITTER_UNKNOWN && + transmitter != TRANSMITTER_NUTMEG_CRT && + transmitter != TRANSMITTER_TRAVIS_CRT && + transmitter != TRANSMITTER_TRAVIS_LCD; +} + +static bool analog_engine_supported(const enum engine_id engine_id) +{ + return engine_id == ENGINE_ID_DACA || + engine_id == ENGINE_ID_DACB; +} + static bool construct_phy(struct dc_link *link, const struct link_init_data *init_params) { @@ -482,6 +522,19 @@ static bool construct_phy(struct dc_link *link, link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); + /* Determine early if the link has any supported encoders, + * so that we avoid initializing DDC and HPD, etc. + */ + bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder); + enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder); + enc_init_data.analog_engine = find_analog_engine(link); + + if (!transmitter_supported(enc_init_data.transmitter) && + !analog_engine_supported(enc_init_data.analog_engine)) { + DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id); + return false; + } + link->ep_type = DISPLAY_ENDPOINT_PHY; DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); @@ -530,6 +583,9 @@ static bool construct_phy(struct dc_link *link, case CONNECTOR_ID_DUAL_LINK_DVII: link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; break; + case CONNECTOR_ID_VGA: + link->connector_signal = SIGNAL_TYPE_RGB; + break; case CONNECTOR_ID_DISPLAY_PORT: case CONNECTOR_ID_MXM: case CONNECTOR_ID_USBC: @@ -611,16 +667,12 @@ static bool construct_phy(struct dc_link *link, dal_ddc_get_line(get_ddc_pin(link->ddc)); enc_init_data.ctx = dc_ctx; - bp_funcs->get_src_obj(dc_ctx->dc_bios, 
link->link_id, 0, - &enc_init_data.encoder); enc_init_data.connector = link->link_id; enc_init_data.channel = get_ddc_line(link); enc_init_data.hpd_source = get_hpd_line(link); link->hpd_src = enc_init_data.hpd_source; - enc_init_data.transmitter = - translate_encoder_to_transmitter(enc_init_data.encoder); link->link_enc = link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); @@ -817,9 +869,6 @@ static bool construct_dpia(struct dc_link *link, link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; - /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */ - link->wa_flags.dp_mot_reset_segment = true; - return true; ddc_create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c index 267180e7bc48..5d2bcce2f669 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service, /*Lower than 340 Scramble bit from SCDC caps*/ if (ddc_service->link->local_sink && - ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite || + !ddc_service->link->local_sink->edid_caps.scdc_present)) return; link_query_ddc_data(ddc_service, slave_address, &offset, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index b12c11bd6a14..750147c52c8a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link) { bool force_disable = false; - if (link->fec_state == dc_link_fec_enabled) + if (link->dc->debug.disable_fec) + force_disable = true; + else if (link->fec_state == 
dc_link_fec_enabled) force_disable = false; else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && link->local_sink && @@ -424,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw( return link_rate; } +static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link) +{ + enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN; + + if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX) + lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + + /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */ + if (link->dpia_bw_alloc_config.bw_alloc_enabled && + link->dpia_bw_alloc_config.nrd_max_lane_count > 0) + lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count; + + return lttpr_max_lane_count; +} + static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) { @@ -438,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) break; } + /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */ + if (link->dpia_bw_alloc_config.bw_alloc_enabled && + link->dpia_bw_alloc_config.nrd_max_link_rate > 0) + lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate; + if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) lttpr_max_link_rate = LINK_RATE_UHBR20; else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) @@ -1845,6 +1867,12 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); + /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. 
*/ + if (link->dpcd_caps.is_mst_capable) + link->wa_flags.dp_mot_reset_segment = true; + else + link->wa_flags.dp_mot_reset_segment = false; + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); @@ -2241,6 +2269,7 @@ const struct dc_link_settings *dp_get_verified_link_cap( struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; + enum dc_lane_count lttpr_max_lane_count; enum dc_link_rate lttpr_max_link_rate; enum dc_link_rate cable_max_link_rate; struct resource_context *res_ctx = &link->dc->current_state->res_ctx; @@ -2305,8 +2334,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) /* Some LTTPR devices do not report valid DPCD revisions, if so, do not take it's link cap into consideration. */ if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) { - if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) - max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + lttpr_max_lane_count = get_lttpr_max_lane_count(link); + + if (lttpr_max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = lttpr_max_lane_count; + lttpr_max_link_rate = get_lttpr_max_link_rate(link); if (lttpr_max_link_rate < max_link_cap.link_rate) @@ -2412,6 +2444,11 @@ bool dp_verify_link_cap_with_retries( dp_trace_detect_lt_init(link); + DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d", + __func__, link->link_index, + known_limit_link_setting->link_rate, + known_limit_link_setting->lane_count); + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && link->dc->debug.usbc_combo_phy_reset_wa) apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); @@ -2448,6 +2485,11 @@ bool dp_verify_link_cap_with_retries( dp_trace_lt_fail_count_update(link, fail_count, true); dp_trace_set_lt_end_timestamp(link, true); + DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. 
is_success=%d fail_count=%d", + __func__, link->link_index, + success, + fail_count); + return success; } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 8a3c18ae97a7..c958d3f600c8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -225,11 +225,6 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) bool ret = false; uint8_t val; - if (link->dc->debug.dpia_debug.bits.enable_bw_allocation_mode == false) { - DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode disabled", __func__, link->link_index); - return false; - } - val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ; if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) { @@ -273,17 +268,28 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); - } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { + } + + if (status & DP_TUNNELING_BW_REQUEST_FAILED) { DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); - } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { + } + + if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) { + link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link); + + DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d", + __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity); + } + + if (status & 
DP_TUNNELING_ESTIMATED_BW_CHANGED) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c index 693477413347..4b01ab0a5a7f 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -398,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link) * Don't handle RX IRQ unless one of following is met: * 1) The link is established (cur_link_settings != unknown) * 2) We know we're dealing with a branch device, SST or MST + * 3) The link is bw_alloc enabled. */ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || - is_dp_branch_device(link)) + is_dp_branch_device(link) || + link->dpia_bw_alloc_config.bw_alloc_enabled) return true; return false; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c index 85298b8a1b5e..6bfd2c1294e5 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c @@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state( MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); } +void mpc3_read_reg_state( + struct mpc *mpc, + int mpcc_inst, struct dcn_mpc_reg_state *mpc_reg_state) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]); + mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]); + mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]); + mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]); + mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]); + mpc_reg_state->mpcc_top_sel = 
REG_READ(MPCC_TOP_SEL[mpcc_inst]); + +} + static const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc3_read_mpcc_state, .insert_plane = mpc1_insert_plane, @@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = { .release_rmu = mpcc3_release_rmu, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, .set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h index 103f29900a2c..e2f147d17178 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h @@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut( struct mpc *mpc, int mpcc_id, bool power_on); +void mpc3_read_reg_state( + struct mpc *mpc, + int mpcc_inst, + struct dcn_mpc_reg_state *mpc_reg_state); + void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst); enum dc_lut_mode mpc3_get_ogam_current( diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index 6f0e017a8ae2..83bbbf34bcac 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -1020,6 +1020,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .release_rmu = NULL, .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index e1a0308dee57..eeac13fdd6f5 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -598,6 +598,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = { .release_rmu = NULL, 
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, + .mpc_read_reg_state = mpc3_read_reg_state, .set_bg_color = mpc1_set_bg_color, .set_movable_cm_location = mpc401_set_movable_cm_location, .update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c index 71e9288d60ed..45d418636d0c 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c @@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable) REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval); } + +void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); + + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); +} + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = { .opp_program_dpg_dimensions = NULL, .dpg_is_blanked = NULL, .dpg_is_pending = NULL, - .opp_destroy = opp1_destroy + .opp_destroy = opp1_destroy, + .opp_read_reg_state = opp1_read_reg_state }; void dcn10_opp_construct(struct dcn10_opp *oppn10, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h index c87de68a509e..38d0d530a9b7 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h @@ -63,7 +63,8 @@ uint32_t OPPBUF_CONTROL1; \ uint32_t OPPBUF_3D_PARAMETERS_0; \ uint32_t OPPBUF_3D_PARAMETERS_1; \ - uint32_t 
OPP_PIPE_CONTROL + uint32_t OPP_PIPE_CONTROL; \ + uint32_t OPP_PIPE_CRC_CONTROL #define OPP_MASK_SH_LIST_DCN(mask_sh) \ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \ @@ -153,7 +154,6 @@ struct dcn10_opp { const struct dcn10_opp_registers *regs; const struct dcn10_opp_shift *opp_shift; const struct dcn10_opp_mask *opp_mask; - bool is_write_to_ram_a_safe; }; @@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable); void opp1_destroy(struct output_pixel_processor **opp); +void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c index f5fe0cac7cb0..ce826a5be4c7 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c @@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp return 0; } +void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + opp_reg_state->dpg_control = REG_READ(DPG_CONTROL); + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); + opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG); +} + /*****************************************/ /* Constructor, Destructor */ /*****************************************/ @@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = { .opp_destroy = opp1_destroy, .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel, .opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count, + .opp_read_reg_state = opp2_read_reg_state }; void 
dcn20_opp_construct(struct dcn20_opp *oppn20, diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h index 34936e6c49f3..fb0c047c1788 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h @@ -59,7 +59,8 @@ uint32_t DPG_COLOUR_G_Y; \ uint32_t DPG_COLOUR_R_CR; \ uint32_t DPG_RAMP_CONTROL; \ - uint32_t DPG_STATUS + uint32_t DPG_STATUS; \ + uint32_t DSCRM_DSC_FORWARD_CONFIG #define OPP_DPG_MASK_SH_LIST(mask_sh) \ OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \ @@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel ( uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp, enum dc_pixel_encoding pixel_encoding, bool is_primary); + +void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c index 3542b51c9aac..e11c4e16402f 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c @@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable) { REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable); } + +void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state) +{ + struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp); + + opp_reg_state->dpg_control = REG_READ(DPG_CONTROL); + opp_reg_state->fmt_control = REG_READ(FMT_CONTROL); + opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL); + opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL); + opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL); + opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL); + opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG); +} diff --git 
a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h index a9a413527801..c6cace90e8f2 100644 --- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h +++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h @@ -31,7 +31,8 @@ #define OPP_REG_VARIABLE_LIST_DCN3_5 \ OPP_REG_VARIABLE_LIST_DCN2_0; \ - uint32_t OPP_TOP_CLK_CONTROL + uint32_t OPP_TOP_CLK_CONTROL; \ + uint32_t OPP_ABM_CONTROL #define OPP_MASK_SH_LIST_DCN35(mask_sh) \ OPP_MASK_SH_LIST_DCN20(mask_sh), \ @@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20, void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable); +void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state); #endif diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 8b2a8455eb56..803bcc25601c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -209,7 +209,43 @@ uint32_t OPTC_WIDTH_CONTROL2; \ uint32_t OTG_PSTATE_REGISTER; \ uint32_t OTG_PIPE_UPDATE_STATUS; \ - uint32_t INTERRUPT_DEST + uint32_t INTERRUPT_DEST; \ + uint32_t OPTC_INPUT_SPARE_REGISTER; \ + uint32_t OPTC_RSMU_UNDERFLOW; \ + uint32_t OPTC_UNDERFLOW_THRESHOLD; \ + uint32_t OTG_COUNT_CONTROL; \ + uint32_t OTG_COUNT_RESET; \ + uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \ + uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \ + uint32_t OTG_DLPC_CONTROL; \ + uint32_t OTG_DRR_CONTROL2; \ + uint32_t OTG_DRR_TIMING_INT_STATUS; \ + uint32_t OTG_GLOBAL_CONTROL3; \ + uint32_t OTG_GLOBAL_SYNC_STATUS; \ + uint32_t OTG_GSL_VSYNC_GAP; \ + uint32_t OTG_INTERLACE_STATUS; \ + uint32_t OTG_INTERRUPT_CONTROL; \ + uint32_t OTG_LONG_VBLANK_STATUS; \ + uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \ + uint32_t OTG_MASTER_EN; \ + uint32_t OTG_PIXEL_DATA_READBACK0; \ + uint32_t OTG_PIXEL_DATA_READBACK1; \ + uint32_t OTG_REQUEST_CONTROL; \ + 
uint32_t OTG_SNAPSHOT_CONTROL; \ + uint32_t OTG_SNAPSHOT_FRAME; \ + uint32_t OTG_SNAPSHOT_POSITION; \ + uint32_t OTG_SNAPSHOT_STATUS; \ + uint32_t OTG_SPARE_REGISTER; \ + uint32_t OTG_STATUS_HV_COUNT; \ + uint32_t OTG_STATUS_VF_COUNT; \ + uint32_t OTG_STEREO_FORCE_NEXT_EYE; \ + uint32_t OTG_TRIG_MANUAL_CONTROL; \ + uint32_t OTG_TRIGB_CNTL; \ + uint32_t OTG_TRIGB_MANUAL_TRIG; \ + uint32_t OTG_UPDATE_LOCK; \ + uint32_t OTG_V_TOTAL_INT_STATUS; \ + uint32_t OTG_VSYNC_NOM_INT_STATUS + struct dcn_optc_registers { OPTC_REG_VARIABLE_LIST_DCN; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c index 4f1830ba619f..c6417538090f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c @@ -315,6 +315,136 @@ void optc31_read_otg_state(struct timing_generator *optc, s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } +void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL); + optc_reg_state->optc_data_format_control = REG_READ(OPTC_DATA_FORMAT_CONTROL); + optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT); + optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL); + optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL); + optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER); + optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG); + optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW); + optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD); + optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL); + optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL); + 
optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL); + optc_reg_state->otg_control = REG_READ(OTG_CONTROL); + optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL); + optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET); + optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL); + optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK); + optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK); + optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B); + optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG); + optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL); + optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL); + optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL); + optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK); + optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL); + optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK); + optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B); + optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG); + optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL); + optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL); + optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL); + 
optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK); + optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL); + optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK); + optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B); + optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG); + optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B); + optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG); + optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL); + optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); + optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2); + optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL); + optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS); + optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW); + optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE); + optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION); + optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL); + optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0); + optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1); + optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2); + optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3); + optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4); + optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS); + optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL); + optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP); + optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X); + optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y); + optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END); + 
optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A); + optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL); + optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL); + optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL); + optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL); + optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS); + optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL); + optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS); + optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0); + optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1); + optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE); + optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN); + optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK); + optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE); + optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION); + optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS); + optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0); + optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1); + optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL); + optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL); + optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME); + optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION); + optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS); + optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER); + optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL); + optc_reg_state->otg_status = REG_READ(OTG_STATUS); + optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT); + optc_reg_state->otg_status_hv_count = 
REG_READ(OTG_STATUS_HV_COUNT); + optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION); + optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT); + optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL); + optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE); + optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS); + optc_reg_state->otg_trig_manual_control = REG_READ(OTG_TRIG_MANUAL_CONTROL); + optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL); + optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG); + optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL); + optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG); + optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK); + optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END); + optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL); + optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2); + optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A); + optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL); + optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL); + optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL); + optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS); + optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX); + optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID); + optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN); + optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL); + optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL); + optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION); + optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL); + optc_reg_state->otg_vertical_interrupt1_position = 
REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION); + optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL); + optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION); + optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM); + optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM); + optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS); + optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT); + optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM); +} + static const struct timing_generator_funcs dcn31_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -377,6 +507,7 @@ static const struct timing_generator_funcs dcn31_tg_funcs = { .init_odm = optc3_init_odm, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn31_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h index 0f72c274f40b..98f7d2e299c5 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h @@ -274,4 +274,6 @@ void optc3_init_odm(struct timing_generator *optc); void optc31_read_otg_state(struct timing_generator *optc, struct dcn_otg_state *s); +void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state); + #endif /* __DC_OPTC_DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c index 4a2caca37255..43ff957288b2 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c @@ -256,6 +256,7 @@ static const struct timing_generator_funcs 
dcn314_tg_funcs = { .set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn314_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index b2b226bcd871..3dcb0d0c931c 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -365,6 +365,7 @@ static const struct timing_generator_funcs dcn32_tg_funcs = { .get_otg_double_buffer_pending = optc3_get_otg_update_pending, .get_pipe_update_pending = optc3_get_pipe_update_pending, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn32_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 52d5ea98c86b..f699e95059f3 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -511,6 +511,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = { .set_long_vtotal = optc35_set_long_vtotal, .is_two_pixels_per_container = optc1_is_two_pixels_per_container, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn35_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c index 5af13706e601..a8e978d1fae8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c @@ -533,6 +533,7 @@ static const struct timing_generator_funcs dcn401_tg_funcs = { .set_vupdate_keepout = optc401_set_vupdate_keepout, 
.wait_update_lock_status = optc401_wait_update_lock_status, .read_otg_state = optc31_read_otg_state, + .optc_read_reg_state = optc31_read_reg_state, }; void dcn401_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index c4b4dc3ad8c9..d40d91ec2035 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -78,6 +78,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -225,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -368,6 +370,7 @@ static const struct dce_abm_mask abm_mask = { #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -402,8 +406,10 @@ static const struct dc_plane_cap plane_cap = { } }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; #define CTX ctx @@ -484,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, 
ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); return &enc110->base; @@ -624,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -952,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( int i; int j = -1; struct dc_link *link = stream->link; + enum engine_id preferred_engine = link->link_enc->preferred_engine; + + if (dc_is_rgb_signal(stream->signal)) + preferred_engine = link->link_enc->analog_engine; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && @@ -960,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link( * in daisy chain use case */ j = i; - if (pool->stream_enc[i]->id == - link->link_enc->preferred_engine) + if (pool->stream_enc[i]->id == preferred_engine) return pool->stream_enc[i]; } } @@ -1093,6 +1120,7 @@ static bool dce100_resource_construct( dc->caps.disable_dp_clk_share = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index cccde5a6f3cd..cd54382c0af3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -82,6 +82,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = { 64 }; -static const struct dc_debug_options debug_defaults = { +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { .enable_legacy_fast_update = true, }; @@ -1376,6 +1380,7 @@ static bool dce110_resource_construct( dc->caps.is_apu = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 869a8e515fc0..3f0a6bc4dcc2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -76,6 +76,7 @@ #endif #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = { 64 }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const struct dc_debug_options 
debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; #define CTX ctx @@ -1247,6 +1251,7 @@ static bool dce112_resource_construct( dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 540e04ec1e2d..b1570b6b1af3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -491,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create( return dce_i2c_hw; } static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX), .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; @@ -526,8 +527,11 @@ static const struct dc_plane_cap plane_cap = { }; static const struct dc_debug_options debug_defaults = { - .disable_clock_gate = true, - .enable_legacy_fast_update = true, + .disable_clock_gate = true, +}; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static struct clock_source *dce120_clock_source_create( @@ -1089,6 +1093,7 @@ static bool dce120_resource_construct( dc->caps.psp_setup_panel_mode = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index b75be6ad64f6..f0152933bee2 100644 --- 
a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -80,6 +80,7 @@ #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -240,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(2), link_regs(3), link_regs(4), - link_regs(5) + link_regs(5), + {0}, + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -366,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -373,6 +377,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -382,6 +387,7 @@ static const struct resource_caps res_cap_61 = { .num_timing_generator = 4, .num_audio = 6, .num_stream_encoder = 6, + .num_analog_stream_encoder = 1, .num_pll = 3, .num_ddc = 6, }; @@ -389,6 +395,7 @@ static const struct resource_caps res_cap_61 = { static const struct resource_caps res_cap_64 = { .num_timing_generator = 2, .num_audio = 2, + .num_analog_stream_encoder = 1, .num_stream_encoder = 2, .num_pll = 3, .num_ddc = 2, @@ -599,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -718,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int 
link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 5b7769745202..8687104cabb7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -78,6 +78,7 @@ #ifndef mmBIOS_SCRATCH_2 + #define mmBIOS_SCRATCH_0 0x05C9 #define mmBIOS_SCRATCH_2 0x05CB #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF @@ -241,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = { link_regs(4), link_regs(5), link_regs(6), + { .DAC_ENABLE = mmDAC_ENABLE }, }; #define stream_enc_regs(id)\ @@ -368,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0, .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -375,6 +378,7 @@ static const struct bios_registers bios_regs = { static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 3, .num_ddc = 6, @@ -383,6 +387,7 @@ static const struct resource_caps res_cap = { static const struct resource_caps res_cap_81 = { .num_timing_generator = 4, .num_audio = 7, + .num_analog_stream_encoder = 1, .num_stream_encoder = 7, .num_pll = 3, .num_ddc = 6, @@ -391,6 +396,7 @@ static const struct resource_caps res_cap_81 = { static const struct resource_caps res_cap_83 = { .num_timing_generator = 2, 
.num_audio = 6, + .num_analog_stream_encoder = 1, .num_stream_encoder = 6, .num_pll = 2, .num_ddc = 2, @@ -418,8 +424,10 @@ static const struct dc_plane_cap plane_cap = { } }; -static const struct dc_debug_options debug_defaults = { - .enable_legacy_fast_update = true, +static const struct dc_debug_options debug_defaults = { 0 }; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static const struct dce_dmcu_registers dmcu_regs = { @@ -605,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create( if (!enc110) return NULL; + if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) { + dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id); + return &enc110->base; + } + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); @@ -724,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create( kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) + if (!enc110) + return NULL; + + if (enc_init_data->connector.id == CONNECTOR_ID_VGA) { + dce110_link_encoder_construct(enc110, + enc_init_data, + &link_enc_feature, + &link_enc_regs[ENGINE_ID_DACA], + NULL, + NULL); + return &enc110->base; + } + + if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = @@ -919,6 +945,7 @@ static bool dce80_construct( dc->caps.dual_link_dvi = true; dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * @@ -1320,6 +1347,7 @@ static bool dce83_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.is_apu = true; dc->debug = debug_defaults; + dc->check_config = config_defaults; /************************************************* * Create resources * diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 652c05c35494..f12367adf145 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -556,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = { .recovery_enabled = false, /*enable this by default after testing.*/ .max_downscale_src_width = 3840, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn10_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN10_DPP(*dpp)); @@ -1395,6 +1398,8 @@ static bool dcn10_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 0; + dc->debug = debug_defaults_drv; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 84b38d2d6967..6679c1a14f2f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + void dcn20_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -733,7 +736,7 @@ struct dpp *dcn20_dpp_create( uint32_t inst) { struct dcn20_dpp *dpp = - kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC); + kzalloc(sizeof(struct 
dcn20_dpp), GFP_KERNEL); if (!dpp) return NULL; @@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { BREAK_TO_DEBUGGER(); @@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_opp *opp = - kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); if (!opp) { BREAK_TO_DEBUGGER(); @@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create( uint32_t inst) { struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; @@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create( uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; @@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create( } struct mpc *dcn20_mpc_create(struct dc_context *ctx) { - struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), - GFP_ATOMIC); + struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL); if (!mpc20) return NULL; @@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx) struct hubbub *dcn20_hubbub_create(struct dc_context *ctx) { int i; - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; @@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create( uint32_t instance) { struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); + kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; @@ -962,7 +963,7 @@ static struct clock_source 
*dcn20_clock_source_create( bool dp_clk_src) { struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; @@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst) { struct dcn20_dsc *dsc = - kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); if (!dsc) { BREAK_TO_DEBUGGER(); @@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create( uint32_t inst) { struct dcn20_hubp *hubp2 = - kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); if (!hubp2) return NULL; @@ -1668,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding; if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) return false; @@ -2286,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { - struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC); + struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); if (!pp_smu) return pp_smu; @@ -2472,6 +2474,7 @@ static bool dcn20_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -2765,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool( struct dc *dc) { struct dcn20_resource_pool *pool = - kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL); if (!pool) return NULL; diff --git 
a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index e4a1338d21e0..055107843a70 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, .enable_tri_buf = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn201_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN201_DPP(*dpp)); @@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create( uint32_t inst) { struct dcn201_dpp *dpp = - kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL); if (!dpp) return NULL; @@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create( struct dc_context *ctx, uint32_t inst) { struct dcn10_ipp *ipp = - kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL); if (!ipp) { return NULL; @@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create( struct dc_context *ctx, uint32_t inst) { struct dcn201_opp *opp = - kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL); if (!opp) { return NULL; @@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, uint32_t inst) { struct aux_engine_dce110 *aux_engine = - kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); + kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); if (!aux_engine) return NULL; @@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, uint32_t inst) { struct dce_i2c_hw *dce_i2c_hw = - kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); + kzalloc(sizeof(struct 
dce_i2c_hw), GFP_KERNEL); if (!dce_i2c_hw) return NULL; @@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) { - struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), - GFP_ATOMIC); + struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL); if (!mpc201) return NULL; @@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc) static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx) { - struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), - GFP_ATOMIC); + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL); if (!hubbub) return NULL; @@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create( uint32_t instance) { struct optc *tgn10 = - kzalloc(sizeof(struct optc), GFP_ATOMIC); + kzalloc(sizeof(struct optc), GFP_KERNEL); if (!tgn10) return NULL; @@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create( const struct encoder_init_data *enc_init_data) { struct dcn20_link_encoder *enc20 = - kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC); + kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); struct dcn10_link_encoder *enc10; if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) @@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create( bool dp_clk_src) { struct dce110_clk_src *clk_src = - kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC); + kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL); if (!clk_src) return NULL; @@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create( struct dc_context *ctx) { struct dcn10_stream_encoder *enc1 = - kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC); + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); if (!enc1) return NULL; @@ -883,7 +884,7 @@ static const struct 
dce_hwseq_mask hwseq_mask = { static struct dce_hwseq *dcn201_hwseq_create( struct dc_context *ctx) { - struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC); + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); if (hws) { hws->ctx = ctx; @@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create( uint32_t inst) { struct dcn201_hubp *hubp201 = - kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL); if (!hubp201) return NULL; @@ -1153,6 +1154,7 @@ static bool dcn201_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->debug = debug_defaults_drv; + dc->check_config = config_defaults; /*a0 only, remove later*/ dc->work_arounds.no_connect_phy_config = true; @@ -1303,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool( struct dc *dc) { struct dcn201_resource_pool *pool = - kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC); + kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL); if (!pool) return NULL; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 918742a42ded..2060acd5ae09 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = { .usbc_combo_phy_reset_wa = true, .dmub_command_table = true, .use_max_lb = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1458,6 +1461,7 @@ static bool dcn21_resource_construct( dc->caps.color.mpc.ocsc = 1; dc->caps.dp_hdmi21_pcon_support = true; + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = 
debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index ff63f59ff928..d0ebb733e802 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct( dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 82a205a7c25c..3ad6a3d4858e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = false, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static void dcn301_dpp_destroy(struct dpp **dpp) { kfree(TO_DCN20_DPP(*dpp)); @@ -1498,6 +1501,7 @@ static bool dcn301_resource_construct( bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == 
BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 61623cb518d9..c0d4a1dc94f8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1290,6 +1293,7 @@ static bool dcn302_resource_construct( &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index 02b9a84f2db3..75e09c2c283e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .use_max_lb = true, .exit_idle_opt_for_cursor_updates = true, - .enable_legacy_fast_update = false, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ 
-1234,6 +1237,7 @@ static bool dcn303_resource_construct( bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 3ed7f50554e2..0d667b54ccf8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = { } }, .disable_z10 = true, - .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1978,6 +1981,7 @@ static bool dcn31_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index d4917a35b991..3ccde75a4ecb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -924,12 +924,15 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = true, - .enable_legacy_fast_update = true, .using_dml2 = false, .disable_dsc_power_gate = true, .min_disp_clk_khz = 100000, }; 
+static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1910,6 +1913,7 @@ static bool dcn314_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 82cc78c291d8..4e962f522f1b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, - .enable_legacy_fast_update = true, .psr_power_use_phy_fsm = 0, .using_dml2 = false, + .min_disp_clk_khz = 100000, +}; + +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, }; static const struct dc_panel_config panel_config_defaults = { @@ -1939,6 +1943,7 @@ static bool dcn315_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 636110e48d01..5a95dd54cb42 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = { .afmt = true, } }, - .enable_legacy_fast_update = true, .using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults 
= { .psr = { .disable_psr = false, @@ -1815,6 +1818,7 @@ static bool dcn316_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 3965a7f1b64b..81e64e17d0cb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -92,7 +92,7 @@ #include "dc_state_priv.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -738,10 +738,13 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dp_plus_plus_wa = true, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, .disable_stutter_for_wm_program = true }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn32_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -2294,6 +2297,7 @@ static bool dcn32_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index ad214986f7ac..3466ca34c93f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_subvp_high_refresh = false, .fpo_vactive_min_active_margin_us = 200, .fpo_vactive_max_blank_us = 1000, - .enable_legacy_fast_update = false, .disable_dc_mode_overwrite = true, 
.using_dml2 = false, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn321_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1797,6 +1800,7 @@ static bool dcn321_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index fff57f23f4f7..ef69898d2cc5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -33,7 +33,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn35_resource.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" @@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1946,6 +1949,7 @@ static bool dcn35_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 
0abd163b425e..f3c614c4490c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -83,7 +83,7 @@ #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "link_enc_cfg.h" #define DC_LOGGER_INIT(logger) @@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1917,6 +1920,7 @@ static bool dcn351_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index ca125ee6c2fb..6469d5fe2e6d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -11,7 +11,7 @@ #include "resource.h" #include "include/irq_service_interface.h" #include "dcn36_resource.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #include "dcn20/dcn20_resource.h" #include "dcn30/dcn30_resource.h" @@ -748,7 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, - .enable_legacy_fast_update = true, 
.enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, @@ -769,6 +768,10 @@ static const struct dc_debug_options debug_defaults_drv = { .min_disp_clk_khz = 50000, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = true, +}; + static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1918,6 +1921,7 @@ static bool dcn36_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index 1d18807e4749..130058d7a70c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -73,7 +73,7 @@ #include "dc_state_priv.h" -#include "dml2/dml2_wrapper.h" +#include "dml2_0/dml2_wrapper.h" #define DC_LOGGER_INIT(logger) @@ -721,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = { .alloc_extra_way_for_cursor = true, .min_prefetch_in_strobe_ns = 60000, // 60us .disable_unbounded_requesting = false, - .enable_legacy_fast_update = false, .dcc_meta_propagation_delay_us = 10, .fams_version = { .minor = 1, @@ -737,6 +736,10 @@ static const struct dc_debug_options debug_defaults_drv = { .force_cositing = CHROMA_COSITING_NONE + 1, }; +static const struct dc_check_config config_defaults = { + .enable_legacy_fast_update = false, +}; + static struct dce_aux *dcn401_aux_engine_create( struct dc_context *ctx, uint32_t inst) @@ -1995,6 +1998,7 @@ static bool dcn401_resource_construct( dc->caps.vbios_lttpr_aware = true; } } + dc->check_config = config_defaults; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; diff --git 
a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h index 21d842857601..88c11b6be004 100644 --- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h +++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h @@ -9,7 +9,7 @@ #include "dc.h" #include "clk_mgr.h" #include "soc_and_ip_translator.h" -#include "dml2/dml21/inc/dml_top_soc_parameter_types.h" +#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h" void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator); diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index b1fb0f8a253a..7a839984dbc0 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -1018,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps( spl_scratch->scl_data.taps.h_taps_c = 6; spl_scratch->scl_data.taps.v_taps_c = 6; } + + /* Override mode: keep EASF enabled but use input taps if valid */ + if (spl_in->override_easf) { + spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps; + spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps; + spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c; + spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? 
in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c; + + if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6)) + skip_easf = true; + if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2)) + spl_scratch->scl_data.taps.h_taps--; + if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2)) + spl_scratch->scl_data.taps.h_taps_c--; + } } /*Ensure we can support the requested number of vtaps*/ diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 23d254dea18f..20e4e52a77ac 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -545,6 +545,7 @@ struct spl_in { enum linear_light_scaling lls_pref; // Linear Light Scaling bool prefer_easf; bool disable_easf; + bool override_easf; /* If true, keep EASF enabled but use provided in_taps */ struct spl_debug debug; bool is_fullscreen; bool is_hdr_on; diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 338fdc651f2c..9d0168986fe7 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -132,6 +132,7 @@ enum dmub_window_id { DMUB_WINDOW_IB_MEM, DMUB_WINDOW_SHARED_STATE, DMUB_WINDOW_LSDMA_BUFFER, + DMUB_WINDOW_CURSOR_OFFLOAD, DMUB_WINDOW_TOTAL, }; @@ -317,6 +318,7 @@ struct dmub_srv_hw_params { bool enable_non_transparent_setconfig; bool lower_hbr3_phy_ssc; bool override_hbr3_pll_vco; + bool disable_dpia_bw_allocation; }; /** @@ -361,6 +363,19 @@ struct dmub_diagnostic_data { uint8_t is_pwait : 1; }; +/** + * struct dmub_preos_info - preos fw info before loading post os fw. 
+ */ +struct dmub_preos_info { + uint64_t fb_base; + uint64_t fb_offset; + uint64_t trace_buffer_phy_addr; + uint32_t trace_buffer_size; + uint32_t fw_version; + uint32_t boot_status; + uint32_t boot_options; +}; + struct dmub_srv_inbox { /* generic status */ uint64_t num_submitted; @@ -486,6 +501,7 @@ struct dmub_srv_hw_funcs { uint32_t (*get_current_time)(struct dmub_srv *dmub); void (*get_diagnostic_data)(struct dmub_srv *dmub); + bool (*get_preos_fw_info)(struct dmub_srv *dmub); bool (*should_detect)(struct dmub_srv *dmub); void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx); @@ -535,7 +551,8 @@ struct dmub_srv_create_params { * @fw_version: the current firmware version, if any * @is_virtual: false if hardware support only * @shared_state: dmub shared state between firmware and driver - * @fw_state: dmub firmware state pointer + * @cursor_offload_v1: Cursor offload state + * @fw_state: dmub firmware state pointer (debug purpose only) */ struct dmub_srv { enum dmub_asic asic; @@ -544,7 +561,9 @@ struct dmub_srv { bool is_virtual; struct dmub_fb scratch_mem_fb; struct dmub_fb ib_mem_gart; + struct dmub_fb cursor_offload_fb; volatile struct dmub_shared_state_feature_block *shared_state; + volatile struct dmub_cursor_offload_v1 *cursor_offload_v1; volatile const struct dmub_fw_state *fw_state; /* private: internal use only */ @@ -583,6 +602,7 @@ struct dmub_srv { enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; struct dmub_fb lsdma_rb_fb; + struct dmub_preos_info preos_info; }; /** @@ -1068,4 +1088,14 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub, */ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub); +/** + * dmub_srv_get_preos_info() - retrieves preos fw info + * @dmub: the dmub service + * + * Return: + * true - preos fw info retrieved successfully + * false - preos fw info not retrieved successfully + */ +bool dmub_srv_get_preos_info(struct dmub_srv *dmub); + #endif /* 
_DMUB_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 92248224b713..772e07a1a959 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -485,7 +485,13 @@ union replay_debug_flags { */ uint32_t enable_visual_confirm_debug : 1; - uint32_t reserved : 18; + /** + * 0x4000 (bit 14) + * @debug_log_enabled: Debug Log Enabled + */ + uint32_t debug_log_enabled : 1; + + uint32_t reserved : 17; } bitfields; uint32_t u32All; @@ -629,6 +635,112 @@ struct dmub_visual_confirm_color { uint16_t panel_inst; }; +/** + * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data. + */ +struct dmub_cursor_offload_pipe_data_dcn30_v1 { + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16; + uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5; + uint32_t reserved0[4]; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1; + uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1; + uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24; + uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24; + uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 
16; + uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE : 16; + uint32_t reserved1[5]; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8; + uint32_t reserved2[3]; +}; + +/** + * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data. + */ +struct dmub_cursor_offload_pipe_data_dcn401_v1 { + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS; + uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16; + uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16; + uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16; + uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2; + uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5; + uint32_t reserved0[4]; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1; + uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1; + uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24; + uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16; + uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16; + uint32_t reserved1[4]; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8; + uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8; +
uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1; + uint32_t reserved2[3]; +}; + +/** + * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload. + */ +struct dmub_cursor_offload_pipe_data_v1 { + union { + struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */ + struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */ + uint8_t payload[96]; /**< Guarantees the cursor pipe data size per-pipe. */ + }; +}; + +/** + * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data. + */ +struct dmub_cursor_offload_payload_data_v1 { + uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */ + uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */ + uint32_t pipe_mask; /**< Mask of pipes to update. */ + uint32_t reserved; /**< Reserved for future use. */ + struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */ +}; + +/** + * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload. + */ +struct dmub_cursor_offload_stream_v1 { + struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */ + uint32_t write_idx; /**< The index of the last written payload. */ +}; + +/** + * struct dmub_cursor_offload_v1 - Cursor offload feature state. 
+ */ +struct dmub_cursor_offload_v1 { + struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */ +}; + //============================================================================== //================================================================= //============================================================================== @@ -648,7 +760,8 @@ struct dmub_visual_confirm_color { union dmub_fw_meta_feature_bits { struct { uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */ - uint32_t reserved : 31; + uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */ + uint32_t reserved : 30; } bits; /**< status bits */ uint32_t all; /**< 32-bit access to status bits */ }; @@ -813,6 +926,28 @@ enum dmub_ips_comand_type { DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1, }; +/** + * enum dmub_cursor_offload_comand_type - Cursor offload subcommands. + */ +enum dmub_cursor_offload_comand_type { + /** + * Initializes the cursor offload feature. + */ + DMUB_CMD__CURSOR_OFFLOAD_INIT = 0, + /** + * Enables cursor offloading for a stream and updates the timing parameters. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1, + /** + * Disables cursor offloading for a given stream. + */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2, + /** + * Programs the latest data for a given stream. 
+ */ + DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -844,7 +979,8 @@ union dmub_fw_boot_options { uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */ uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */ - uint32_t reserved : 5; /**< reserved */ + uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */ + uint32_t reserved : 4; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -877,6 +1013,7 @@ enum dmub_shared_state_feature_id { DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1, DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2, DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3, + DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4, DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */ }; @@ -957,6 +1094,22 @@ struct dmub_shared_state_ips_driver { uint32_t reserved[61]; /**< Reversed, to be updated when adding new fields. */ }; /* 248-bytes, fixed */ +/** + * struct dmub_shared_state_cursor_offload_stream_v1 - Per-stream metadata for cursor offload. + */ +struct dmub_shared_state_cursor_offload_stream_v1 { + uint32_t last_write_idx; /**< Last write index */ + uint8_t reserved[28]; /**< Reserved bytes. */ +}; /* 32-bytes, fixed */ + +/** + * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload. + */ +struct dmub_shared_state_cursor_offload_v1 { + struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */ + uint8_t reserved[56]; /**< reserved for future use */ +}; /* 248-bytes, fixed */ + /** * enum dmub_shared_state_feature_common - Generic payload. 
*/ @@ -983,6 +1136,7 @@ struct dmub_shared_state_feature_block { struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */ struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */ struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */ + struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */ } data; /**< Shared state data. */ }; /* 256-bytes, fixed */ @@ -1572,6 +1726,19 @@ enum dmub_cmd_type { */ DMUB_CMD__IPS = 91, + /** + * Command type used for Cursor offload. + */ + DMUB_CMD__CURSOR_OFFLOAD = 92, + + /** + * Command type used for all SMART_POWER_HDR commands. + */ + DMUB_CMD__SMART_POWER_HDR = 93, + + /** + * Command type used for VBIOS shared commands. + */ DMUB_CMD__VBIOS = 128, }; @@ -4237,6 +4404,45 @@ enum replay_enable { REPLAY_ENABLE = 1, }; +/** + * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_HDR_ENABLE command. + */ +struct dmub_rb_cmd_smart_power_hdr_enable_data { + /** + * SMART_POWER_HDR enable or disable. + */ + uint8_t enable; + /** + * Panel Instance. + * Panel instance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + + uint16_t peak_nits; + /** + * OTG HW instance. + */ + uint8_t otg_inst; + /** + * DIG FE HW instance. + */ + uint8_t digfe_inst; + /** + * DIG BE HW instance. + */ + uint8_t digbe_inst; + uint8_t debugcontrol; + /* + * vertical interrupt trigger line + */ + uint32_t triggerline; + + uint16_t fixed_max_cll; + + uint8_t pad[2]; +}; + /** * Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command. */ @@ -4408,9 +4614,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { */ uint16_t coasting_vtotal_high; /** - * Explicit padding to 4 byte boundary. + * frame skip number. 
*/ - uint8_t pad[2]; + uint16_t frame_skip_number; }; /** @@ -4570,6 +4776,58 @@ union dmub_replay_cmd_set { struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data; }; +/** + * SMART POWER HDR command sub-types. + */ +enum dmub_cmd_smart_power_hdr_type { + + /** + * Enable/Disable SMART_POWER_HDR. + */ + DMUB_CMD__SMART_POWER_HDR_ENABLE = 1, + /** + * Get current MaxCLL value if SMART POWER HDR is enabled. + */ + DMUB_CMD__SMART_POWER_HDR_GETMAXCLL = 2, +}; + +/** + * Definition of a DMUB_CMD__SMART_POWER_HDR command. + */ +struct dmub_rb_cmd_smart_power_hdr_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + struct dmub_rb_cmd_smart_power_hdr_enable_data data; +}; + +struct dmub_cmd_smart_power_hdr_getmaxcll_input { + uint8_t panel_inst; + uint8_t pad[3]; +}; + +struct dmub_cmd_smart_power_hdr_getmaxcll_output { + uint16_t current_max_cll; + uint8_t pad[2]; +}; + +/** + * Definition of a DMUB_CMD__SMART_POWER_HDR command. + */ +struct dmub_rb_cmd_smart_power_hdr_getmaxcll { + struct dmub_cmd_header header; /**< Command header */ + /** + * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_HDR_GETMAXCLL command. + */ + union dmub_cmd_smart_power_hdr_getmaxcll_data { + struct dmub_cmd_smart_power_hdr_getmaxcll_input input; /**< Input */ + struct dmub_cmd_smart_power_hdr_getmaxcll_output output; /**< Output */ + uint32_t output_raw; /**< Raw data output */ + } data; +}; + /** * Set of HW components that can be locked. * @@ -4652,6 +4910,7 @@ enum hw_lock_client { */ HW_LOCK_CLIENT_REPLAY = 4, HW_LOCK_CLIENT_FAMS2 = 5, + HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6, /** * Invalid client. */ @@ -6063,6 +6322,40 @@ struct dmub_rb_cmd_ips_query_residency_info { struct dmub_cmd_ips_query_residency_info_data info_data; }; +/** + * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command. 
+ */ +struct dmub_cmd_cursor_offload_init_data { + union dmub_addr state_addr; /**< State address for dmub_cursor_offload */ + uint32_t state_size; /**< State size for dmub_cursor_offload */ +}; + +/** + * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload. + */ +struct dmub_rb_cmd_cursor_offload_init { + struct dmub_cmd_header header; + struct dmub_cmd_cursor_offload_init_data init_data; +}; + +/** + * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command. + */ +struct dmub_cmd_cursor_offload_stream_data { + uint32_t otg_inst: 4; /**< OTG instance to control */ + uint32_t reserved: 28; /**< Reserved for future use */ + uint32_t line_time_in_ns; /**< Line time in ns for the OTG */ + uint32_t v_total_max; /**< OTG v_total_max */ +}; + +/** + * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload. + */ +struct dmub_rb_cmd_cursor_offload_stream_cntl { + struct dmub_cmd_header header; + struct dmub_cmd_cursor_offload_stream_data data; +}; + /** * union dmub_rb_cmd - DMUB inbox command. */ @@ -6392,6 +6685,26 @@ union dmub_rb_cmd { struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl; struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info; + /** + * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command. + */ + struct dmub_rb_cmd_cursor_offload_init cursor_offload_init; + /** + * Definition of DMUB_CMD__CURSOR_OFFLOAD control commands. + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM + * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR + */ + struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_ctnl; + /** + * Definition of a DMUB_CMD__SMART_POWER_HDR_ENABLE command. + */ + struct dmub_rb_cmd_smart_power_hdr_enable smart_power_hdr_enable; + /** + * Definition of a DMUB_CMD__SMART_POWER_HDR_GETMAXCLL command. 
+ */ + struct dmub_rb_cmd_smart_power_hdr_getmaxcll smart_power_hdr_getmaxcll; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 4777c7203b2c..cd04d7c756c3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -380,6 +380,7 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco; boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0; + boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index ce041f6239dc..7e9856289910 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -89,50 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in, void dmub_dcn32_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; - const uint32_t timeout = 100000; - uint32_t in_reset, is_enabled, scratch, i, pwait_mode; + const uint32_t timeout_us = 1 * 1000 * 1000; //1s + const uint32_t poll_delay_us = 1; //1us + uint32_t i = 0; + uint32_t enabled, in_reset, scratch, pwait_mode; - REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); + REG_GET(DMCUB_CNTL, + DMCUB_ENABLE, &enabled); + REG_GET(DMCUB_CNTL2, + DMCUB_SOFT_RESET, &in_reset); - if (in_reset == 0 && is_enabled != 0) { + if (enabled && in_reset == 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - for (i = 0; i < timeout; ++i) { - if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) - break; - - udelay(1); - } - - for (i = 0; i < timeout; ++i) { 
+ for (; i < timeout_us; i++) { scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; - udelay(1); + udelay(poll_delay_us); } - for (i = 0; i < timeout; ++i) { + for (; i < timeout_us; i++) { REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); if (pwait_mode & (1 << 0)) break; - udelay(1); + udelay(poll_delay_us); } - /* Force reset in case we timed out, DMCUB is likely hung. */ } - if (is_enabled) { + if (enabled) { REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); udelay(1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); } + if (i >= timeout_us) { + /* timeout should never occur */ + BREAK_TO_DEBUGGER(); + } + + REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -141,7 +149,7 @@ void dmub_dcn32_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); - /* Clear the GPINT command manually so we don't send anything during boot. */ + /* Clear the GPINT command manually so we don't reset again. 
*/ cmd.all = 0; dmub->hw_funcs.set_gpint(dmub, cmd); } @@ -163,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub, dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset); + /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); @@ -193,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub, { union dmub_addr offset; + /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); offset = cw0->offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 834e5434ccb8..e13557ed97be 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -418,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_sldo_opt = params->disable_sldo_opt; boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig; boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc; + boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } @@ -520,6 +521,45 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); } + +bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub) +{ + uint64_t region3_cw5_offset; + uint32_t top_addr, top_addr_enable, offset_low; + uint32_t offset_high, base_addr, fw_version; + bool is_vbios_fw = false; + + memset(&dmub->preos_info, 0, sizeof(dmub->preos_info)); + + fw_version = REG_READ(DMCUB_SCRATCH1); + is_vbios_fw = ((fw_version >> 6) & 0x01) ? 
true : false; + if (!is_vbios_fw) + return false; + + dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0); + dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1); + dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14); + REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS, + DMCUB_REGION3_CW5_ENABLE, &top_addr_enable); + if (top_addr_enable) { + dmub_dcn35_get_fb_base_offset(dmub, + &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset); + offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET); + offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH); + region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low; + dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset + - dmub->preos_info.fb_base + dmub->preos_info.fb_offset; + + REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS, + DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr); + base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF; + dmub->preos_info.trace_buffer_size = + (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0; + } + + return true; +} + void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { /* DMCUB_REGION3_TMR_AXI_SPACE values: diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h index 39fcb7275da5..92e6695a2c9b 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h @@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub); void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx); +bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub); + #endif /* _DMUB_DCN35_H_ */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index b31adbd0d685..95542299e3b3 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -81,7 +81,7 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) 
dmub->hw_funcs.set_gpint(dmub, cmd); for (; i < timeout_us; i++) { - scratch = dmub->hw_funcs.get_gpint_response(dmub); + scratch = REG_READ(DMCUB_SCRATCH7); if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) break; @@ -97,11 +97,24 @@ void dmub_dcn401_reset(struct dmub_srv *dmub) } } + if (enabled) { + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); + udelay(1); + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); + } + if (i >= timeout_us) { /* timeout should never occur */ BREAK_TO_DEBUGGER(); } + REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0); + REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0); + REG_WRITE(DMCUB_INBOX1_RPTR, 0); REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); @@ -134,7 +147,6 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub, /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); @@ -168,7 +180,6 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub, /* reset and disable DMCUB and MMHUBBUB DMUIF */ REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); offset = cw0->offset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index b17a19400c06..a657efda89ce 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -359,6 +359,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv 
*dmub, enum dmub_asic asic) funcs->get_current_time = dmub_dcn35_get_current_time; funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data; + funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info; funcs->init_reg_offsets = dmub_srv_dcn35_regs_init; if (asic == DMUB_ASIC_DCN351) @@ -564,10 +565,11 @@ enum dmub_status window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE; window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; - window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64); window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE; window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE; + window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64); out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); @@ -652,21 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX]; struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; - struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; - struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM]; struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6; struct dmub_region inbox1, outbox1, outbox0; + uint32_t i; + if (!dmub->sw_init) return DMUB_STATUS_INVALID; - if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || - !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) { - ASSERT(0); - return DMUB_STATUS_INVALID; + for (i = 0; i < DMUB_WINDOW_TOTAL; 
++i) { + if (!params->fb[i]) { + ASSERT(0); + return DMUB_STATUS_INVALID; + } } dmub->fb_base = params->fb_base; @@ -748,9 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->shared_state = shared_state_fb->cpu_addr; - dmub->scratch_mem_fb = *scratch_mem_fb; + dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM]; - dmub->ib_mem_gart = *ib_mem_gart; + dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD]; + dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr; if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); @@ -1368,3 +1373,11 @@ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub) return DMUB_STATUS_OK; } + +bool dmub_srv_get_preos_info(struct dmub_srv *dmub) +{ + if (!dmub || !dmub->hw_funcs.get_preos_fw_info) + return false; + + return dmub->hw_funcs.get_preos_fw_info(dmub); +} diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index 812377d9e48f..973b6bdbac63 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -135,12 +135,8 @@ struct bp_external_encoder_control { struct bp_crtc_source_select { enum engine_id engine_id; enum controller_id controller_id; - /* from GPU Tx aka asic_signal */ - enum signal_type signal; - /* sink_signal may differ from asicSignal if Translator encoder */ enum signal_type sink_signal; - enum display_output_bit_depth display_output_bit_depth; - bool enable_dp_audio; + uint8_t bit_depth; }; struct bp_transmitter_control { @@ -166,6 +162,11 @@ struct bp_transmitter_control { bool single_pll_mode; }; +struct bp_load_detection_parameters { + enum engine_id engine_id; + uint16_t device_id; +}; + struct bp_hw_crtc_timing_parameters { enum controller_id controller_id; /* 
horizontal part */ diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index cc467031651d..38a77fa9b4af 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -169,6 +169,7 @@ struct dc_firmware_info { uint32_t engine_clk_ss_percentage; } feature; + uint32_t max_pixel_clock; /* in KHz */ uint32_t default_display_engine_pll_frequency; /* in KHz */ uint32_t external_clock_source_frequency_for_dp; /* in KHz */ uint32_t smu_gpu_pll_output_freq; /* in KHz */ diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 54e33062b3c0..1386fa124e85 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -310,4 +310,11 @@ static inline bool dal_graphics_object_id_equal( } return false; } + +static inline bool dc_connector_supports_analog(const enum connector_id conn) +{ + return conn == CONNECTOR_ID_VGA || + conn == CONNECTOR_ID_SINGLE_LINK_DVII || + conn == CONNECTOR_ID_DUAL_LINK_DVII; +} #endif diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index a10d6b988aab..3a2c2d2fb629 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal) } } +/** + * dc_is_rgb_signal() - Whether the signal is analog RGB. + * + * Returns whether the given signal type is an analog RGB signal + * that is used with a DAC on VGA or DVI-I connectors. + * Not to be confused with other uses of "RGB", such as RGB color space. 
+ */ +static inline bool dc_is_rgb_signal(enum signal_type signal) +{ + return (signal == SIGNAL_TYPE_RGB); +} + static inline bool dc_is_tmds_signal(enum signal_type signal) { switch (signal) { diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index c760216a6240..ca402ddcdacc 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -354,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, /* reset retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* add display to connection */ @@ -400,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, /* clear retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* remove display */ @@ -464,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp, /* clear retry counters */ reset_retry_counts(hdcp); - /* reset error trace */ + /* reset trace */ memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); /* set new adjustment */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index a37634942b07..b883d626f1c3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -508,7 +508,7 @@ static inline void set_auth_complete(struct mod_hdcp *hdcp, struct mod_hdcp_output *output) { output->auth_complete = 1; - mod_hdcp_log_ddc_trace(hdcp); + HDCP_AUTH_COMPLETE_TRACE(hdcp); } /* connection topology helpers */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index 8bc377560787..1bbd728d4345 100644 --- 
a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) { uint64_t n = 0; uint8_t count = 0; + enum mod_hdcp_status status; u8 bksv[sizeof(n)] = { }; memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv)); @@ -38,8 +39,14 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) count++; n &= (n - 1); } - return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : - MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; + + if (count == 20) { + hdcp->connection.trace.hdcp1.attempt_count++; + status = MOD_HDCP_STATUS_SUCCESS; + } else { + status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; + } + return status; } static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) @@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; + hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp); + /* Some MST display may choose to report the internal panel as an HDCP RX. * To update this condition with 1(because the immediate repeater's internal * panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). 
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index bb8ae80b37f8..5628f0ef73fd 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) { enum mod_hdcp_status status; + struct mod_hdcp_trace *trace = &hdcp->connection.trace; if (is_dp_hdcp(hdcp)) status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) && @@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp) MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; else - status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ? - MOD_HDCP_STATUS_SUCCESS : - MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi + & HDCP_2_2_HDMI_SUPPORT_MASK) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE; + + if (status == MOD_HDCP_STATUS_SUCCESS) + trace->hdcp2.attempt_count++; + return status; } @@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + /* Avoid device count == 0 to do authentication */ if (get_device_count(hdcp) == 0) return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE; + trace->hdcp2.downstream_device_count = get_device_count(hdcp); + trace->hdcp2.hdcp1_device_downstream = + HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]); + trace->hdcp2.hdcp2_legacy_device_downstream = + HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]); /* Some MST display may choose to report the internal panel as an HDCP RX. 
*/ /* To update this condition with 1(because the immediate repeater's internal */ /* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h index 1d83c1b9da10..26553aa4c5ca 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -31,6 +31,7 @@ #define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) #define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) #define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#define HDCP_LOG_TRA(hdcp) do {} while (0) /* default logs */ #define HDCP_ERROR_TRACE(hdcp, status) \ @@ -131,4 +132,9 @@ HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ } while (0) +#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \ + mod_hdcp_log_ddc_trace(hdcp); \ + HDCP_LOG_TRA(hdcp); \ +} while (0) + #endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index b51ddf2846df..46e52fb3a118 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -230,9 +230,23 @@ struct mod_hdcp_error { uint8_t state_id; }; +struct mod_hdcp1_trace { + uint8_t attempt_count; + uint8_t downstream_device_count; +}; + +struct mod_hdcp2_trace { + uint8_t attempt_count; + uint8_t downstream_device_count; + uint8_t hdcp1_device_downstream; + uint8_t hdcp2_legacy_device_downstream; +}; + struct mod_hdcp_trace { struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE]; uint8_t error_count; + struct mod_hdcp1_trace hdcp1; + struct mod_hdcp2_trace hdcp2; }; enum mod_hdcp_encryption_status { diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h index 086869264425..a252ee4c7874 100644 --- a/drivers/gpu/drm/amd/include/amd_cper.h +++ 
b/drivers/gpu/drm/amd/include/amd_cper.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2025 Advanced Micro Devices, Inc. * diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 75efda2969cf..17945094a138 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -109,6 +109,7 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_VPE, AMD_IP_BLOCK_TYPE_UMSCH_MM, AMD_IP_BLOCK_TYPE_ISP, + AMD_IP_BLOCK_TYPE_RAS, AMD_IP_BLOCK_TYPE_NUM, }; diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h index 64b553e7de1a..e7fdcee22a71 100644 --- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h +++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: MIT */ /* * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. 
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 2b0cdb2a2775..f92f78d5d330 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -454,7 +454,7 @@ struct amd_pm_funcs { bool gate, int inst); int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id); - int (*set_power_limit)(void *handle, uint32_t n); + int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n); int (*get_power_limit)(void *handle, uint32_t *limit, enum pp_power_limit_level pp_limit_level, enum pp_power_type power_type); @@ -532,6 +532,110 @@ struct metrics_table_header { uint8_t content_revision; }; +enum amdgpu_metrics_attr_id { + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT, + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM, + AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC, + AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER, + AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY, + AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY, + AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH, + AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR, + AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER, + AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER, + AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC, + AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS, + AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH, + AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED, + AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC, + AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST, + AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC, + 
AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC, + AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS, + AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP, + AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK, + AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK, + AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0, + AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0, + AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK, + AMDGPU_METRICS_ATTR_ID_NUM_PARTITION, + AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY, + AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST, + AMDGPU_METRICS_ATTR_ID_JPEG_BUSY, + AMDGPU_METRICS_ATTR_ID_VCN_BUSY, + AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC, + AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC, + AMDGPU_METRICS_ATTR_ID_MAX, +}; + +enum amdgpu_metrics_attr_type { + AMDGPU_METRICS_TYPE_U8, + AMDGPU_METRICS_TYPE_S8, + AMDGPU_METRICS_TYPE_U16, + AMDGPU_METRICS_TYPE_S16, + AMDGPU_METRICS_TYPE_U32, + AMDGPU_METRICS_TYPE_S32, + AMDGPU_METRICS_TYPE_U64, + AMDGPU_METRICS_TYPE_S64, + AMDGPU_METRICS_TYPE_MAX, +}; + +enum amdgpu_metrics_attr_unit { + /* None */ + AMDGPU_METRICS_UNIT_NONE, + /* MHz*/ + AMDGPU_METRICS_UNIT_CLOCK_1, + /* Degree Celsius*/ + AMDGPU_METRICS_UNIT_TEMP_1, + /* Watts*/ + AMDGPU_METRICS_UNIT_POWER_1, + /* In nanoseconds*/ + AMDGPU_METRICS_UNIT_TIME_1, + /* In 10 nanoseconds*/ + AMDGPU_METRICS_UNIT_TIME_2, + /* Speed in GT/s */ + AMDGPU_METRICS_UNIT_SPEED_1, + /* Speed in 0.1 GT/s */ + AMDGPU_METRICS_UNIT_SPEED_2, + /* Bandwidth GB/s */ + AMDGPU_METRICS_UNIT_BW_1, + /* Data in KB */ + AMDGPU_METRICS_UNIT_DATA_1, + /* Percentage */ + AMDGPU_METRICS_UNIT_PERCENT, + AMDGPU_METRICS_UNIT_MAX, +}; + +#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000 +#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24 +#define 
AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000 +#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20 +#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00 +#define AMDGPU_METRICS_ATTR_ID_SHIFT 10 +#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF +#define AMDGPU_METRICS_ATTR_INST_SHIFT 0 + +#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \ + (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \ + ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \ + ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst)) + /* * gpu_metrics_v1_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v1_1 or later instead. @@ -1221,6 +1325,19 @@ struct gpu_metrics_v1_8 { uint32_t pcie_lc_perf_other_end_recovery; }; +struct gpu_metrics_attr { + /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */ + uint64_t attr_encoding; + /* Attribute value, depends on attr_encoding */ + void *attr_value; +}; + +struct gpu_metrics_v1_9 { + struct metrics_table_header common_header; + int attr_count; + struct gpu_metrics_attr metrics_attrs[]; +}; + /* * gpu_metrics_v2_0 is not recommended as it's not naturally aligned. * Use gpu_metrics_v2_1 or later instead. 
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index ab1cfc92dbeb..f9629d42ada2 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -345,7 +345,8 @@ union MESAPI__REMOVE_QUEUE { uint32_t unmap_kiq_utility_queue : 1; uint32_t preempt_legacy_gfx_queue : 1; uint32_t unmap_legacy_queue : 1; - uint32_t reserved : 28; + uint32_t remove_queue_after_reset : 1; + uint32_t reserved : 27; }; struct MES_API_STATUS api_status; diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h index 69611c7e30e3..2f12cba4eb66 100644 --- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h @@ -399,7 +399,8 @@ union MESAPI__REMOVE_QUEUE { uint32_t unmap_kiq_utility_queue : 1; uint32_t preempt_legacy_gfx_queue : 1; uint32_t unmap_legacy_queue : 1; - uint32_t reserved : 28; + uint32_t remove_queue_after_reset : 1; + uint32_t reserved : 27; }; struct MES_API_STATUS api_status; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 518d07afc7df..5d08dc3b7110 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1616,6 +1616,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, } int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, + uint32_t limit_type, uint32_t limit) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1626,7 +1627,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, - limit); + limit_type, limit); mutex_unlock(&adev->pm.mutex); return ret; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index b5fbb0fd1dc0..c88a76cce401 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -108,8 
+108,9 @@ const char * const amdgpu_pp_profile_name[] = { static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm) { bool runpm_check = runpm ? adev->in_runpm : false; + bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT); - if (amdgpu_in_reset(adev)) + if (amdgpu_in_reset(adev) || !full_init) return -EBUSY; if (adev->in_suspend && !runpm_check) @@ -173,7 +174,6 @@ static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev) */ static inline void amdgpu_pm_put_access(struct amdgpu_device *adev) { - pm_runtime_mark_last_busy(adev->dev); pm_runtime_put_autosuspend(adev->dev); } @@ -3390,13 +3390,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return err; value = value / 1000000; /* convert to Watt */ - value |= limit_type << 24; err = amdgpu_pm_get_access(adev); if (err < 0) return err; - err = amdgpu_dpm_set_power_limit(adev, value); + err = amdgpu_dpm_set_power_limit(adev, limit_type, value); amdgpu_pm_put_access(adev); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 65c1d98af26c..3bce74f8bb0a 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -553,7 +553,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, enum pp_power_limit_level pp_limit_level, enum pp_power_type power_type); int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, - uint32_t limit); + uint32_t limit_type, uint32_t limit); int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev); int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index cf9932e68055..3a9522c17fee 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -3500,6 +3500,11 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, 
* for these GPUs to calculate bandwidth requirements. */ if (high_pixelclock_count) { + /* Work around flickering lines at the bottom edge + * of the screen when using a single 4K 60Hz monitor. + */ + disable_mclk_switching = true; + /* On Oland, we observe some flickering when two 4K 60Hz * displays are connected, possibly because voltage is too low. * Raise the voltage by requiring a higher SCLK. diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 554492dfa3c0..76a5353d7f4a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -20,7 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ -#include "pp_debug.h" #include #include #include @@ -28,12 +27,10 @@ #include #include #include "amd_shared.h" -#include "amd_powerplay.h" #include "power_state.h" #include "amdgpu.h" #include "hwmgr.h" #include "amdgpu_dpm_internal.h" -#include "amdgpu_display.h" static const struct amd_pm_funcs pp_dpm_funcs; @@ -955,7 +952,7 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -static int pp_set_power_limit(void *handle, uint32_t limit) +static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; uint32_t max_power_limit; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index 8da882c51856..9b28c0728269 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; else if (hwmgr->pp_table_version == PP_TABLE_V0) - thermal_data->max = data->thermal_temp_setting.temperature_shutdown * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + thermal_data->max = 
data->thermal_temp_setting.temperature_shutdown; thermal_data->sw_ctf_threshold = thermal_data->max; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c index d2dbd90bb427..0a876c840c79 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c @@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) table->VoltageResponseTime = 0; table->PhaseResponseTime = 0; table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); table->PCIeGenInterval = 1; table->VRConfig = 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c index 1f50f1e74c48..aa3ae9b115c4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c @@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr) table->VoltageResponseTime = 0; table->PhaseResponseTime = 0; table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = 0; + table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count); table->PCIeGenInterval = 1; result = iceland_populate_smc_svi2_config(hwmgr, table); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index fb8086859857..4317da6f7c38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu, static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); static int smu_set_fan_control_mode(void *handle, u32 value); -static int smu_set_power_limit(void *handle, uint32_t limit); +static int smu_set_power_limit(void 
*handle, uint32_t limit_type, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); @@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) /* Enable restore flag */ smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; - /* set the user dpm power limit */ - if (smu->user_dpm_profile.power_limit) { - ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); + /* set the user dpm power limits */ + for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) { + if (!smu->user_dpm_profile.power_limits[i]) + continue; + ret = smu_set_power_limit(smu, i, + smu->user_dpm_profile.power_limits[i]); if (ret) - dev_err(smu->adev->dev, "Failed to set power limit value\n"); + dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i); } /* set the user dpm clock configurations */ @@ -609,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev) return true; } +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *read_arg) +{ + struct smu_context *smu = adev->powerplay.pp_handle; + int ret = -EOPNOTSUPP; + + if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg) + ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg); + + return ret; +} static int smu_sys_get_pp_table(void *handle, char **table) @@ -2225,7 +2239,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2257,18 +2270,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; - if (smu->current_power_limit) { - ret = smu_set_power_limit(smu, smu->current_power_limit); - 
if (ret && ret != -EOPNOTSUPP) - return ret; - } - - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { - ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); - if (ret) - return ret; - } - dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; @@ -2958,37 +2959,34 @@ int smu_get_power_limit(void *handle, return ret; } -static int smu_set_power_limit(void *handle, uint32_t limit) +static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit) { struct smu_context *smu = handle; - uint32_t limit_type = limit >> 24; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - limit &= (1<<24)-1; - if (limit_type != SMU_DEFAULT_PPT_LIMIT) - if (smu->ppt_funcs->set_power_limit) - return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - - if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { - dev_err(smu->adev->dev, - "New power limit (%d) is out of range [%d,%d]\n", - limit, smu->min_power_limit, smu->max_power_limit); - return -EINVAL; + if (limit_type == SMU_DEFAULT_PPT_LIMIT) { + if (!limit) + limit = smu->current_power_limit; + if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { + dev_err(smu->adev->dev, + "New power limit (%d) is out of range [%d,%d]\n", + limit, smu->min_power_limit, smu->max_power_limit); + return -EINVAL; + } } - if (!limit) - limit = smu->current_power_limit; - if (smu->ppt_funcs->set_power_limit) { ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) - smu->user_dpm_profile.power_limit = limit; + if (ret) + return ret; + if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) + smu->user_dpm_profile.power_limits[limit_type] = limit; } - return ret; + return 0; } static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 582c186d8b62..c48028abc8c4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -212,6 +212,7 @@ enum smu_power_src_type { enum smu_ppt_limit_type { SMU_DEFAULT_PPT_LIMIT = 0, SMU_FAST_PPT_LIMIT, + SMU_LIMIT_TYPE_COUNT, }; enum smu_ppt_limit_level { @@ -231,7 +232,7 @@ enum smu_memory_pool_size { struct smu_user_dpm_profile { uint32_t fan_mode; - uint32_t power_limit; + uint32_t power_limits[SMU_LIMIT_TYPE_COUNT]; uint32_t fan_speed_pwm; uint32_t fan_speed_rpm; uint32_t flags; @@ -1521,6 +1522,15 @@ struct pptable_funcs { */ ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id, void *table); + /** + * @ras_send_msg: Send a message with a parameter from Ras + * &msg: Type of message. + * ¶m: Message parameter. + * &read_arg: SMU response (optional). + */ + int (*ras_send_msg)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); + }; typedef enum { @@ -1786,6 +1796,8 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, ssize_t smu_get_pm_policy_info(struct smu_context *smu, enum pp_pm_policy p_type, char *sysbuf); +int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg, + uint32_t param, uint32_t *readarg); #endif void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 9548bd3c624b..55401e6b2b0b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int ret = 0, size = 0; + int ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0; int i; 
smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; } - return size; + return size - start_offset; } static bool cyan_skillfish_is_dpm_running(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 0028f10ead42..bbf09aec9152 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, levels, size = 0, ret = 0; + int i, levels, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu, case SMU_DCEFCLK: ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count); if (ret) - return size; + return size - start_offset; ret = navi10_is_support_fine_grained_dpm(smu, clk_type); if (ret < 0) @@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, for (i = 0; i < count; i++) { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) - return size; + return size - start_offset; size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value, cur_value == value ? 
"*" : ""); @@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu, } else { ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); if (ret) - return size; + return size - start_offset; ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) - return size; + return size - start_offset; freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : @@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int navi10_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 31c2c0386b1f..774283ac7827 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings; OverDriveTable_t *od_table = (OverDriveTable_t *)table_context->overdrive_table; - int i, size = 0, ret = 0; + int i, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, uint32_t min_value, max_value; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_GFXCLK: @@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int sienna_cichlid_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 2c9869feba61..53579208cffb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_print_clk_levels(struct smu_context *smu, @@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int vangogh_common_print_clk_levels(struct smu_context *smu, @@ -2308,8 +2310,7 @@ static int vangogh_get_power_limit(struct smu_context *smu, uint32_t *max_power_limit, uint32_t *min_power_limit) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; uint32_t ppt_limit; int ret = 0; @@ -2345,12 +2346,11 @@ static int vangogh_get_power_limit(struct smu_context *smu, 
} static int vangogh_get_ppt_limit(struct smu_context *smu, - uint32_t *ppt_limit, - enum smu_ppt_limit_type type, - enum smu_ppt_limit_level level) + uint32_t *ppt_limit, + enum smu_ppt_limit_type type, + enum smu_ppt_limit_level level) { - struct smu_11_5_power_context *power_context = - smu->smu_power.power_context; + struct smu_11_5_power_context *power_context = smu->smu_power.power_context; if (!power_context) return -EOPNOTSUPP; @@ -2399,7 +2399,6 @@ static int vangogh_set_power_limit(struct smu_context *smu, smu->current_power_limit = ppt_limit; break; case SMU_FAST_PPT_LIMIT: - ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24); if (ppt_limit > power_context->max_fast_ppt_limit) { dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 3baf20f4c373..eaa9ea162f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; bool cur_value_match_level = false; @@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, return ret; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_RANGE: @@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, i == 2 ? 
"*" : ""); } - return size; + return size - start_offset; case SMU_SOCCLK: count = NUM_SOCCLK_DPM_LEVELS; cur_value = metrics.ClockFrequency[CLOCK_SOCCLK]; @@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index c1062e5f0393..677781060246 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index b081ae3e8f43..6908f9930f16 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu, static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); 
+ start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_4_read_sensor(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index f5db181ef489..4576bf008b22 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -861,11 +861,12 @@ out: static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 285cf7979693..0a7d2cea7dc6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -450,7 +450,8 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu) ((pgm == 4) && (fw_ver >= 0x4557000))) smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET)); - if ((pgm == 0) && (fw_ver >= 0x00558200)) + if ((pgm == 0 && fw_ver >= 0x00558200) || + (pgm == 7 && fw_ver >= 0x07551400)) smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET)); } @@ -1428,7 +1429,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size, static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) { - int now, size = 0; + int now, size = 0, start_offset = 0; int ret = 0; struct smu_umd_pstate_table 
*pstate_table = &smu->pstate_table; struct smu_13_0_dpm_table *single_dpm_table; @@ -1437,10 +1438,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, uint32_t min_clk, max_clk; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } dpm_context = smu_dpm->dpm_context; @@ -1575,7 +1577,7 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max, @@ -3226,6 +3228,24 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask) return ret; } +static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg) +{ + int ret; + + switch (msg) { + case SMU_MSG_QueryValidMcaCount: + case SMU_MSG_QueryValidMcaCeCount: + case SMU_MSG_McaBankDumpDW: + case SMU_MSG_McaBankCeDumpDW: + case SMU_MSG_ClearMcaOnRead: + ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg); + break; + default: + ret = -EPERM; + } + + return ret; +} static int smu_v13_0_6_post_init(struct smu_context *smu) { @@ -3921,6 +3941,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .reset_sdma = smu_v13_0_6_reset_sdma, .dpm_reset_vcn = smu_v13_0_6_reset_vcn, .post_init = smu_v13_0_6_post_init, + .ras_send_msg = smu_v13_0_6_ras_send_msg, }; void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c96fa5e49ed6..a3fc35b9011e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, struct smu_13_0_dpm_table *single_dpm_table; struct 
smu_13_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 73b4506ef5a8..5d7e671fa3c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu, static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, size = 0, ret = 0; + int i, idx, size = 0, ret = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; uint32_t clk_limit = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, } print_clk_out: - return size; + return size - start_offset; } static int yellow_carp_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index fe00c84b1cc6..b1bd946d8e30 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, static int 
smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, idx, ret = 0, size = 0; + int i, idx, ret = 0, size = 0, start_offset = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; switch (clk_type) { case SMU_OD_SCLK: @@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 086501cc5213..2cea688c604f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, struct smu_14_0_dpm_table *single_dpm_table; struct smu_14_0_pcie_table *pcie_table; uint32_t gen_speed, lane_width; - int i, curr_freq, size = 0; + int i, curr_freq, size = 0, start_offset = 0; int32_t min_value, max_value; int ret = 0; smu_cmn_get_sysfs_buf(&buf, &size); + start_offset = size; if (amdgpu_ras_intr_triggered()) { size += sysfs_emit_at(buf, size, "unavailable\n"); - return size; + return size - start_offset; } switch (clk_type) { @@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, break; } - return size; + return size - start_offset; } static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index f532f7c69259..a8961a8f5c42 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu, table_index); uint32_t table_size; int ret = 0; - if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) + if 
(!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0) return -EINVAL; table_size = smu_table->tables[table_index].size; diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile new file mode 100644 index 000000000000..bbdaba811d34 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/Makefile @@ -0,0 +1,34 @@ +# +# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+ +ifeq ($(AMD_GPU_RAS_MGR),) + AMD_GPU_RAS_MGR := ras_mgr +endif + +subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore +subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR) + +RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore + +AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS))) + +include $(AMD_RAS) + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile new file mode 100644 index 000000000000..5e5a2cfa4068 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile @@ -0,0 +1,33 @@ +# Copyright 2025 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+ +RAS_MGR_FILES = amdgpu_ras_sys.o \ + amdgpu_ras_mgr.o \ + amdgpu_ras_eeprom_i2c.o \ + amdgpu_ras_mp1_v13_0.o \ + amdgpu_ras_cmd.o \ + amdgpu_ras_process.o \ + amdgpu_ras_nbio_v7_9.o + + +RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES)) + +AMD_GPU_RAS_FILES += $(RAS_MGR) + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c new file mode 100644 index 000000000000..78419b7f7729 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include "amdgpu.h" +#include "amdgpu_ras.h" +#include "ras_sys.h" +#include "amdgpu_ras_cmd.h" +#include "amdgpu_ras_mgr.h" + +/* inject address is 52 bits */ +#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) + +#define AMDGPU_RAS_TYPE_RASCORE 0x1 +#define AMDGPU_RAS_TYPE_AMDGPU 0x2 +#define AMDGPU_RAS_TYPE_VF 0x3 + +static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core, + struct ras_cmd_inject_error_req *block_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + + if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) { + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + RAS_DEV_WARN(adev, "Failed to disallow df cstate"); + + ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW); + if (ret && (ret != -EOPNOTSUPP)) + RAS_DEV_WARN(adev, "Failed to disallow XGMI power down"); + } + + return 0; +} + +static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core, + struct ras_cmd_inject_error_req *block_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + + if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) { + if (amdgpu_ras_intr_triggered()) + return 0; + + ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT); + if (ret && (ret != -EOPNOTSUPP)) + RAS_DEV_WARN(adev, "Failed to allow XGMI power down"); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + RAS_DEV_WARN(adev, "Failed to allow df cstate"); + } + + return 0; +} + +static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core, + uint64_t addr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi; + + return (addr + xgmi->physical_node_id * xgmi->node_segment_size); +} + +static int amdgpu_ras_inject_error(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct amdgpu_device *adev = (struct 
amdgpu_device *)ras_core->dev; + struct ras_cmd_inject_error_req *req = + (struct ras_cmd_inject_error_req *)cmd->input_buff_raw; + int ret = RAS_CMD__ERROR_GENERIC; + + if (req->block_id == RAS_BLOCK_ID__UMC) { + if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) { + RAS_DEV_WARN(ras_core->dev, + "RAS WARN: inject: 0x%llx has already been marked as bad!\n", + req->address); + return RAS_CMD__ERROR_ACCESS_DENIED; + } + + if ((req->address >= adev->gmc.mc_vram_size && + adev->gmc.mc_vram_size) || + (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.", + req->address); + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + } + + /* Calculate XGMI relative offset */ + if (adev->gmc.xgmi.num_physical_nodes > 1 && + req->block_id != RAS_BLOCK_ID__GFX) { + req->address = local_addr_to_xgmi_global_addr(ras_core, req->address); + } + } + + amdgpu_ras_trigger_error_prepare(ras_core, req); + ret = rascore_handle_cmd(ras_core, cmd, data); + amdgpu_ras_trigger_error_end(ras_core, req); + if (ret) { + RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret); + ret = RAS_CMD__ERROR_ACCESS_DENIED; + } + + + return ret; +} + +static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_cmd_dev_handle *input_data = + (struct ras_cmd_dev_handle *)cmd->input_buff_raw; + struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges = + (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw; + struct amdgpu_mem_partition_info *mem_ranges; + uint32_t i = 0; + + if (cmd->input_size != sizeof(*input_data)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + mem_ranges = adev->gmc.mem_partitions; + for (i = 0; i < adev->gmc.num_mem_partitions; i++) { + ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT; + ranges->range[i].size = 
mem_ranges[i].size; + ranges->range[i].idx = i; + } + + ranges->num_ranges = adev->gmc.num_mem_partitions; + + ranges->version = 0; + cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp); + + return RAS_CMD__SUCCESS; +} + +static int ras_translate_fb_address(struct ras_core_context *ras_core, + enum ras_fb_addr_type src_type, + enum ras_fb_addr_type dest_type, + union ras_translate_fb_address *src_addr, + union ras_translate_fb_address *dest_addr) +{ + uint64_t soc_phy_addr; + int ret = RAS_CMD__SUCCESS; + + /* Does not need to be queued as event as this is a SW translation */ + switch (src_type) { + case RAS_FB_ADDR_SOC_PHY: + soc_phy_addr = src_addr->soc_phy_addr; + break; + case RAS_FB_ADDR_BANK: + ret = ras_cmd_translate_bank_to_soc_pa(ras_core, + src_addr->bank_addr, &soc_phy_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + break; + default: + return RAS_CMD__ERROR_INVALID_CMD; + } + + switch (dest_type) { + case RAS_FB_ADDR_SOC_PHY: + dest_addr->soc_phy_addr = soc_phy_addr; + break; + case RAS_FB_ADDR_BANK: + ret = ras_cmd_translate_soc_pa_to_bank(ras_core, + soc_phy_addr, &dest_addr->bank_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + break; + default: + return RAS_CMD__ERROR_INVALID_CMD; + } + + return ret; +} + +static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_translate_fb_address_req *req_buff = + (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw; + struct ras_cmd_translate_fb_address_rsp *rsp_buff = + (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw; + int ret = RAS_CMD__ERROR_GENERIC; + + if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) || + (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + ret = ras_translate_fb_address(ras_core, 
req_buff->src_addr_type, + req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + rsp_buff->version = 0; + cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp); + + return RAS_CMD__SUCCESS; +} + +static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = { + {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error}, + {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges}, + {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address}, +}; + +int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_func_map *ras_cmd = NULL; + int i, res; + + for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) { + if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) { + ras_cmd = &amdgpu_ras_cmd_maps[i]; + break; + } + } + + if (ras_cmd) + res = ras_cmd->func(ras_core, cmd, NULL); + else + res = RAS_CMD__ERROR_UKNOWN_CMD; + + return res; +} + +int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd) +{ + struct ras_core_context *cmd_core = ras_core; + int timeout = 60; + int res; + + cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD; + cmd->output_size = 0; + + if (!ras_core_is_enabled(cmd_core)) + return RAS_CMD__ERROR_ACCESS_DENIED; + + while (ras_core_gpu_in_reset(cmd_core)) { + msleep(1000); + if (!timeout--) + return RAS_CMD__ERROR_TIMEOUT; + } + + res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL); + if (res == RAS_CMD__ERROR_UKNOWN_CMD) + res = rascore_handle_cmd(cmd_core, cmd, NULL); + + cmd->cmd_res = res; + + if (cmd->output_size > cmd->output_buf_size) { + RAS_DEV_ERR(cmd_core->dev, + "Output size 0x%x exceeds output buffer size 0x%x!\n", + cmd->output_size, cmd->output_buf_size); + return RAS_CMD__SUCCESS_EXEED_BUFFER; + } + + return RAS_CMD__SUCCESS; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h new file mode 100644 index 
000000000000..5973b156cc85 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __AMDGPU_RAS_CMD_H__ +#define __AMDGPU_RAS_CMD_H__ +#include "ras.h" + +enum amdgpu_ras_cmd_id { + RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START, + RAS_CMD__TRANSLATE_MEMORY_FD, + RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END, +}; + +struct ras_cmd_translate_memory_fd_req { + struct ras_cmd_dev_handle dev; + uint32_t type; + uint32_t fd; + uint64_t address; + uint32_t reserved[4]; +}; + +struct ras_cmd_translate_memory_fd_rsp { + uint32_t version; + uint32_t padding; + uint64_t start; + uint64_t size; + uint32_t reserved[2]; +}; + +int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data); +int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd); + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c new file mode 100644 index 000000000000..1bb7b7001ec7 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_ras_eeprom.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_eeprom_i2c.h" +#include "ras_eeprom.h" + +/* These are memory addresses as would be seen by one or more EEPROM + * chips strung on the I2C bus, usually by manipulating pins 1-3 of a + * set of EEPROM devices. They form a continuous memory space. + * + * The I2C device address includes the device type identifier, 1010b, + * which is a reserved value and indicates that this is an I2C EEPROM + * device. It also includes the top 3 bits of the 19 bit EEPROM memory + * address, namely bits 18, 17, and 16. This makes up the 7 bit + * address sent on the I2C bus with bit 0 being the direction bit, + * which is not represented here, and sent by the hardware directly. + * + * For instance, + * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0. + * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h. + * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h. + * Depending on the size of the I2C EEPROM device(s), bits 18:16 may + * address memory in a device or a device on the I2C bus, depending on + * the status of pins 1-3. See top of amdgpu_eeprom.c. + * + * The RAS table lives either at address 0 or address 40000h of EEPROM. 
+ */ +#define EEPROM_I2C_MADDR_0 0x0 +#define EEPROM_I2C_MADDR_4 0x40000 + +#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF)) +#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control)) + +#define EEPROM_PAGE_BITS 8 +#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS) +#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1) + +#define EEPROM_OFFSET_SIZE 2 + +static int ras_eeprom_i2c_config(struct ras_core_context *ras_core) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + u8 i2c_addr; + + if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) { + /* The address given by VBIOS is an 8-bit, wire-format + * address, i.e. the most significant byte. + * + * Normalize it to a 19-bit EEPROM address. Remove the + * device type identifier and make it a 7-bit address; + * then make it a 19-bit EEPROM address. See top of + * amdgpu_eeprom.c. + */ + i2c_addr = (i2c_addr & 0x0F) >> 1; + control->i2c_address = ((u32) i2c_addr) << 16; + return 0; + } + + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { + case IP_VERSION(13, 0, 5): + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 10): + case IP_VERSION(13, 0, 14): + control->i2c_address = EEPROM_I2C_MADDR_4; + return 0; + default: + return -ENODATA; + } + return -ENODATA; +} + +static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter; + u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE]; + struct i2c_msg msgs[] = { + { + .flags = 0, + .len = EEPROM_OFFSET_SIZE, + .buf = eeprom_offset_buf, + }, + { + .flags = read ? I2C_M_RD : 0, + }, + }; + const u8 *p = eeprom_buf; + int r; + u16 len; + + for (r = 0; buf_size > 0; + buf_size -= len, eeprom_addr += len, eeprom_buf += len) { + /* Set the EEPROM address we want to write to/read from. 
+ */ + msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr); + msgs[1].addr = msgs[0].addr; + msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff; + msgs[0].buf[1] = eeprom_addr & 0xff; + + if (!read) { + /* Write the maximum amount of data, without + * crossing the device's page boundary, as per + * its spec. Partial page writes are allowed, + * starting at any location within the page, + * so long as the page boundary isn't crossed + * over (actually the page pointer rolls + * over). + * + * As per the AT24CM02 EEPROM spec, after + * writing into a page, the I2C driver should + * terminate the transfer, i.e. in + * "i2c_transfer()" below, with a STOP + * condition, so that the self-timed write + * cycle begins. This is implied for the + * "i2c_transfer()" abstraction. + */ + len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK), + buf_size); + } else { + /* Reading from the EEPROM has no limitation + * on the number of bytes read from the EEPROM + * device--they are simply sequenced out. + * Keep in mind that i2c_msg.len is u16 type. + */ + len = min(U16_MAX, buf_size); + } + msgs[1].len = len; + msgs[1].buf = eeprom_buf; + + + /* This constitutes a START-STOP transaction. + */ + r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs)); + if (r != ARRAY_SIZE(msgs)) + break; + + if (!read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). + */ + msleep(10); + } + } + + return r < 0 ? 
r : eeprom_buf - p; +} + +const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = { + .eeprom_i2c_xfer = ras_eeprom_i2c_xfer, + .update_eeprom_i2c_config = ras_eeprom_i2c_config, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h new file mode 100644 index 000000000000..3b5878605411 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __AMDGPU_RAS_EEPROM_I2C_H__ +#define __AMDGPU_RAS_EEPROM_I2C_H__ +#include "ras.h" + +extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func; +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c new file mode 100644 index 000000000000..8007e49951d8 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "amdgpu_reset.h" +#include "amdgpu_xgmi.h" +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_cmd.h" +#include "amdgpu_ras_process.h" +#include "amdgpu_ras_eeprom_i2c.h" +#include "amdgpu_ras_mp1_v13_0.h" +#include "amdgpu_ras_nbio_v7_9.h" + +#define MAX_SOCKET_NUM_PER_HIVE 8 +#define MAX_AID_NUM_PER_SOCKET 4 +#define MAX_XCD_NUM_PER_AID 2 + +/* typical ECC bad page rate is 1 bad page per 100MB VRAM */ +#define ESTIMATE_BAD_PAGE_THRESHOLD(size) ((size)/(100 * 1024 * 1024ULL)) + +#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4) + +/* Reserve 8 physical dram row for possible retirement. + * In worst cases, it will lose 8 * 2MB memory in vram domain + */ +#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20) + + +static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr) +{ + struct ras_event_state *event_state; + int i; + + memset(mgr, 0, sizeof(*mgr)); + atomic64_set(&mgr->seqno, 0); + + for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) { + event_state = &mgr->event_state[i]; + event_state->last_seqno = RAS_EVENT_INVALID_ID; + atomic64_set(&event_state->count, 0); + } +} + +static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_event_manager *event_mgr; + struct amdgpu_hive_info *hive; + + hive = amdgpu_get_xgmi_hive(adev); + event_mgr = hive ? 
&hive->event_mgr : &ras_mgr->ras_event_mgr; + + /* init event manager with node 0 on xgmi system */ + if (!amdgpu_reset_in_recovery(adev)) { + if (!hive || adev->gmc.xgmi.node_id == 0) + ras_mgr_init_event_mgr(event_mgr); + } + + if (hive) + amdgpu_put_xgmi_hive(hive); +} + +static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_aca_config *aca_cfg = &config->aca_cfg; + + aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE; + aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET; + aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID; + + return 0; +} + +static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg; + + eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func; + eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus; + if (eeprom_cfg->eeprom_i2c_adapter) { + const struct i2c_adapter_quirks *quirks = + ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks; + + if (quirks) { + eeprom_cfg->max_i2c_read_len = quirks->max_read_len; + eeprom_cfg->max_i2c_write_len = quirks->max_write_len; + } + } + + /* + * amdgpu_bad_page_threshold is used to config + * the threshold for the number of bad pages. + * -1: Threshold is set to default value + * Driver will issue a warning message when threshold is reached + * and continue runtime services. + * 0: Disable bad page retirement + * Driver will not retire bad pages + * which is intended for debugging purpose. + * -2: Threshold is determined by a formula + * that assumes 1 bad page per 100M of local memory. + * Driver will continue runtime services when threshold is reached. + * 0 < threshold < max number of bad page records in EEPROM, + * A user-defined threshold is set + * Driver will halt runtime services when this custom threshold is reached.
+ */ + if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD) + eeprom_cfg->eeprom_record_threshold_count = + ESTIMATE_BAD_PAGE_THRESHOLD(adev->gmc.mc_vram_size); + else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD) + eeprom_cfg->eeprom_record_threshold_count = + COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT); + else + eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold; + + eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold; + + return 0; +} + +static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_mp1_config *mp1_cfg = &config->mp1_cfg; + int ret = 0; + + switch (config->mp1_ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0; + break; + default: + RAS_DEV_ERR(adev, + "The mp1(0x%x) ras config is not right!\n", + config->mp1_ip_version); + ret = -EINVAL; + break; + } + + return ret; +} + +static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_nbio_config *nbio_cfg = &config->nbio_cfg; + int ret = 0; + + switch (config->nbio_ip_version) { + case IP_VERSION(7, 9, 0): + nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9; + break; + default: + RAS_DEV_ERR(adev, + "The nbio(0x%x) ras config is not right!\n", + config->nbio_ip_version); + ret = -EINVAL; + break; + } + + return ret; +} + +static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core, + struct ras_psp_sys_status *status) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ta_context *context = &adev->psp.ras_context.context; + + status->initialized = context->initialized; + status->session_id = context->session_id; + status->psp_cmd_mutex = &adev->psp.mutex; + + return 0; +} + +static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context
*ras_core, + struct ras_ta_init_param *ras_ta_param) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t nps_mode; + + if (amdgpu_ras_is_poison_mode_supported(adev)) + ras_ta_param->poison_mode_en = 1; + + if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) + ras_ta_param->dgpu_mode = 1; + + ras_ta_param->xcc_mask = adev->gfx.xcc_mask; + ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2; + + ras_ta_param->active_umc_mask = adev->umc.active_mask; + + if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode)) + ras_ta_param->nps_mode = nps_mode; + + return 0; +} + +const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = { + .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status, + .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param, +}; + +static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_psp_config *psp_cfg = &config->psp_cfg; + + psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func; + + return 0; +} + +static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev, + struct ras_core_config *config) +{ + struct ras_umc_config *umc_cfg = &config->umc_cfg; + + umc_cfg->umc_vram_type = adev->gmc.vram_type; + + return 0; +} + +static struct ras_core_context *amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev) +{ + struct ras_core_config init_config; + + memset(&init_config, 0, sizeof(init_config)); + + init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0); + init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); + init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0); + init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0); + + if (init_config.umc_ip_version == IP_VERSION(12, 0, 0)) + init_config.aca_ip_version = IP_VERSION(1, 0, 0); + + init_config.sys_fn = &amdgpu_ras_sys_fn; + 
init_config.ras_eeprom_supported = true; + init_config.poison_supported = + amdgpu_ras_is_poison_mode_supported(adev); + + amdgpu_ras_mgr_init_aca_config(adev, &init_config); + amdgpu_ras_mgr_init_eeprom_config(adev, &init_config); + amdgpu_ras_mgr_init_mp1_config(adev, &init_config); + amdgpu_ras_mgr_init_nbio_config(adev, &init_config); + amdgpu_ras_mgr_init_psp_config(adev, &init_config); + amdgpu_ras_mgr_init_umc_config(adev, &init_config); + + return ras_core_create(&init_config); +} + +static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr; + int ret = 0; + + ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL); + if (!ras_mgr) + return -EINVAL; + + con->ras_mgr = ras_mgr; + ras_mgr->adev = adev; + + ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev); + if (!ras_mgr->ras_core) { + RAS_DEV_ERR(adev, "Failed to create ras core!\n"); + ret = -EINVAL; + goto err; + } + + ras_mgr->ras_core->dev = adev; + + amdgpu_ras_process_init(adev); + ras_core_sw_init(ras_mgr->ras_core); + amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core); + return 0; + +err: + kfree(ras_mgr); + return ret; +} + +static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr; + + if (!ras_mgr) + return 0; + + amdgpu_ras_process_fini(adev); + ras_core_sw_fini(ras_mgr->ras_core); + ras_core_destroy(ras_mgr->ras_core); + ras_mgr->ras_core = NULL; + + kfree(con->ras_mgr); + con->ras_mgr = NULL; + + return 0; +} + +static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + int ret; + + /* Currently only debug mode can enable the ras module 
+ */ + if (!adev->debug_enable_ras_aca) + return 0; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + ret = ras_core_hw_init(ras_mgr->ras_core); + if (ret) { + RAS_DEV_ERR(adev, "Failed to initialize ras core!\n"); + return ret; + } + + ras_mgr->ras_is_ready = true; + + amdgpu_enable_uniras(adev, true); + + RAS_DEV_INFO(adev, "AMDGPU RAS Is Ready.\n"); + return 0; +} + +static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + /* Currently only debug mode can enable the ras module + */ + if (!adev->debug_enable_ras_aca) + return 0; + + if (!ras_mgr || !ras_mgr->ras_core) + return -EINVAL; + + ras_core_hw_fini(ras_mgr->ras_core); + + ras_mgr->ras_is_ready = false; + + return 0; +} + +struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev) +{ + if (!adev || !adev->psp.ras_context.ras) + return NULL; + + return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr; +} + +static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = { + .name = "ras_v1_0", + .sw_init = amdgpu_ras_mgr_sw_init, + .sw_fini = amdgpu_ras_mgr_sw_fini, + .hw_init = amdgpu_ras_mgr_hw_init, + .hw_fini = amdgpu_ras_mgr_hw_fini, +}; + +const struct amdgpu_ip_block_version ras_v1_0_ip_block = { + .type = AMD_IP_BLOCK_TYPE_RAS, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &ras_v1_0_ip_funcs, +}; + +int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + return -EPERM; + + if (amdgpu_sriov_vf(adev)) + return -EPERM; + + RAS_DEV_INFO(adev, "Enable amdgpu unified ras!"); + return ras_core_set_status(ras_mgr->ras_core, enable); +} + +bool amdgpu_uniras_enabled(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core) + 
return false; + + if (amdgpu_sriov_vf(adev)) + return false; + + return ras_core_is_enabled(ras_mgr->ras_core); +} + +static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready && + ras_core_is_ready(ras_mgr->ras_core)) + return true; + + return false; +} + +int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return ras_core_handle_nbio_irq(ras_mgr->ras_core, data); +} + +uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev, + enum ras_seqno_type seqno_type) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + int ret; + uint64_t seq_no; + + if (!amdgpu_ras_mgr_is_ready(adev) || + (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)) + return 0; + + seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type); + + if ((seqno_type == RAS_SEQNO_TYPE_DE) || + (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) { + ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no); + if (ret) + RAS_DEV_WARN(adev, "There are too many ras interrupts!"); + } + + return seq_no; +} + +int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_ih_info *ih_info = (struct ras_ih_info *)data; + uint64_t seq_no = 0; + int ret = 0; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) { + if (ras_mgr->ras_core->poison_supported) { + seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE); + RAS_DEV_INFO(adev, + "{%llu} RAS poison is created, no user action is needed.\n", + seq_no); + } + + ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info); + } else if 
(ras_mgr->ras_core->poison_supported) { + ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info); + } else { + RAS_DEV_WARN(adev, + "No RAS interrupt handler for non-UMC block with poison disabled.\n"); + } + + return ret; +} + +int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data) +{ + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return amdgpu_ras_process_handle_consumption_interrupt(adev, data); +} + +int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + return ras_core_update_ecc_info(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + con->gpu_reset_flags |= flags; + return amdgpu_ras_reset_gpu(adev); +} + +bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return false; + + return ras_eeprom_check_safety_watermark(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, + uint32_t *nps_mode) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + uint32_t mode; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EINVAL; + + mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core); + if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE) + return -EINVAL; + + *nps_mode = mode; + + return 0; +} + +bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!amdgpu_ras_mgr_is_ready(adev)) + return false; + + return ras_umc_check_retired_addr(ras_mgr->ras_core, addr); +} + +bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev) 
+{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready) + return false; + + return ras_core_gpu_is_rma(ras_mgr->ras_core); +} + +int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev, + uint32_t cmd_id, void *input, uint32_t input_size, + void *output, uint32_t out_size) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_cmd_ctx *cmd_ctx; + uint32_t ctx_buf_size = PAGE_SIZE; + int ret; + + if (!amdgpu_ras_mgr_is_ready(adev)) + return -EPERM; + + cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL); + if (!cmd_ctx) + return -ENOMEM; + + cmd_ctx->cmd_id = cmd_id; + + memcpy(cmd_ctx->input_buff_raw, input, input_size); + cmd_ctx->input_size = input_size; + cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx); + + ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx); + if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size)) + memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size); + + kfree(cmd_ctx); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h new file mode 100644 index 000000000000..42f190a8feb9 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef __AMDGPU_RAS_MGR_H__ +#define __AMDGPU_RAS_MGR_H__ +#include "ras.h" +#include "amdgpu_ras_process.h" + +enum ras_ih_type { + RAS_IH_NONE, + RAS_IH_FROM_BLOCK_CONTROLLER, + RAS_IH_FROM_CONSUMER_CLIENT, + RAS_IH_FROM_FATAL_ERROR, +}; + +struct ras_ih_info { + uint32_t block; + union { + struct amdgpu_iv_entry iv_entry; + struct { + uint16_t pasid; + uint32_t reset; + pasid_notify pasid_fn; + void *data; + }; + }; +}; + +struct amdgpu_ras_mgr { + struct amdgpu_device *adev; + struct ras_core_context *ras_core; + struct delayed_work retire_page_dwork; + struct ras_event_manager ras_event_mgr; + uint64_t last_poison_consumption_seqno; + bool ras_is_ready; +}; + +extern const struct amdgpu_ip_block_version ras_v1_0_ip_block; + +struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context( + struct amdgpu_device *adev); +int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable); +bool amdgpu_uniras_enabled(struct amdgpu_device *adev); +int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data); +int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev); +int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags); +uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev, + enum ras_seqno_type seqno_type); +bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev); +int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode); +bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev, + uint64_t addr); +bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev); +int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev, + uint32_t cmd_id, void *input, uint32_t input_size, + void *output, uint32_t out_size); +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c 
b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c new file mode 100644 index 000000000000..79a51b1603ac --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu_smu.h" +#include "amdgpu_reset.h" +#include "amdgpu_ras_mp1_v13_0.h" + +#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A +#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B + +static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core, + u32 msg, u32 *count) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + u32 smu_msg; + int ret = 0; + + if (!count) + return -EINVAL; + + smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ? 
+ SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount; + + if (down_read_trylock(&adev->reset_domain->sem)) { + ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count); + up_read(&adev->reset_domain->sem); + } else { + ret = -RAS_CORE_GPU_IN_MODE1_RESET; + } + + if (ret) + *count = 0; + + return ret; +} + +static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core, + u32 msg, u32 idx, u32 reg_idx, u64 *val) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t data[2] = {0, 0}; + uint32_t param; + int ret = 0; + int i, offset; + u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ? + SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW; + + if (down_read_trylock(&adev->reset_domain->sem)) { + offset = reg_idx * 8; + for (i = 0; i < ARRAY_SIZE(data); i++) { + param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc); + ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]); + if (ret) { + RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n", + reg_idx, offset); + break; + } + } + up_read(&adev->reset_domain->sem); + + if (!ret) + *val = (uint64_t)data[1] << 32 | data[0]; + } else { + ret = -RAS_CORE_GPU_IN_MODE1_RESET; + } + + return ret; +} + +const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = { + .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count, + .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank, +}; + diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h new file mode 100644 index 000000000000..71c614ae1ae4 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __AMDGPU_RAS_MP1_V13_0_H__ +#define __AMDGPU_RAS_MP1_V13_0_H__ +#include "ras.h" + +extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c new file mode 100644 index 000000000000..2783f5875c7c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_nbio_v7_9.h" +#include "nbio/nbio_7_9_0_offset.h" +#include "nbio/nbio_7_9_0_sh_mask.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" + +static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + /* Dummy function, there is no initialization operation in driver */ + + return 0; +} + +static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for ras_controller_irq should be written + * to BIFring instead of general iv ring. However, due to known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be involked. Just left it as a dummy one. 
+ */ + return 0; +} + +static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + /* Dummy function, there is no initialization operation in driver */ + + return 0; +} + +static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to BIFring instead of general iv ring. However, due to known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be involked. Just left it as a dummy one. + */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = { + .set = nbio_v7_9_set_ras_controller_irq_state, + .process = nbio_v7_9_process_ras_controller_irq, +}; + +static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = { + .set = nbio_v7_9_set_ras_err_event_athub_irq_state, + .process = nbio_v7_9_process_err_event_athub_irq, +}; + +static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int r; + + /* init the irq funcs */ + adev->nbio.ras_controller_irq.funcs = + &nbio_v7_9_ras_controller_irq_funcs; + adev->nbio.ras_controller_irq.num_types = 1; + + /* register ras controller interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, + &adev->nbio.ras_controller_irq); + + return r; +} + +static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core, + bool state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v7_9_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 
1; + + /* register ras err event athub interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + + return r; +} + +const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = { + .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt, + .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h new file mode 100644 index 000000000000..272259e9a0e7 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __AMDGPU_RAS_NBIO_V7_9_H__ +#define __AMDGPU_RAS_NBIO_V7_9_H__ + +extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9; + +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c new file mode 100644 index 000000000000..6727fc9a2b9b --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "amdgpu.h" +#include "amdgpu_reset.h" +#include "amdgpu_xgmi.h" +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras_process.h" + +#define RAS_MGR_RETIRE_PAGE_INTERVAL 100 + +static void ras_process_retire_page_dwork(struct work_struct *work) +{ + struct amdgpu_ras_mgr *ras_mgr = + container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work); + struct amdgpu_device *adev = ras_mgr->adev; + int ret; + + if (amdgpu_ras_is_rma(adev)) + return; + + /* If gpu reset is ongoing, delay retiring the bad pages */ + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) { + schedule_delayed_work(&ras_mgr->retire_page_dwork, + msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3)); + return; + } + + ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL); + if (!ret) + schedule_delayed_work(&ras_mgr->retire_page_dwork, + msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL)); +} + +int amdgpu_ras_process_init(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork); + + return 0; +} + +int amdgpu_ras_process_fini(struct amdgpu_device *adev) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + /* Save all cached bad pages to eeprom */ + flush_delayed_work(&ras_mgr->retire_page_dwork); + cancel_delayed_work_sync(&ras_mgr->retire_page_dwork); + return 0; +} + +int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + + if (!ras_mgr->ras_core) + return -EINVAL; + + return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true); +} + +int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data) +{ + amdgpu_ras_set_fed(adev, true); + return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET); +} + +int amdgpu_ras_process_handle_consumption_interrupt(struct 
amdgpu_device *adev, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_ih_info *ih_info = (struct ras_ih_info *)data; + struct ras_event_req req; + uint64_t seqno; + + if (!ih_info) + return -EINVAL; + + memset(&req, 0, sizeof(req)); + req.block = ih_info->block; + req.data = ih_info->data; + req.pasid = ih_info->pasid; + req.pasid_fn = ih_info->pasid_fn; + req.reset = ih_info->reset; + + seqno = ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, false); + + /* When the ACA register cannot be read from FW, the poison + * consumption seqno in the fifo will not pop up, so it is + * necessary to check whether the seqno is the previous seqno. + */ + if (seqno == ras_mgr->last_poison_consumption_seqno) { + /* Pop and discard the previous seqno */ + ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, true); + seqno = ras_core_get_seqno(ras_mgr->ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, false); + } + ras_mgr->last_poison_consumption_seqno = seqno; + req.seqno = seqno; + + return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false); +} diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h new file mode 100644 index 000000000000..b9502bd21beb --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (c) 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#ifndef __AMDGPU_RAS_PROCESS_H__ +#define __AMDGPU_RAS_PROCESS_H__ +#include "ras_process.h" +#include "amdgpu_ras_mgr.h" + +enum ras_ih_type; +int amdgpu_ras_process_init(struct amdgpu_device *adev); +int amdgpu_ras_process_fini(struct amdgpu_device *adev); +int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, + void *data); +int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, + void *data); +#endif diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c new file mode 100644 index 000000000000..f21cd55a25be --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras_sys.h" +#include "amdgpu_ras_mgr.h" +#include "amdgpu_ras.h" +#include "amdgpu_reset.h" + +static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret; + uint64_t seq_no; + + ret = amdgpu_ras_global_ras_isr(adev); + if (ret) + return ret; + + seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE); + RAS_DEV_INFO(adev, + "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n", + seq_no); + + return amdgpu_ras_process_handle_unexpected_interrupt(adev, data); +} + +static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core, + void *data) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct ras_event_req *req = (struct ras_event_req *)data; + pasid_notify pasid_fn; + + if (!req) + return -EINVAL; + + if (req->pasid_fn) { + pasid_fn = (pasid_notify)req->pasid_fn; + pasid_fn(adev, req->pasid, req->data); + } + + return 0; +} + +static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t *seqno) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev); + struct ras_event_manager *event_mgr; + struct ras_event_state *event_state; + struct amdgpu_hive_info *hive; + enum ras_event_type event_type; + uint64_t seq_no; + + if (!ras_mgr || !seqno || + (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)) + return -EINVAL; + + switch (seqno_type) { + case RAS_SEQNO_TYPE_UE: + event_type = RAS_EVENT_TYPE_FATAL; + break; + case RAS_SEQNO_TYPE_CE: + case RAS_SEQNO_TYPE_DE: + event_type = RAS_EVENT_TYPE_POISON_CREATION; + break; + case RAS_SEQNO_TYPE_POISON_CONSUMPTION: + event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION; + break; + default: + event_type = RAS_EVENT_TYPE_INVALID; + break; + } + + hive = amdgpu_get_xgmi_hive(adev); + 
event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr; + event_state = &event_mgr->event_state[event_type]; + if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) { + seq_no = event_state->last_seqno; + } else { + seq_no = atomic64_inc_return(&event_mgr->seqno); + event_state->last_seqno = seq_no; + atomic64_inc(&event_state->count); + } + amdgpu_put_xgmi_hive(hive); + + *seqno = seq_no; + return 0; + +} + +static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data) +{ + struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev); + int ret = 0; + + switch (event_id) { + case RAS_EVENT_ID__BAD_PAGE_DETECTED: + schedule_delayed_work(&ras_mgr->retire_page_dwork, 0); + break; + case RAS_EVENT_ID__POISON_CONSUMPTION: + amdgpu_ras_sys_poison_consumption_event(ras_core, data); + break; + case RAS_EVENT_ID__RESERVE_BAD_PAGE: + ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data); + break; + case RAS_EVENT_ID__FATAL_ERROR_DETECTED: + ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data); + break; + case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM: + ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP: + ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data); + break; + case RAS_EVENT_ID__DEVICE_RMA: + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL); + ret = amdgpu_dpm_send_rma_reason(ras_core->dev); + break; + case RAS_EVENT_ID__RESET_GPU: + ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data); + break; + default: + RAS_DEV_WARN(ras_core->dev, "Invalid ras notify event:%d\n", event_id); + break; + } + + return ret; +} + +static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core) +{ + return ktime_get_real_seconds(); +} + +static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core, + uint32_t 
*status) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + uint32_t gpu_status = 0; + + if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) + gpu_status |= RAS_GPU_STATUS__IN_RESET; + + if (amdgpu_sriov_vf(adev)) + gpu_status |= RAS_GPU_STATUS__IS_VF; + + *status = gpu_status; + + return 0; +} + +static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + + dev_info->device_id = adev->pdev->device; + dev_info->vendor_id = adev->pdev->vendor; + dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev); + + return 0; +} + +static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core, + bool down, bool try) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + int ret = 0; + + if (down && try) + ret = down_read_trylock(&adev->reset_domain->sem); + else if (down) + down_read(&adev->reset_domain->sem); + else + up_read(&adev->reset_domain->sem); + + return ret; +} + +static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core) +{ + return !!atomic_read(&amdgpu_ras_in_intr); +} + +static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev; + struct psp_context *psp = &adev->psp; + struct psp_ring *psp_ring; + struct ta_mem_context *mem_ctx; + + if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) { + psp_ring = &psp->km_ring; + gpu_mem->mem_bo = adev->firmware.rbuf; + gpu_mem->mem_size = psp_ring->ring_size; + gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr; + gpu_mem->mem_cpu_addr = psp_ring->ring_mem; + } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) { + gpu_mem->mem_bo = psp->cmd_buf_bo; + gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE; + gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr; + gpu_mem->mem_cpu_addr = 
psp->cmd_buf_mem; + } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) { + gpu_mem->mem_bo = psp->fence_buf_bo; + gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE; + gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr; + gpu_mem->mem_cpu_addr = psp->fence_buf; + } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) { + gpu_mem->mem_bo = psp->fw_pri_bo; + gpu_mem->mem_size = PSP_1_MEG; + gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr; + gpu_mem->mem_cpu_addr = psp->fw_pri_buf; + } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) { + mem_ctx = &psp->ras_context.context.mem_context; + gpu_mem->mem_bo = mem_ctx->shared_bo; + gpu_mem->mem_size = mem_ctx->shared_mem_size; + gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr; + gpu_mem->mem_cpu_addr = mem_ctx->shared_buf; + } else { + return -EINVAL; + } + + if (!gpu_mem->mem_bo || !gpu_mem->mem_size || + !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) { + RAS_DEV_ERR(ras_core->dev, "The ras psp gpu memory is invalid!\n"); + return -ENOMEM; + } + + return 0; +} + +static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + + return 0; +} + +const struct ras_sys_func amdgpu_ras_sys_fn = { + .ras_notifier = amdgpu_ras_sys_event_notifier, + .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp, + .gen_seqno = amdgpu_ras_sys_gen_seqno, + .check_gpu_status = amdgpu_ras_sys_check_gpu_status, + .get_device_system_info = amdgpu_ras_sys_get_device_system_info, + .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock, + .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt, + .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem, + .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem, +}; diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h new file mode 100644 index 000000000000..8156531a7b63 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro 
Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_SYS_H__ +#define __RAS_SYS_H__ +#include +#include +#include +#include +#include "amdgpu.h" + +#define RAS_DEV_ERR(device, fmt, ...) \ + do { \ + if (device) \ + dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_ERR fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_WARN(device, fmt, ...) \ + do { \ + if (device) \ + dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_WARNING fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_INFO(device, fmt, ...) \ + do { \ + if (device) \ + dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_INFO fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_DEV_DBG(device, fmt, ...) 
\ + do { \ + if (device) \ + dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \ + else \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ + } while (0) + +#define RAS_INFO(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__) + +#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + 0, ip##_HWIP, inst); \ +}) + +#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ + value, 0, ip##_HWIP, inst); \ +}) + +/* GET_INST returns the physical instance corresponding to a logical instance */ +#define RAS_GET_INST(dev, ip, inst) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + adev->ip_map.logical_to_dev_inst ? \ + adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \ +}) + +#define RAS_GET_MASK(dev, ip, mask) \ +({ \ + struct amdgpu_device *adev = (struct amdgpu_device *)dev; \ + (adev->ip_map.logical_to_dev_mask ? 
\ + adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \ +}) + +static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter) +{ + return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index); +} + +static inline long ras_wait_event_interruptible_timeout(void *wq_head, + int (*condition)(void *param), void *param, unsigned int timeout) +{ + return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head, + condition(param), timeout); +} + +extern const struct ras_sys_func amdgpu_ras_sys_fn; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile index e69de29bb2d1..e826a1f86424 100644 --- a/drivers/gpu/drm/amd/ras/rascore/Makefile +++ b/drivers/gpu/drm/amd/ras/rascore/Makefile @@ -0,0 +1,44 @@ +# +# Copyright 2025 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+# +RAS_CORE_FILES = ras_core.o \ + ras_mp1.o \ + ras_mp1_v13_0.o \ + ras_aca.o \ + ras_aca_v1_0.o \ + ras_eeprom.o \ + ras_umc.o \ + ras_umc_v12_0.o \ + ras_cmd.o \ + ras_gfx.o \ + ras_gfx_v9_0.o \ + ras_process.o \ + ras_nbio.o \ + ras_nbio_v7_9.o \ + ras_log_ring.o \ + ras_cper.o \ + ras_psp.o \ + ras_psp_v13_0.o + + +RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES)) + +AMD_GPU_RAS_FILES += $(RAS_CORE) diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h new file mode 100644 index 000000000000..fa224b36e3f2 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_H__ +#define __RAS_H__ +#include "ras_sys.h" +#include "ras_umc.h" +#include "ras_aca.h" +#include "ras_eeprom.h" +#include "ras_core_status.h" +#include "ras_process.h" +#include "ras_gfx.h" +#include "ras_cmd.h" +#include "ras_nbio.h" +#include "ras_mp1.h" +#include "ras_psp.h" +#include "ras_log_ring.h" + +#define RAS_HW_ERR "[Hardware Error]: " + +#define RAS_GPU_PAGE_SHIFT 12 +#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT) +#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT) + +#define RAS_CORE_RESET_GPU 0x10000 + +#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001) +#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002) +#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004) + +enum ras_block_id { + RAS_BLOCK_ID__UMC = 0, + RAS_BLOCK_ID__SDMA, + RAS_BLOCK_ID__GFX, + RAS_BLOCK_ID__MMHUB, + RAS_BLOCK_ID__ATHUB, + RAS_BLOCK_ID__PCIE_BIF, + RAS_BLOCK_ID__HDP, + RAS_BLOCK_ID__XGMI_WAFL, + RAS_BLOCK_ID__DF, + RAS_BLOCK_ID__SMN, + RAS_BLOCK_ID__SEM, + RAS_BLOCK_ID__MP0, + RAS_BLOCK_ID__MP1, + RAS_BLOCK_ID__FUSE, + RAS_BLOCK_ID__MCA, + RAS_BLOCK_ID__VCN, + RAS_BLOCK_ID__JPEG, + RAS_BLOCK_ID__IH, + RAS_BLOCK_ID__MPIO, + + RAS_BLOCK_ID__LAST +}; + +enum ras_ecc_err_type { + RAS_ECC_ERR__NONE = 0, + RAS_ECC_ERR__PARITY = 1, + RAS_ECC_ERR__SINGLE_CORRECTABLE = 2, + RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4, + RAS_ECC_ERR__POISON = 8, +}; + +enum ras_err_type { + RAS_ERR_TYPE__UE = 0, + RAS_ERR_TYPE__CE, + RAS_ERR_TYPE__DE, + RAS_ERR_TYPE__LAST +}; + +enum ras_seqno_type { + RAS_SEQNO_TYPE_INVALID = 0, + RAS_SEQNO_TYPE_UE, + RAS_SEQNO_TYPE_CE, + RAS_SEQNO_TYPE_DE, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, + RAS_SEQNO_TYPE_COUNT_MAX, +}; + +enum ras_seqno_fifo { + SEQNO_FIFO_INVALID = 0, + SEQNO_FIFO_POISON_CREATION, + SEQNO_FIFO_POISON_CONSUMPTION, + SEQNO_FIFO_COUNT_MAX +}; + +enum ras_notify_event { + RAS_EVENT_ID__NONE, + RAS_EVENT_ID__BAD_PAGE_DETECTED, + RAS_EVENT_ID__POISON_CONSUMPTION, + 
RAS_EVENT_ID__RESERVE_BAD_PAGE, + RAS_EVENT_ID__DEVICE_RMA, + RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + RAS_EVENT_ID__FATAL_ERROR_DETECTED, + RAS_EVENT_ID__RESET_GPU, + RAS_EVENT_ID__RESET_VF, +}; + +enum ras_gpu_status { + RAS_GPU_STATUS__NOT_READY = 0, + RAS_GPU_STATUS__READY = 0x1, + RAS_GPU_STATUS__IN_RESET = 0x2, + RAS_GPU_STATUS__IS_RMA = 0x4, + RAS_GPU_STATUS__IS_VF = 0x8, +}; + +struct ras_core_context; +struct ras_bank_ecc; +struct ras_umc; +struct ras_aca; +struct ras_process; +struct ras_nbio; +struct ras_log_ring; +struct ras_psp; + +struct ras_mp1_sys_func { + int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core, + u32 msg, u32 *count); + int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core, + u32 msg, u32 idx, u32 reg_idx, u64 *val); +}; + +struct ras_eeprom_sys_func { + int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read); + int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core); +}; + +struct ras_nbio_sys_func { + int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core, + bool state); + int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core, + bool state); +}; + +struct ras_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + long tm_year; +}; + +struct device_system_info { + uint32_t device_id; + uint32_t vendor_id; + uint32_t socket_id; +}; + +enum gpu_mem_type { + GPU_MEM_TYPE_DEFAULT, + GPU_MEM_TYPE_RAS_PSP_RING, + GPU_MEM_TYPE_RAS_PSP_CMD, + GPU_MEM_TYPE_RAS_PSP_FENCE, + GPU_MEM_TYPE_RAS_TA_FW, + GPU_MEM_TYPE_RAS_TA_CMD, +}; + +struct ras_psp_sys_func { + int (*get_ras_psp_system_status)(struct ras_core_context *ras_core, + struct ras_psp_sys_status *status); + int (*get_ras_ta_init_param)(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param); +}; + +struct ras_sys_func { + int (*gpu_reset_lock)(struct ras_core_context 
*ras_core, + bool down, bool try); + int (*check_gpu_status)(struct ras_core_context *ras_core, + uint32_t *status); + int (*gen_seqno)(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t *seqno); + int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data); + int (*ras_notifier)(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data); + u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core); + int (*get_device_system_info)(struct ras_core_context *ras_core, + struct device_system_info *dev_info); + bool (*detect_ras_interrupt)(struct ras_core_context *ras_core); + int (*get_gpu_mem)(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); + int (*put_gpu_mem)(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +}; + +struct ras_ecc_count { + uint64_t new_ce_count; + uint64_t total_ce_count; + uint64_t new_ue_count; + uint64_t total_ue_count; + uint64_t new_de_count; + uint64_t total_de_count; +}; + +struct ras_bank_ecc { + uint32_t nps; + uint64_t seq_no; + uint64_t status; + uint64_t ipid; + uint64_t addr; +}; + +struct ras_bank_ecc_node { + struct list_head node; + struct ras_bank_ecc ecc; +}; + +struct ras_aca_config { + u32 socket_num_per_hive; + u32 aid_num_per_socket; + u32 xcd_num_per_aid; +}; + +struct ras_mp1_config { + const struct ras_mp1_sys_func *mp1_sys_fn; +}; + +struct ras_nbio_config { + const struct ras_nbio_sys_func *nbio_sys_fn; +}; + +struct ras_psp_config { + const struct ras_psp_sys_func *psp_sys_fn; +}; + +struct ras_umc_config { + uint32_t umc_vram_type; +}; + +struct ras_eeprom_config { + const struct ras_eeprom_sys_func *eeprom_sys_fn; + int eeprom_record_threshold_config; + uint32_t eeprom_record_threshold_count; + void *eeprom_i2c_adapter; + u32 eeprom_i2c_addr; + u32 eeprom_i2c_port; + u16 max_i2c_read_len; + u16 max_i2c_write_len; +}; + +struct ras_core_config { + u32 
aca_ip_version; + u32 umc_ip_version; + u32 mp1_ip_version; + u32 gfx_ip_version; + u32 nbio_ip_version; + u32 psp_ip_version; + + bool poison_supported; + bool ras_eeprom_supported; + const struct ras_sys_func *sys_fn; + + struct ras_aca_config aca_cfg; + struct ras_mp1_config mp1_cfg; + struct ras_nbio_config nbio_cfg; + struct ras_psp_config psp_cfg; + struct ras_eeprom_config eeprom_cfg; + struct ras_umc_config umc_cfg; +}; + +struct ras_core_context { + void *dev; + struct ras_core_config *config; + u32 socket_num_per_hive; + u32 aid_num_per_socket; + u32 xcd_num_per_aid; + int max_ue_banks_per_query; + int max_ce_banks_per_query; + struct ras_aca ras_aca; + + bool ras_eeprom_supported; + struct ras_eeprom_control ras_eeprom; + + struct ras_psp ras_psp; + struct ras_umc ras_umc; + struct ras_nbio ras_nbio; + struct ras_gfx ras_gfx; + struct ras_mp1 ras_mp1; + struct ras_process ras_proc; + struct ras_cmd_mgr ras_cmd; + struct ras_log_ring ras_log_ring; + + const struct ras_sys_func *sys_fn; + + /* is poison mode supported */ + bool poison_supported; + + bool is_rma; + bool is_initialized; + + struct kfifo de_seqno_fifo; + struct kfifo consumption_seqno_fifo; + spinlock_t seqno_lock; + + bool ras_core_enabled; +}; + +struct ras_core_context *ras_core_create(struct ras_core_config *init_config); +void ras_core_destroy(struct ras_core_context *ras_core); +int ras_core_sw_init(struct ras_core_context *ras_core); +int ras_core_sw_fini(struct ras_core_context *ras_core); +int ras_core_hw_init(struct ras_core_context *ras_core); +int ras_core_hw_fini(struct ras_core_context *ras_core); +bool ras_core_is_ready(struct ras_core_context *ras_core); +uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type); +uint64_t ras_core_get_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, bool pop); + +int ras_core_put_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t seqno); + +int 
ras_core_update_ecc_info(struct ras_core_context *ras_core); +int ras_core_query_block_ecc_data(struct ras_core_context *ras_core, + enum ras_block_id block, struct ras_ecc_count *ecc_count); + +bool ras_core_gpu_in_reset(struct ras_core_context *ras_core); +bool ras_core_gpu_is_rma(struct ras_core_context *ras_core); +bool ras_core_gpu_is_vf(struct ras_core_context *ras_core); +bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data); +int ras_core_handle_fatal_error(struct ras_core_context *ras_core); + +uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core); +const char *ras_core_get_ras_block_name(enum ras_block_id block_id); +int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core, + uint64_t timestamp, struct ras_time *tm); + +int ras_core_set_status(struct ras_core_context *ras_core, bool enable); +bool ras_core_is_enabled(struct ras_core_context *ras_core); +uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core); +int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa); +bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core); +int ras_core_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +int ras_core_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem); +bool ras_core_check_safety_watermark(struct ras_core_context *ras_core); +int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core); +void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core); +void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core); +int ras_core_event_notify(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data); +int ras_core_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info); +#endif 
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c new file mode 100644 index 000000000000..e433c70d2989 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c @@ -0,0 +1,672 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_aca.h" +#include "ras_aca_v1_0.h" +#include "ras_mp1_v13_0.h" + +#define ACA_MARK_FATAL_FLAG 0x100 +#define ACA_MARK_UE_READ_FLAG 0x1 + +#define blk_name(block_id) ras_core_get_ras_block_name(block_id) + +static struct aca_regs_dump { + const char *name; + int reg_idx; +} aca_regs[] = { + {"CONTROL", ACA_REG_IDX__CTL}, + {"STATUS", ACA_REG_IDX__STATUS}, + {"ADDR", ACA_REG_IDX__ADDR}, + {"MISC", ACA_REG_IDX__MISC0}, + {"CONFIG", ACA_REG_IDX__CONFG}, + {"IPID", ACA_REG_IDX__IPID}, + {"SYND", ACA_REG_IDX__SYND}, + {"DESTAT", ACA_REG_IDX__DESTAT}, + {"DEADDR", ACA_REG_IDX__DEADDR}, + {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK}, +}; + + +static void aca_report_ecc_info(struct ras_core_context *ras_core, + u64 seq_no, u32 blk, u32 skt, u32 aid, + struct aca_aid_ecc *aid_ecc, + struct aca_bank_ecc *new_ecc) +{ + struct aca_ecc_count ecc_count = {0}; + + ecc_count.new_ue_count = new_ecc->ue_count; + ecc_count.new_de_count = new_ecc->de_count; + ecc_count.new_ce_count = new_ecc->ce_count; + if (blk == RAS_BLOCK_ID__GFX) { + struct aca_ecc_count *xcd_ecc; + int xcd_id; + + for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) { + xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err; + ecc_count.total_ue_count += xcd_ecc->total_ue_count; + ecc_count.total_de_count += xcd_ecc->total_de_count; + ecc_count.total_ce_count += xcd_ecc->total_ce_count; + } + } else { + ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count; + ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count; + ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count; + } + + if (ecc_count.new_ue_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n", + seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_ue_count, 
blk_name(blk)); + } + + if (ecc_count.new_de_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n", + seq_no, skt, aid, ecc_count.new_de_count, + (blk == RAS_BLOCK_ID__UMC) ? + "deferred hardware errors" : "poison consumption", + blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_de_count, + (blk == RAS_BLOCK_ID__UMC) ? + "deferred hardware errors" : "poison consumption", + blk_name(blk)); + } + + if (ecc_count.new_ce_count) { + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n", + seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk)); + RAS_DEV_INFO(ras_core->dev, + "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n", + seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk)); + } +} + +static void aca_bank_log(struct ras_core_context *ras_core, + int idx, int total, struct aca_bank_reg *bank, + struct aca_bank_ecc *bank_ecc) +{ + int i; + + RAS_DEV_INFO(ras_core->dev, + "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n", + bank->seq_no); + /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ + for (i = 0; i < ARRAY_SIZE(aca_regs); i++) + RAS_DEV_INFO(ras_core->dev, + "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", + bank->seq_no, idx + 1, total, + aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); +} + +static void aca_log_bank_data(struct ras_core_context *ras_core, + struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc, + struct ras_log_batch_tag *batch) +{ + if (bank_ecc->ue_count) + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch); + else if (bank_ecc->de_count) + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch); + else + ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch); +} + +static int 
aca_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + return ras_mp1_get_bank_count(ras_core, type, count); +} + +static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank) +{ + const struct aca_bank_hw_ops *bank_ops; + + if (!aca_blk->blk_info) + return false; + + bank_ops = &aca_blk->blk_info->bank_ops; + if (!bank_ops->bank_match) + return false; + + return bank_ops->bank_match(aca_blk, bank); +} + +static int aca_parse_bank(struct ras_core_context *ras_core, + struct aca_block *aca_blk, + struct aca_bank_reg *bank, + struct aca_bank_ecc *ecc) +{ + const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops; + + if (!bank_ops || !bank_ops->bank_parse) + return -RAS_CORE_NOT_SUPPORTED; + + return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc); +} + +static int aca_check_block_ecc_info(struct ras_core_context *ras_core, + struct aca_block *aca_blk, struct aca_ecc_info *info) +{ + if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) { + RAS_DEV_ERR(ras_core->dev, + "Socket id (%d) is out of config! max:%u\n", + info->socket_id, aca_blk->ecc.socket_num_per_hive); + return -ENODATA; + } + + if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) { + RAS_DEV_ERR(ras_core->dev, + "Die id (%d) is out of config! max:%u\n", + info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num); + return -ENODATA; + } + + if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) && + (info->xcd_id >= + aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) { + RAS_DEV_ERR(ras_core->dev, + "Xcd id (%d) is out of config! 
max:%u\n", + info->xcd_id, + aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num); + return -ENODATA; + } + + return 0; +} + +static int aca_log_bad_bank(struct ras_core_context *ras_core, + struct aca_block *aca_blk, struct aca_bank_reg *bank, + struct aca_bank_ecc *bank_ecc) +{ + struct aca_ecc_info *info; + struct aca_ecc_count *ecc_err; + struct aca_aid_ecc *aid_ecc; + int ret; + + info = &bank_ecc->bank_info; + + ret = aca_check_block_ecc_info(ras_core, aca_blk, info); + if (ret) + return ret; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id]; + if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) + ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err; + else + ecc_err = &aid_ecc->ecc_err; + + ecc_err->new_ce_count += bank_ecc->ce_count; + ecc_err->total_ce_count += bank_ecc->ce_count; + ecc_err->new_ue_count += bank_ecc->ue_count; + ecc_err->total_ue_count += bank_ecc->ue_count; + ecc_err->new_de_count += bank_ecc->de_count; + ecc_err->total_de_count += bank_ecc->de_count; + mutex_unlock(&ras_core->ras_aca.aca_lock); + + if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) && + bank_ecc->de_count) { + struct ras_bank_ecc ras_ecc = {0}; + + ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core); + ras_ecc.addr = bank_ecc->bank_info.addr; + ras_ecc.ipid = bank_ecc->bank_info.ipid; + ras_ecc.status = bank_ecc->bank_info.status; + ras_ecc.seq_no = bank->seq_no; + + if (ras_core_gpu_in_reset(ras_core)) + ras_umc_log_bad_bank_pending(ras_core, &ras_ecc); + else + ras_umc_log_bad_bank(ras_core, &ras_ecc); + } + + aca_report_ecc_info(ras_core, + bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id, + &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc); + + return 0; +} + +static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core, + struct aca_bank_reg *bank) +{ + int i = 0; + + for (i = 0; i < RAS_BLOCK_ID__LAST; i++) + if 
(aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank)) + return &ras_core->ras_aca.aca_blk[i]; + + return NULL; +} + +static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type, + int idx, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + int i, ret, reg_cnt; + + reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs)); + for (i = 0; i < reg_cnt; i++) { + ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]); + if (ret) + return ret; + } + + return 0; +} + +static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core, + enum ras_err_type err_type, struct aca_block *aca_blk, + struct aca_bank_ecc *bank_ecc) +{ + uint64_t seq_no = 0; + + if (bank_ecc->de_count) { + if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true); + else + seq_no = ras_core_get_seqno(ras_core, + RAS_SEQNO_TYPE_POISON_CONSUMPTION, true); + } else if (bank_ecc->ue_count) { + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true); + } else { + seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true); + } + + return seq_no; +} + +static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core, + u32 ecc_type) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (ecc_type != RAS_ERR_TYPE__UE) + return false; + + if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) { + if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG) + return true; + + aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG; + } + + return false; +} + +void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (!aca) + return; + + aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG; +} + +void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core) +{ + struct ras_aca *aca = &ras_core->ras_aca; + + if (!aca) + return; + + if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) && + (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)) + aca->ue_updated_mark = 0; 
+} + +static int aca_banks_update(struct ras_core_context *ras_core, + u32 ecc_type, void *data) +{ + struct aca_bank_reg bank; + struct aca_block *aca_blk; + struct aca_bank_ecc bank_ecc; + struct ras_log_batch_tag *batch_tag = NULL; + u32 count = 0; + int ret = 0; + int i; + + mutex_lock(&ras_core->ras_aca.bank_op_lock); + + if (aca_dup_update_ue_in_fatal(ras_core, ecc_type)) + goto out; + + ret = aca_get_bank_count(ras_core, ecc_type, &count); + if (ret) + goto out; + + if (!count) + goto out; + + batch_tag = ras_log_ring_create_batch_tag(ras_core); + for (i = 0; i < count; i++) { + memset(&bank, 0, sizeof(bank)); + ret = aca_dump_bank(ras_core, ecc_type, i, &bank); + if (ret) + break; + + bank.ecc_type = ecc_type; + + memset(&bank_ecc, 0, sizeof(bank_ecc)); + aca_blk = aca_get_bank_aca_block(ras_core, &bank); + if (aca_blk) + ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc); + + bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc); + + aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag); + aca_bank_log(ras_core, i, count, &bank, &bank_ecc); + + if (!ret && aca_blk) + ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc); + + if (ret) + break; + } + ras_log_ring_destroy_batch_tag(ras_core, batch_tag); + +out: + mutex_unlock(&ras_core->ras_aca.bank_op_lock); + return ret; +} + +int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data) +{ + /* Update aca bank to aca source error_cache first */ + return aca_banks_update(ras_core, type, data); +} + +static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk) +{ + return &ras_core->ras_aca.aca_blk[blk]; +} + +static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk) +{ + struct aca_block *aca_blk; + struct aca_aid_ecc *aid_ecc; + int skt, aid, xcd; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aca_blk = ras_aca_get_block_handle(ras_core, blk); + for (skt = 0; skt < 
aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + aid_ecc = &aca_blk->ecc.socket[skt].aid[aid]; + if (blk == RAS_BLOCK_ID__GFX) { + for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) + memset(&aid_ecc->xcd.xcd[xcd], + 0, sizeof(struct aca_xcd_ecc)); + } else { + memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err)); + } + } + } + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core) +{ + enum ras_block_id blk; + int ret; + + for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) { + ret = ras_aca_clear_block_ecc_count(ras_core, blk); + if (ret) + break; + } + + return ret; +} + +int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk) +{ + struct aca_block *aca_blk; + int skt, aid, xcd; + struct aca_ecc_count *ecc_err; + struct aca_aid_ecc *aid_ecc; + + mutex_lock(&ras_core->ras_aca.aca_lock); + aca_blk = ras_aca_get_block_handle(ras_core, blk); + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + aid_ecc = &aca_blk->ecc.socket[skt].aid[aid]; + if (blk == RAS_BLOCK_ID__GFX) { + for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) { + ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err; + ecc_err->new_ce_count = 0; + ecc_err->new_ue_count = 0; + ecc_err->new_de_count = 0; + } + } else { + ecc_err = &aid_ecc->ecc_err; + ecc_err->new_ce_count = 0; + ecc_err->new_ue_count = 0; + ecc_err->new_de_count = 0; + } + } + } + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core, + u32 blk, u32 skt, u32 aid, u32 xcd, + struct aca_ecc_count *ecc_count) +{ + struct aca_block *aca_blk; + struct aca_ecc_count *ecc_err; + + aca_blk = ras_aca_get_block_handle(ras_core, blk); + if (blk == RAS_BLOCK_ID__GFX) + ecc_err = 
&aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err; + else + ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err; + + ecc_count->new_ce_count = ecc_err->new_ce_count; + ecc_count->total_ce_count = ecc_err->total_ce_count; + ecc_count->new_ue_count = ecc_err->new_ue_count; + ecc_count->total_ue_count = ecc_err->total_ue_count; + ecc_count->new_de_count = ecc_err->new_de_count; + ecc_count->total_de_count = ecc_err->total_de_count; + + return 0; +} + +static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src) +{ + des->new_ce_count += src->new_ce_count; + des->total_ce_count += src->total_ce_count; + des->new_ue_count += src->new_ue_count; + des->total_ue_count += src->total_ue_count; + des->new_de_count += src->new_de_count; + des->total_de_count += src->total_de_count; +} + +static const struct ras_aca_ip_func *aca_get_ip_func( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(1, 0, 0): + return &ras_aca_func_v1_0; + default: + RAS_DEV_ERR(ras_core->dev, + "ACA ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, + u32 blk, void *data) +{ + struct ras_ecc_count *err_data = (struct ras_ecc_count *)data; + struct aca_block *aca_blk; + int skt, aid, xcd; + struct aca_ecc_count ecc_xcd; + struct aca_ecc_count ecc_aid; + struct aca_ecc_count ecc; + + if (blk >= RAS_BLOCK_ID__LAST) + return -EINVAL; + + if (!err_data) + return -EINVAL; + + aca_blk = ras_aca_get_block_handle(ras_core, blk); + memset(&ecc, 0, sizeof(ecc)); + + mutex_lock(&ras_core->ras_aca.aca_lock); + if (blk == RAS_BLOCK_ID__GFX) { + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + memset(&ecc_aid, 0, sizeof(ecc_aid)); + for (xcd = 0; + xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num; + xcd++) { + memset(&ecc_xcd, 0, sizeof(ecc_xcd)); 
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core, + blk, skt, aid, xcd, &ecc_xcd)) + continue; + _add_ecc_count(&ecc_aid, &ecc_xcd); + } + _add_ecc_count(&ecc, &ecc_aid); + } + } + } else { + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) { + memset(&ecc_aid, 0, sizeof(ecc_aid)); + if (ras_aca_get_block_each_aid_ecc_count(ras_core, + blk, skt, aid, 0, &ecc_aid)) + continue; + _add_ecc_count(&ecc, &ecc_aid); + } + } + } + + err_data->new_ce_count = ecc.new_ce_count; + err_data->total_ce_count = ecc.total_ce_count; + err_data->new_ue_count = ecc.new_ue_count; + err_data->total_ue_count = ecc.total_ue_count; + err_data->new_de_count = ecc.new_de_count; + err_data->total_de_count = ecc.total_de_count; + mutex_unlock(&ras_core->ras_aca.aca_lock); + + return 0; +} + +int ras_aca_sw_init(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg; + struct aca_block *aca_blk; + uint32_t socket_num_per_hive; + uint32_t aid_num_per_socket; + uint32_t xcd_num_per_aid; + int blk, skt, aid; + + socket_num_per_hive = aca_cfg->socket_num_per_hive; + aid_num_per_socket = aca_cfg->aid_num_per_socket; + xcd_num_per_aid = aca_cfg->xcd_num_per_aid; + + if (!xcd_num_per_aid || !aid_num_per_socket || + (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) || + (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) || + (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) { + RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %d, %d, %d\n", + socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid); + return -EINVAL; + } + + memset(ras_aca, 0, sizeof(*ras_aca)); + + for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) { + aca_blk = &ras_aca->aca_blk[blk]; + aca_blk->ecc.socket_num_per_hive = socket_num_per_hive; + for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) { + aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket; + if 
(blk == RAS_BLOCK_ID__GFX) { + for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) + aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num = + xcd_num_per_aid; + } + } + } + + mutex_init(&ras_aca->aca_lock); + mutex_init(&ras_aca->bank_op_lock); + + return 0; +} + +int ras_aca_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + + mutex_destroy(&ras_aca->aca_lock); + mutex_destroy(&ras_aca->bank_op_lock); + + return 0; +} + +int ras_aca_hw_init(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + struct aca_block *aca_blk; + const struct ras_aca_ip_func *ip_func; + int i; + + ras_aca->aca_ip_version = ras_core->config->aca_ip_version; + ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version); + if (!ip_func) + return -EINVAL; + + for (i = 0; i < ip_func->block_num; i++) { + aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id]; + aca_blk->blk_info = ip_func->block_info[i]; + } + + ras_aca->ue_updated_mark = 0; + + return 0; +} + +int ras_aca_hw_fini(struct ras_core_context *ras_core) +{ + struct ras_aca *ras_aca = &ras_core->ras_aca; + + ras_aca->ue_updated_mark = 0; + + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h new file mode 100644 index 000000000000..f61b02a5f0fc --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_ACA_H__ +#define __RAS_ACA_H__ +#include "ras.h" + +#define MAX_SOCKET_NUM_PER_HIVE 8 +#define MAX_AID_NUM_PER_SOCKET 4 +#define MAX_XCD_NUM_PER_AID 2 +#define MAX_ACA_RAS_BLOCK 20 + +#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE) +#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE) +#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE) + +enum ras_aca_reg_idx { + ACA_REG_IDX__CTL = 0, + ACA_REG_IDX__STATUS = 1, + ACA_REG_IDX__ADDR = 2, + ACA_REG_IDX__MISC0 = 3, + ACA_REG_IDX__CONFG = 4, + ACA_REG_IDX__IPID = 5, + ACA_REG_IDX__SYND = 6, + ACA_REG_IDX__DESTAT = 8, + ACA_REG_IDX__DEADDR = 9, + ACA_REG_IDX__CTL_MASK = 10, + ACA_REG_MAX_COUNT = 16, +}; + +struct ras_core_context; +struct aca_block; + +struct aca_bank_reg { + u32 ecc_type; + u64 seq_no; + u64 regs[ACA_REG_MAX_COUNT]; +}; + +enum aca_ecc_hwip { + ACA_ECC_HWIP__UNKNOWN = -1, + ACA_ECC_HWIP__PSP = 0, + ACA_ECC_HWIP__UMC, + ACA_ECC_HWIP__SMU, + ACA_ECC_HWIP__PCS_XGMI, + ACA_ECC_HWIP_COUNT, +}; + +struct aca_ecc_info { + int die_id; + int socket_id; + int xcd_id; + int hwid; + int mcatype; + uint64_t status; + uint64_t ipid; + uint64_t addr; +}; + +struct aca_bank_ecc { + struct aca_ecc_info bank_info; + u32 ce_count; + u32 ue_count; + u32 de_count; +}; + +struct aca_ecc_count { + u32 new_ce_count; + u32 total_ce_count; + u32 new_ue_count; + u32 total_ue_count; + u32 new_de_count; + u32 total_de_count; +}; + +struct aca_xcd_ecc { + struct aca_ecc_count ecc_err; +}; + +struct aca_aid_ecc { + union { + struct aca_xcd { + struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID]; + u32 xcd_num; + } xcd; + struct aca_ecc_count ecc_err; + }; +}; + +struct aca_socket_ecc { + struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET]; + u32 aid_num; +}; + +struct aca_block_ecc { + struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE]; + u32 socket_num_per_hive; +}; + +struct aca_bank_hw_ops { + bool (*bank_match)(struct aca_block *ras_blk, void *data); + int (*bank_parse)(struct ras_core_context *ras_core, + 
struct aca_block *aca_blk, void *data, void *buf); +}; + +struct aca_block_info { + char name[32]; + u32 ras_block_id; + enum aca_ecc_hwip hwip; + struct aca_bank_hw_ops bank_ops; + u32 mask; +}; + +struct aca_block { + const struct aca_block_info *blk_info; + struct aca_block_ecc ecc; +}; + +struct ras_aca_ip_func { + uint32_t block_num; + const struct aca_block_info **block_info; +}; + +struct ras_aca { + uint32_t aca_ip_version; + const struct ras_aca_ip_func *ip_func; + struct mutex aca_lock; + struct mutex bank_op_lock; + struct aca_block aca_blk[MAX_ACA_RAS_BLOCK]; + uint32_t ue_updated_mark; +}; + +int ras_aca_sw_init(struct ras_core_context *ras_core); +int ras_aca_sw_fini(struct ras_core_context *ras_core); +int ras_aca_hw_init(struct ras_core_context *ras_core); +int ras_aca_hw_fini(struct ras_core_context *ras_core); +int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data); +int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk); +int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core); +int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data); +void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core); +void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c new file mode 100644 index 000000000000..29df98948703 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c @@ -0,0 +1,379 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_aca.h" +#include "ras_core_status.h" +#include "ras_aca_v1_0.h" + +struct ras_aca_hwip { + int hwid; + int mcatype; +}; + +static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = { + [ACA_ECC_HWIP__SMU] = {0x01, 0x01}, + [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00}, + [ACA_ECC_HWIP__UMC] = {0x96, 0x00}, +}; + +static int aca_decode_bank_info(struct aca_block *aca_blk, + struct aca_bank_reg *bank, struct aca_ecc_info *info) +{ + u64 ipid; + u32 instidhi, instidlo; + + ipid = bank->regs[ACA_REG_IDX__IPID]; + info->hwid = ACA_REG_IPID_HARDWAREID(ipid); + info->mcatype = ACA_REG_IPID_MCATYPE(ipid); + /* + * Unified DieID Format: SAASS. A:AID, S:Socket. 
+ * Unified DieID[4:4] = InstanceId[0:0] + * Unified DieID[0:3] = InstanceIdHi[0:3] + */ + instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid); + instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid); + info->die_id = ((instidhi >> 2) & 0x03); + info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03); + + if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) && + (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)) + info->xcd_id = + ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 0 : 1; + + return 0; +} + +static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type) +{ + struct ras_aca_hwip *hwip; + int hwid, mcatype; + u64 ipid; + + if (!bank || (type == ACA_ECC_HWIP__UNKNOWN)) + return false; + + hwip = &aca_hwid_mcatypes[type]; + if (!hwip->hwid) + return false; + + ipid = bank->regs[ACA_REG_IDX__IPID]; + hwid = ACA_REG_IPID_HARDWAREID(ipid); + mcatype = ACA_REG_IPID_MCATYPE(ipid); + + return hwip->hwid == hwid && hwip->mcatype == mcatype; +} + +static bool aca_match_bank_default(struct aca_block *aca_blk, void *data) +{ + return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip); +} + +static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + u32 instlo; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + switch (instlo) { + case mmSMNAID_XCD0_MCA_SMU: + case mmSMNAID_XCD1_MCA_SMU: + case mmSMNXCD_XCD0_MCA_SMU: + return true; + default: + break; + } + + return false; +} + +static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + /* CODE_SDMA0 - CODE_SDMA4, reference to smu driver if header file */ + static int sdma_err_codes[] = { 33, 34, 35, 36 }; + u32 instlo; + int errcode, i; + + if (!aca_check_bank_hwip(bank, 
aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]); + errcode &= 0xff; + + /* Check SDMA error codes */ + for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) { + if (errcode == sdma_err_codes[i]) + return true; + } + + return false; +} + +static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + /* reference to smu driver if header file */ + const int mmhub_err_codes[] = { + 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */ + 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */ + 10, /* CODE_UTCL2_ROUTER */ + 11, /* CODE_VML2 */ + 12, /* CODE_VML2_WALKER */ + 13, /* CODE_MMCANE */ + }; + u32 instlo; + int errcode, i; + + if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip)) + return false; + + instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]); + instlo &= GENMASK_ULL(31, 1); + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]); + errcode &= 0xff; + + /* Check MMHUB error codes */ + for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) { + if (errcode == mmhub_err_codes[i]) + return true; + } + + return false; +} + +static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + return (ras_core->poison_supported && + ACA_REG_STATUS_VAL(mc_umc_status) && + ACA_REG_STATUS_DEFERRED(mc_umc_status)); +} + +static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status) +{ + if (aca_check_umc_de(ras_core, mc_umc_status)) + return false; + + return (ACA_REG_STATUS_VAL(mc_umc_status) && + (ACA_REG_STATUS_PCC(mc_umc_status) || + ACA_REG_STATUS_UC(mc_umc_status) || + ACA_REG_STATUS_TCC(mc_umc_status))); +} + +static bool aca_check_umc_ce(struct ras_core_context *ras_core, 
uint64_t mc_umc_status) +{ + if (aca_check_umc_de(ras_core, mc_umc_status)) + return false; + + return (ACA_REG_STATUS_VAL(mc_umc_status) && + (ACA_REG_STATUS_CECC(mc_umc_status) || + (ACA_REG_STATUS_UECC(mc_umc_status) && + ACA_REG_STATUS_UC(mc_umc_status) == 0) || + /* Identify data parity error in replay mode */ + ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 || + ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) && + !(aca_check_umc_ue(ras_core, mc_umc_status))))); +} + +static int aca_parse_umc_bank(struct ras_core_context *ras_core, + struct aca_block *ras_blk, void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + uint32_t ext_error_code; + uint64_t status0; + + status0 = bank->regs[ACA_REG_IDX__STATUS]; + if (!ACA_REG_STATUS_VAL(status0)) + return 0; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS]; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0); + + if (aca_check_umc_de(ras_core, status0)) + ecc->de_count = 1; + else if (aca_check_umc_ue(ras_core, status0)) + ecc->ue_count = ext_error_code ? + 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + else if (aca_check_umc_ce(ras_core, status0)) + ecc->ce_count = ext_error_code ? 
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + + return 0; +} + +static bool aca_check_bank_is_de(struct ras_core_context *ras_core, + uint64_t status) +{ + return (ACA_REG_STATUS_POISON(status) || + ACA_REG_STATUS_DEFERRED(status)); +} + +static int aca_parse_bank_default(struct ras_core_context *ras_core, + struct aca_block *ras_blk, + void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + u64 misc0 = bank->regs[ACA_REG_IDX__MISC0]; + u64 status = bank->regs[ACA_REG_IDX__STATUS]; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = status; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + if (aca_check_bank_is_de(ras_core, status)) { + ecc->de_count = 1; + } else { + if (bank->ecc_type == RAS_ERR_TYPE__UE) + ecc->ue_count = 1; + else if (bank->ecc_type == RAS_ERR_TYPE__CE) + ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0); + } + + return 0; +} + +static int aca_parse_xgmi_bank(struct ras_core_context *ras_core, + struct aca_block *ras_blk, + void *data, void *buf) +{ + struct aca_bank_reg *bank = (struct aca_bank_reg *)data; + struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf; + struct aca_ecc_info bank_info; + u64 status, count; + int ext_error_code; + + memset(&bank_info, 0, sizeof(bank_info)); + aca_decode_bank_info(ras_blk, bank, &bank_info); + memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info)); + ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS]; + ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID]; + ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR]; + + status = bank->regs[ACA_REG_IDX__STATUS]; + ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status); + + count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]); + if 
(bank->ecc_type == RAS_ERR_TYPE__UE) { + if (ext_error_code != 0 && ext_error_code != 9) + count = 0ULL; + ecc->ue_count = count; + } else if (bank->ecc_type == RAS_ERR_TYPE__CE) { + count = ext_error_code == 6 ? count : 0ULL; + ecc->ce_count = count; + } + + return 0; +} + +static const struct aca_block_info aca_v1_0_umc = { + .name = "umc", + .ras_block_id = RAS_BLOCK_ID__UMC, + .hwip = ACA_ECC_HWIP__UMC, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK, + .bank_ops = { + .bank_match = aca_match_bank_default, + .bank_parse = aca_parse_umc_bank, + }, +}; + +static const struct aca_block_info aca_v1_0_gfx = { + .name = "gfx", + .ras_block_id = RAS_BLOCK_ID__GFX, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK, + .bank_ops = { + .bank_match = aca_match_gfx_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_sdma = { + .name = "sdma", + .ras_block_id = RAS_BLOCK_ID__SDMA, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK, + .bank_ops = { + .bank_match = aca_match_sdma_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_mmhub = { + .name = "mmhub", + .ras_block_id = RAS_BLOCK_ID__MMHUB, + .hwip = ACA_ECC_HWIP__SMU, + .mask = ACA_ERROR__UE_MASK, + .bank_ops = { + .bank_match = aca_match_mmhub_bank, + .bank_parse = aca_parse_bank_default, + }, +}; + +static const struct aca_block_info aca_v1_0_xgmi = { + .name = "xgmi", + .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL, + .hwip = ACA_ECC_HWIP__PCS_XGMI, + .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK, + .bank_ops = { + .bank_match = aca_match_bank_default, + .bank_parse = aca_parse_xgmi_bank, + }, +}; + +static const struct aca_block_info *aca_block_info_v1_0[] = { + &aca_v1_0_umc, + &aca_v1_0_gfx, + &aca_v1_0_sdma, + &aca_v1_0_mmhub, + &aca_v1_0_xgmi, +}; + +const struct ras_aca_ip_func ras_aca_func_v1_0 = { + .block_num = ARRAY_SIZE(aca_block_info_v1_0), + 
.block_info = aca_block_info_v1_0, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h new file mode 100644 index 000000000000..40e5d94b037f --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_ACA_V1_0_H__ +#define __RAS_ACA_V1_0_H__ +#include "ras.h" + +#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) +#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63) +#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62) +#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61) +#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60) +#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59) +#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58) +#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57) +#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56) +#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55) +#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53) +#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46) +#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45) +#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44) +#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43) +#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40) +#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32) +#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24) +#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16) +#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0) + +#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48) +#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44) +#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32) +#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0) + +#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63) +#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48) +#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32) + +#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0) + +/* NOTE: The following codes refers to the smu header file */ +#define ACA_EXTERROR_CODE_CE 0x3a +#define ACA_EXTERROR_CODE_FAULT 0x3b + +#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* 
SMN AID XCD0 */ +#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */ +#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */ +#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */ + +extern const struct ras_aca_ip_func ras_aca_func_v1_0; +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c new file mode 100644 index 000000000000..94e6d7420d94 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_cmd.h" + +#define RAS_CMD_MAJOR_VERSION 6 +#define RAS_CMD_MINOR_VERSION 0 +#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION)) + +static int ras_cmd_add_device(struct ras_core_context *ras_core) +{ + INIT_LIST_HEAD(&ras_core->ras_cmd.head); + ras_core->ras_cmd.ras_core = ras_core; + ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC; + return 0; +} + +static int ras_cmd_remove_device(struct ras_core_context *ras_core) +{ + memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd)); + return 0; +} + +static int ras_get_block_ecc_info(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_block_ecc_info_req *input_data = + (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw; + struct ras_cmd_block_ecc_info_rsp *output_data = + (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw; + struct ras_ecc_count err_data; + int ret; + + if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + memset(&err_data, 0, sizeof(err_data)); + ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + output_data->ce_count = err_data.total_ce_count; + output_data->ue_count = err_data.total_ue_count; + output_data->de_count = err_data.total_de_count; + + cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp); + return RAS_CMD__SUCCESS; +} + +static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record, + struct eeprom_umc_record *record) +{ + ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn; + ras_cmd_record->ts = record->ts; + ras_cmd_record->err_type = record->err_type; + ras_cmd_record->mem_channel = record->mem_channel; + ras_cmd_record->mcumc_id = record->mcumc_id; + ras_cmd_record->address = record->address; + ras_cmd_record->bank = record->bank; + 
ras_cmd_record->valid = 1; +} + +static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core, + uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data) +{ + struct eeprom_umc_record record; + struct ras_cmd_bad_page_record *ras_cmd_record; + uint32_t i = 0, bp_cnt = 0, group_cnt = 0; + + output_data->bp_in_group = 0; + output_data->group_index = 0; + + bp_cnt = ras_umc_get_badpage_count(ras_core); + if (bp_cnt) { + output_data->group_index = group_index; + group_cnt = bp_cnt / RAS_CMD_MAX_BAD_PAGES_PER_GROUP + + ((bp_cnt % RAS_CMD_MAX_BAD_PAGES_PER_GROUP) ? 1 : 0); + + if (group_index >= group_cnt) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + for (; + i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP; + i++) { + if (ras_umc_get_badpage_record(ras_core, i, &record)) + return RAS_CMD__ERROR_GENERIC; + + ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP]; + + memset(ras_cmd_record, 0, sizeof(*ras_cmd_record)); + ras_cmd_update_bad_page_info(ras_cmd_record, &record); + output_data->bp_in_group++; + } + } + output_data->bp_total_cnt = bp_cnt; + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_bad_pages_info_req *input_data = + (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw; + struct ras_cmd_bad_pages_info_rsp *output_data = + (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw; + int ret; + + if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_clear_bad_page_info(struct ras_core_context 
*ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + if (cmd->input_size != sizeof(struct ras_cmd_dev_handle)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (ras_eeprom_reset_table(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + if (ras_umc_clean_badpage_data(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + if (cmd->input_size != sizeof(struct ras_cmd_dev_handle)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (ras_aca_clear_all_blocks_ecc_count(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + if (ras_umc_clear_logged_ecc(ras_core)) + return RAS_CMD__ERROR_GENERIC; + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_cper_snapshot_rsp *output_data = + (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + + if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ras_log_ring_get_batch_overview(ras_core, &overview); + + output_data->total_cper_num = overview.logged_batch_count; + output_data->start_cper_id = overview.first_batch_id; + output_data->latest_cper_id = overview.last_batch_id; + + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_cper_records(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_cper_record_req *req = + (struct ras_cmd_cper_record_req *)cmd->input_buff_raw; + struct ras_cmd_cper_record_rsp *rsp = + (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw; + struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0}; + struct ras_log_batch_overview overview; + uint32_t offset = 0, real_data_len = 0; + uint64_t batch_id; + uint8_t *buffer; + 
int ret = 0, i, count; + + if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if (!req->buf_size || !req->buf_ptr || !req->cper_num) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + buffer = kzalloc(req->buf_size, GFP_KERNEL); + if (!buffer) + return RAS_CMD__ERROR_GENERIC; + + ras_log_ring_get_batch_overview(ras_core, &overview); + for (i = 0; i < req->cper_num; i++) { + batch_id = req->cper_start_id + i; + if (batch_id >= overview.last_batch_id) + break; + + count = ras_log_ring_get_batch_records(ras_core, batch_id, trace, + ARRAY_SIZE(trace)); + if (count > 0) { + ret = ras_cper_generate_cper(ras_core, trace, count, + &buffer[offset], req->buf_size - offset, &real_data_len); + if (ret) + break; + + offset += real_data_len; + } + } + + if ((ret && (ret != -ENOMEM)) || + copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) { + kfree(buffer); + return RAS_CMD__ERROR_GENERIC; + } + + rsp->real_data_size = offset; + rsp->real_cper_num = i; + rsp->remain_num = (ret == -ENOMEM) ? 
(req->cper_num - i) : 0; + rsp->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp); + + kfree(buffer); + + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_batch_trace_snapshot_rsp *rsp = + (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + + + if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + ras_log_ring_get_batch_overview(ras_core, &overview); + + rsp->total_batch_num = overview.logged_batch_count; + rsp->start_batch_id = overview.first_batch_id; + rsp->latest_batch_id = overview.last_batch_id; + rsp->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp); + return RAS_CMD__SUCCESS; +} + +static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_batch_trace_record_req *input_data = + (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw; + struct ras_cmd_batch_trace_record_rsp *output_data = + (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw; + struct ras_log_batch_overview overview; + struct ras_log_info *trace_arry[MAX_RECORD_PER_BATCH] = {0}; + struct ras_log_info *record; + int i, j, count = 0, offset = 0; + uint64_t id; + bool completed = false; + + if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM)) + return RAS_CMD__ERROR_INVALID_INPUT_DATA; + + ras_log_ring_get_batch_overview(ras_core, &overview); + if ((input_data->start_batch_id < overview.first_batch_id) || + (input_data->start_batch_id >= overview.last_batch_id)) + return RAS_CMD__ERROR_INVALID_INPUT_SIZE; + + for (i = 0; i < input_data->batch_num; i++) { + id = 
input_data->start_batch_id + i; + if (id >= overview.last_batch_id) { + completed = true; + break; + } + + count = ras_log_ring_get_batch_records(ras_core, + id, trace_arry, ARRAY_SIZE(trace_arry)); + if (count > 0) { + if ((offset + count) > RAS_CMD_MAX_TRACE_NUM) + break; + for (j = 0; j < count; j++) { + record = &output_data->records[offset + j]; + record->seqno = trace_arry[j]->seqno; + record->timestamp = trace_arry[j]->timestamp; + record->event = trace_arry[j]->event; + memcpy(&record->aca_reg, + &trace_arry[j]->aca_reg, sizeof(trace_arry[j]->aca_reg)); + } + } else { + count = 0; + } + + output_data->batchs[i].batch_id = id; + output_data->batchs[i].offset = offset; + output_data->batchs[i].trace_num = count; + offset += count; + } + + output_data->start_batch_id = input_data->start_batch_id; + output_data->real_batch_num = i; + output_data->remain_num = completed ? 0 : (input_data->batch_num - i); + output_data->version = 0; + + cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp); + + return RAS_CMD__SUCCESS; +} + +static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block) +{ + switch (block) { + case RAS_BLOCK_ID__UMC: + return RAS_TA_BLOCK__UMC; + case RAS_BLOCK_ID__SDMA: + return RAS_TA_BLOCK__SDMA; + case RAS_BLOCK_ID__GFX: + return RAS_TA_BLOCK__GFX; + case RAS_BLOCK_ID__MMHUB: + return RAS_TA_BLOCK__MMHUB; + case RAS_BLOCK_ID__ATHUB: + return RAS_TA_BLOCK__ATHUB; + case RAS_BLOCK_ID__PCIE_BIF: + return RAS_TA_BLOCK__PCIE_BIF; + case RAS_BLOCK_ID__HDP: + return RAS_TA_BLOCK__HDP; + case RAS_BLOCK_ID__XGMI_WAFL: + return RAS_TA_BLOCK__XGMI_WAFL; + case RAS_BLOCK_ID__DF: + return RAS_TA_BLOCK__DF; + case RAS_BLOCK_ID__SMN: + return RAS_TA_BLOCK__SMN; + case RAS_BLOCK_ID__SEM: + return RAS_TA_BLOCK__SEM; + case RAS_BLOCK_ID__MP0: + return RAS_TA_BLOCK__MP0; + case RAS_BLOCK_ID__MP1: + return RAS_TA_BLOCK__MP1; + case RAS_BLOCK_ID__FUSE: + return RAS_TA_BLOCK__FUSE; + case RAS_BLOCK_ID__MCA: + return RAS_TA_BLOCK__MCA; + case 
RAS_BLOCK_ID__VCN: + return RAS_TA_BLOCK__VCN; + case RAS_BLOCK_ID__JPEG: + return RAS_TA_BLOCK__JPEG; + default: + return RAS_TA_BLOCK__UMC; + } +} + +static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error) +{ + switch (error) { + case RAS_ECC_ERR__NONE: + return RAS_TA_ERROR__NONE; + case RAS_ECC_ERR__PARITY: + return RAS_TA_ERROR__PARITY; + case RAS_ECC_ERR__SINGLE_CORRECTABLE: + return RAS_TA_ERROR__SINGLE_CORRECTABLE; + case RAS_ECC_ERR__MULTI_UNCORRECTABLE: + return RAS_TA_ERROR__MULTI_UNCORRECTABLE; + case RAS_ECC_ERR__POISON: + return RAS_TA_ERROR__POISON; + default: + return RAS_TA_ERROR__NONE; + } +} + +static int ras_cmd_inject_error(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_inject_error_req *req = + (struct ras_cmd_inject_error_req *)cmd->input_buff_raw; + struct ras_cmd_inject_error_rsp *output_data = + (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw; + int ret = 0; + struct ras_ta_trigger_error_input block_info = { + .block_id = __get_ras_ta_block(req->block_id), + .sub_block_index = req->subblock_id, + .inject_error_type = __get_ras_ta_err_type(req->error_type), + .address = req->address, + .value = req->method, + }; + + ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask); + if (!ret) { + output_data->version = 0; + output_data->address = block_info.address; + cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp); + } else { + RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret); + ret = RAS_CMD__ERROR_ACCESS_DENIED; + } + + return ret; +} + +static struct ras_cmd_func_map ras_cmd_maps[] = { + {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error}, + {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info}, + {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages}, + {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info}, + {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts}, + {RAS_CMD__GET_CPER_SNAPSHOT, 
ras_cmd_get_cper_snapshot}, + {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records}, + {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot}, + {RAS_CMD__GET_BATCH_TRACE_RECORD, ras_cmd_get_batch_trace_records}, +}; + +int rascore_handle_cmd(struct ras_core_context *ras_core, + struct ras_cmd_ctx *cmd, void *data) +{ + struct ras_cmd_func_map *ras_cmd = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) { + if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) { + ras_cmd = &ras_cmd_maps[i]; + break; + } + } + + if (!ras_cmd) + return RAS_CMD__ERROR_UKNOWN_CMD; + + return ras_cmd->func(ras_core, cmd, data); +} + +int ras_cmd_init(struct ras_core_context *ras_core) +{ + return ras_cmd_add_device(ras_core); +} + +int ras_cmd_fini(struct ras_core_context *ras_core) +{ + ras_cmd_remove_device(ras_core); + return 0; +} + +int ras_cmd_query_interface_info(struct ras_core_context *ras_core, + struct ras_query_interface_info_rsp *rsp) +{ + rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION; + rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION; + + return 0; +} + +int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr) +{ + struct umc_bank_addr umc_bank = {0}; + int ret; + + ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false); + if (ret) + return RAS_CMD__ERROR_GENERIC; + + bank_addr->stack_id = umc_bank.stack_id; + bank_addr->bank_group = umc_bank.bank_group; + bank_addr->bank = umc_bank.bank; + bank_addr->row = umc_bank.row; + bank_addr->column = umc_bank.column; + bank_addr->channel = umc_bank.channel; + bank_addr->subchannel = umc_bank.subchannel; + + return 0; +} + +int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core, + struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa) +{ + struct umc_bank_addr umc_bank = {0}; + + umc_bank.stack_id = bank_addr.stack_id; + umc_bank.bank_group = bank_addr.bank_group; + umc_bank.bank = bank_addr.bank; + 
umc_bank.row = bank_addr.row; + umc_bank.column = bank_addr.column; + umc_bank.channel = bank_addr.channel; + umc_bank.subchannel = bank_addr.subchannel; + + return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true); +} + +uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core) +{ + return ras_core->ras_cmd.dev_handle; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h new file mode 100644 index 000000000000..48a0715eb821 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h @@ -0,0 +1,426 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#ifndef __RAS_CMD_H__
#define __RAS_CMD_H__
#include "ras.h"
#include "ras_eeprom.h"
#include "ras_log_ring.h"
#include "ras_cper.h"

/* XOR magic used to obfuscate the kernel pointer exported as dev_handle. */
#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL

#define RAS_CMD_MAX_IN_SIZE 256
#define RAS_CMD_MAX_GPU_NUM 32
#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32

/* position of instance value in sub_block_index of
 * ta_ras_trigger_error_input, the sub block uses lower 12 bits
 */
#define RAS_TA_INST_MASK 0xfffff000
#define RAS_TA_INST_SHIFT 0xc

/* Which front-end a command arrived through. */
enum ras_cmd_interface_type {
	RAS_CMD_INTERFACE_TYPE_NONE,
	RAS_CMD_INTERFACE_TYPE_AMDGPU,
	RAS_CMD_INTERFACE_TYPE_VF,
	RAS_CMD_INTERFACE_TYPE_PF,
};

/* Command-id partitioning; each consumer gets a disjoint 64K range. */
enum ras_cmd_id_range {
	RAS_CMD_ID_COMMON_START = 0,
	RAS_CMD_ID_COMMON_END = 0x10000,
	RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END,
	RAS_CMD_ID_AMDGPU_END = 0x20000,
	RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END,
	RAS_CMD_ID_MXGPU_END = 0x30000,
	RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END,
	RAS_CMD_ID_MXGPU_VF_END = 0x40000,
};

/* Common command ids dispatched by rascore_handle_cmd().
 * NOTE: values are wire ABI — append only, never reorder.
 */
enum ras_cmd_id {
	RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START,
	RAS_CMD__QUERY_INTERFACE_INFO,
	RAS_CMD__GET_DEVICES_INFO,
	RAS_CMD__GET_BLOCK_ECC_STATUS,
	RAS_CMD__INJECT_ERROR,
	RAS_CMD__GET_BAD_PAGES,
	RAS_CMD__CLEAR_BAD_PAGE_INFO,
	RAS_CMD__RESET_ALL_ERROR_COUNTS,
	RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES,
	RAS_CMD__TRANSLATE_FB_ADDRESS,
	RAS_CMD__GET_LINK_TOPOLOGY,
	RAS_CMD__GET_CPER_SNAPSHOT,
	RAS_CMD__GET_CPER_RECORD,
	RAS_CMD__GET_BATCH_TRACE_SNAPSHOT,
	RAS_CMD__GET_BATCH_TRACE_RECORD,
	RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END,
};

/* Status codes returned by command handlers.
 * NOTE: "UKNOWN" and "EXEED" are misspelled but are part of the exported
 * interface — renaming them would break existing consumers.
 */
enum ras_cmd_response {
	RAS_CMD__SUCCESS = 0,
	RAS_CMD__SUCCESS_EXEED_BUFFER,
	RAS_CMD__ERROR_UKNOWN_CMD,
	RAS_CMD__ERROR_INVALID_CMD,
	RAS_CMD__ERROR_VERSION,
	RAS_CMD__ERROR_INVALID_INPUT_SIZE,
	RAS_CMD__ERROR_INVALID_INPUT_DATA,
	RAS_CMD__ERROR_DRV_INIT_FAIL,
	RAS_CMD__ERROR_ACCESS_DENIED,
	RAS_CMD__ERROR_GENERIC,
	RAS_CMD__ERROR_TIMEOUT,
};

/* Injectable error classes (bit-flag style values). */
enum ras_error_type {
	RAS_TYPE_ERROR__NONE = 0,
	RAS_TYPE_ERROR__PARITY = 1,
	RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2,
	RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4,
	RAS_TYPE_ERROR__POISON = 8,
};

struct ras_core_context;
struct ras_cmd_ctx;

/* Per-core command-layer state (see ras_cmd_add_device()). */
struct ras_cmd_mgr {
	struct list_head head;
	struct ras_core_context *ras_core;
	uint64_t dev_handle;  /* obfuscated handle exported to user space */
};

/* One entry of the command dispatch table. */
struct ras_cmd_func_map {
	uint32_t cmd_id;
	int (*func)(struct ras_core_context *ras_core,
			struct ras_cmd_ctx *cmd, void *data);
};

/* PCI bus/device/function packed into 32 bits. */
struct ras_device_bdf {
	union {
		struct {
			uint32_t function : 3;
			uint32_t device : 5;
			uint32_t bus : 8;
			uint32_t domain : 16;
		};
		uint32_t u32_all;
	};
};

struct ras_cmd_param {
	uint32_t idx_vf;
	void *data;
};

/* Everything below is wire format: fixed 8-byte packing. */
#pragma pack(push, 8)
/* Command envelope: header + fixed-size input + variable output area. */
struct ras_cmd_ctx {
	uint32_t magic;
	union {
		struct {
			uint16_t ras_cmd_minor_ver : 10;
			uint16_t ras_cmd_major_ver : 6;
		};
		uint16_t ras_cmd_ver;
	};
	union {
		struct {
			uint16_t plat_major_ver : 10;
			uint16_t plat_minor_ver : 6;
		};
		uint16_t plat_ver;
	};
	uint32_t cmd_id;
	uint32_t cmd_res;       /* RAS_CMD__* result written by the handler */
	uint32_t input_size;
	uint32_t output_size;   /* bytes produced in output_buff_raw */
	uint32_t output_buf_size;
	uint32_t reserved[5];
	uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE];
	uint8_t output_buff_raw[];  /* flexible array, output_buf_size bytes */
};

struct ras_cmd_dev_handle {
	uint64_t dev_handle;
};

struct ras_cmd_block_ecc_info_req {
	struct ras_cmd_dev_handle dev;
	uint32_t block_id;
	uint32_t subblock_id;
	uint32_t reserved[4];
};

struct ras_cmd_block_ecc_info_rsp {
	uint32_t version;
	uint32_t ce_count;
	uint32_t ue_count;
	uint32_t de_count;
	uint32_t reserved[6];
};

struct ras_cmd_inject_error_req {
	struct ras_cmd_dev_handle dev;
	uint32_t block_id;
	uint32_t subblock_id;
	uint64_t address;
	uint32_t error_type;
	uint32_t instance_mask;
	union {
		struct {
			/* vf index */
			uint64_t vf_idx : 6;
			/* method of error injection. i.e persistent, coherent etc */
			uint64_t method : 10;
			uint64_t rsv : 48;
		};
		uint64_t value;
	};
	uint32_t reserved[8];
};

struct ras_cmd_inject_error_rsp {
	uint32_t version;
	uint32_t reserved[5];
	uint64_t address;
};

struct ras_cmd_dev_info {
	uint64_t dev_handle;
	uint32_t location_id;
	uint32_t ecc_enabled;
	uint32_t ecc_supported;
	uint32_t vf_num;
	uint32_t asic_type;
	uint32_t oam_id;
	uint32_t reserved[8];
};

struct ras_cmd_devices_info_rsp {
	uint32_t version;
	uint32_t dev_num;
	uint32_t reserved[6];
	struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM];
};

/* One retired page, see ras_cmd_update_bad_page_info() for the mapping
 * from eeprom_umc_record.
 */
struct ras_cmd_bad_page_record {
	union {
		uint64_t address;
		uint64_t offset;
	};
	uint64_t retired_page;
	uint64_t ts;

	uint32_t err_type;

	union {
		unsigned char bank;
		unsigned char cu;
	};

	unsigned char mem_channel;
	unsigned char mcumc_id;

	unsigned char valid;
	unsigned char reserved[8];
};

struct ras_cmd_bad_pages_info_req {
	struct ras_cmd_dev_handle device;
	uint32_t group_index;
	uint32_t reserved[5];
};

struct ras_cmd_bad_pages_info_rsp {
	uint32_t version;
	uint32_t group_index;
	uint32_t bp_in_group;   /* valid entries in records[] */
	uint32_t bp_total_cnt;  /* total retired pages across all groups */
	uint32_t reserved[4];
	struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
};

struct ras_query_interface_info_req {
	uint32_t reserved[8];
};

struct ras_query_interface_info_rsp {
	uint32_t version;
	uint32_t ras_cmd_major_ver;
	uint32_t ras_cmd_minor_ver;
	uint32_t plat_major_ver;
	uint32_t plat_minor_ver;
	uint8_t interface_type;  /* enum ras_cmd_interface_type */
	uint8_t rsv[3];
	uint32_t reserved[8];
};

#define RAS_MAX_NUM_SAFE_RANGES 64
struct ras_cmd_ras_safe_fb_address_ranges_rsp {
	uint32_t version;
	uint32_t num_ranges;
	uint32_t reserved[4];
	struct {
		uint64_t start;
		uint64_t size;
		uint32_t idx;
		uint32_t reserved[3];
	} range[RAS_MAX_NUM_SAFE_RANGES];
};

/* Address spaces understood by RAS_CMD__TRANSLATE_FB_ADDRESS. */
enum ras_fb_addr_type {
	RAS_FB_ADDR_SOC_PHY, /* SPA */
	RAS_FB_ADDR_BANK,
	RAS_FB_ADDR_VF_PHY, /* GPA */
	RAS_FB_ADDR_UNKNOWN
};

struct ras_fb_bank_addr {
	uint32_t stack_id; /* SID */
	uint32_t bank_group;
	uint32_t bank;
	uint32_t row;
	uint32_t column;
	uint32_t channel;
	uint32_t subchannel; /* Also called Pseudochannel (PC) */
	uint32_t reserved[3];
};

struct ras_fb_vf_phy_addr {
	uint32_t vf_idx;
	uint32_t reserved;
	uint64_t addr;
};

union ras_translate_fb_address {
	struct ras_fb_bank_addr bank_addr;
	uint64_t soc_phy_addr;
	struct ras_fb_vf_phy_addr vf_phy_addr;
};

struct ras_cmd_translate_fb_address_req {
	struct ras_cmd_dev_handle dev;
	enum ras_fb_addr_type src_addr_type;
	enum ras_fb_addr_type dest_addr_type;
	union ras_translate_fb_address trans_addr;
};

struct ras_cmd_translate_fb_address_rsp {
	uint32_t version;
	uint32_t reserved[5];
	union ras_translate_fb_address trans_addr;
};

struct ras_dev_link_topology_req {
	struct ras_cmd_dev_handle src;
	struct ras_cmd_dev_handle dst;
};

struct ras_dev_link_topology_rsp {
	uint32_t version;
	uint32_t link_status; /* HW status of the link */
	uint32_t link_type; /* type of the link */
	uint32_t num_hops; /* number of hops */
	uint32_t reserved[8];
};

struct ras_cmd_cper_snapshot_req {
	struct ras_cmd_dev_handle dev;
};

/* latest_cper_id is an exclusive upper bound (see ras_cmd.c handlers). */
struct ras_cmd_cper_snapshot_rsp {
	uint32_t version;
	uint32_t reserved[4];
	uint32_t total_cper_num;
	uint64_t start_cper_id;
	uint64_t latest_cper_id;
};

struct ras_cmd_cper_record_req {
	struct ras_cmd_dev_handle dev;
	uint64_t cper_start_id;
	uint32_t cper_num;
	uint32_t buf_size;  /* size of the user buffer at buf_ptr */
	uint64_t buf_ptr;   /* user-space destination pointer */
	uint32_t reserved[4];
};

struct ras_cmd_cper_record_rsp {
	uint32_t version;
	uint32_t real_data_size;
	uint32_t real_cper_num;
	uint32_t remain_num;  /* batches that did not fit in buf_size */
	uint32_t reserved[4];
};

struct ras_cmd_batch_trace_snapshot_req {
	struct ras_cmd_dev_handle dev;
};

struct ras_cmd_batch_trace_snapshot_rsp {
	uint32_t version;
	uint32_t reserved[4];
	uint32_t total_batch_num;
	uint64_t start_batch_id;
	uint64_t latest_batch_id;
};

struct ras_cmd_batch_trace_record_req {
	struct ras_cmd_dev_handle dev;
	uint64_t start_batch_id;
	uint32_t batch_num;
	uint32_t reserved[5];
};

/* Index entry: where one batch's traces live in the flat records[] array. */
struct batch_ras_trace_info {
	uint64_t batch_id;
	uint16_t offset;
	uint8_t trace_num;
	uint8_t rsv;
	uint32_t reserved;
};

#define RAS_CMD_MAX_BATCH_NUM 300
#define RAS_CMD_MAX_TRACE_NUM 300
/* NOTE: "batchs" (sic) is part of the exported layout; do not rename. */
struct ras_cmd_batch_trace_record_rsp {
	uint32_t version;
	uint16_t real_batch_num;
	uint16_t remain_num;
	uint64_t start_batch_id;
	uint32_t reserved[2];
	struct batch_ras_trace_info batchs[RAS_CMD_MAX_BATCH_NUM];
	struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM];
};

#pragma pack(pop)

int ras_cmd_init(struct ras_core_context *ras_core);
int ras_cmd_fini(struct ras_core_context *ras_core);
int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data);
uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core);
int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
		struct ras_query_interface_info_rsp *rsp);
int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
	uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr);
int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
	struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa);
#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
new file mode 100644
index 000000000000..01122b55c98a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
@@ -0,0 +1,603 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2025 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_core_status.h" + +#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t)) + +#define IS_LEAP_YEAR(x) ((x % 4 == 0 && x % 100 != 0) || x % 400 == 0) + +static const char * const ras_block_name[] = { + "umc", + "sdma", + "gfx", + "mmhub", + "athub", + "pcie_bif", + "hdp", + "xgmi_wafl", + "df", + "smn", + "sem", + "mp0", + "mp1", + "fuse", + "mca", + "vcn", + "jpeg", + "ih", + "mpio", +}; + +const char *ras_core_get_ras_block_name(enum ras_block_id block_id) +{ + if (block_id >= ARRAY_SIZE(ras_block_name)) + return ""; + + return ras_block_name[block_id]; +} + +int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core, + uint64_t timestamp, struct ras_time *tm) +{ + int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0; + uint32_t year = 0; + int seconds_per_day = 24 * 60 * 60; + int seconds_per_hour = 60 * 60; + int seconds_per_minute = 60; + int days, remaining_seconds; + + days = div64_u64_rem(timestamp, seconds_per_day, (uint64_t *)&remaining_seconds); + + /* utc_timestamp follows the Unix epoch */ + year = 1970; + while (days >= 365) { + if (IS_LEAP_YEAR(year)) { + if (days < 366) + break; + days -= 366; + } else { + days -= 365; + } + year++; + } + + days_in_month[1] += IS_LEAP_YEAR(year); + + month = 0; + while (days >= days_in_month[month]) { + days -= days_in_month[month]; + month++; + } + month++; + day = days + 1; + + if (remaining_seconds) { + hour = remaining_seconds / seconds_per_hour; + minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute; + second = remaining_seconds % seconds_per_minute; + } + + tm->tm_year = year; + tm->tm_mon = month; + tm->tm_mday = day; + tm->tm_hour = hour; + tm->tm_min = minute; + tm->tm_sec = second; + + return 0; +} + +bool ras_core_gpu_in_reset(struct ras_core_context *ras_core) +{ + uint32_t status = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->check_gpu_status) + 
ras_core->sys_fn->check_gpu_status(ras_core, &status); + + return (status & RAS_GPU_STATUS__IN_RESET) ? true : false; +} + +bool ras_core_gpu_is_vf(struct ras_core_context *ras_core) +{ + uint32_t status = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->check_gpu_status) + ras_core->sys_fn->check_gpu_status(ras_core, &status); + + return (status & RAS_GPU_STATUS__IS_VF) ? true : false; +} + +bool ras_core_gpu_is_rma(struct ras_core_context *ras_core) +{ + if (!ras_core) + return false; + + return ras_core->is_rma; +} + +static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core, + enum ras_seqno_fifo fifo_type, uint64_t seqno) +{ + int ret = 0; + struct kfifo *seqno_fifo = NULL; + + if (fifo_type == SEQNO_FIFO_POISON_CREATION) + seqno_fifo = &ras_core->de_seqno_fifo; + else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION) + seqno_fifo = &ras_core->consumption_seqno_fifo; + + if (seqno_fifo) + ret = kfifo_in_spinlocked(seqno_fifo, + &seqno, sizeof(seqno), &ras_core->seqno_lock); + + return ret ? 0 : -EINVAL; +} + +static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core, + enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop) +{ + int ret = 0; + struct kfifo *seqno_fifo = NULL; + + if (fifo_type == SEQNO_FIFO_POISON_CREATION) + seqno_fifo = &ras_core->de_seqno_fifo; + else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION) + seqno_fifo = &ras_core->consumption_seqno_fifo; + + if (seqno_fifo) { + if (pop) + ret = kfifo_out_spinlocked(seqno_fifo, + seqno, sizeof(*seqno), &ras_core->seqno_lock); + else + ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno)); + } + + return ret ? 
0 : -EINVAL; +} + +uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type type) +{ + uint64_t seqno = 0; + + if (ras_core->sys_fn && + ras_core->sys_fn->gen_seqno) + ras_core->sys_fn->gen_seqno(ras_core, type, &seqno); + + return seqno; +} + +int ras_core_put_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, uint64_t seqno) +{ + int ret = 0; + + if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX) + return -EINVAL; + + if (seqno_type == RAS_SEQNO_TYPE_DE) + ret = ras_core_seqno_fifo_write(ras_core, + SEQNO_FIFO_POISON_CREATION, seqno); + else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION) + ret = ras_core_seqno_fifo_write(ras_core, + SEQNO_FIFO_POISON_CONSUMPTION, seqno); + else + ret = -EINVAL; + + return ret; +} + +uint64_t ras_core_get_seqno(struct ras_core_context *ras_core, + enum ras_seqno_type seqno_type, bool pop) +{ + uint64_t seq_no; + int ret = -ENODATA; + + if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX) + return 0; + + if (seqno_type == RAS_SEQNO_TYPE_DE) + ret = ras_core_seqno_fifo_read(ras_core, + SEQNO_FIFO_POISON_CREATION, &seq_no, pop); + else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION) + ret = ras_core_seqno_fifo_read(ras_core, + SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop); + + if (ret) + seq_no = ras_core_gen_seqno(ras_core, seqno_type); + + return seq_no; +} + +static int ras_core_eeprom_recovery(struct ras_core_context *ras_core) +{ + int count; + int ret; + + count = ras_eeprom_get_record_count(ras_core); + if (!count) + return 0; + + /* Avoid bad page to be loaded again after gpu reset */ + if (ras_umc_get_saved_eeprom_count(ras_core) >= count) + return 0; + + ret = ras_umc_load_bad_pages(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret); + return ret; + } + + ras_eeprom_sync_info(ras_core); + + return ret; +} + +struct ras_core_context *ras_core_create(struct ras_core_config *init_config) +{ + struct ras_core_context *ras_core; + struct 
ras_core_config *config; + + ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL); + if (!ras_core) + return NULL; + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) { + kfree(ras_core); + return NULL; + } + + memcpy(config, init_config, sizeof(*config)); + ras_core->config = config; + + return ras_core; +} + +void ras_core_destroy(struct ras_core_context *ras_core) +{ + if (ras_core) + kfree(ras_core->config); + + kfree(ras_core); +} + +int ras_core_sw_init(struct ras_core_context *ras_core) +{ + int ret; + + if (!ras_core->config) { + RAS_DEV_ERR(ras_core->dev, "No ras core config!\n"); + return -EINVAL; + } + + ras_core->sys_fn = ras_core->config->sys_fn; + if (!ras_core->sys_fn) + return -EINVAL; + + ret = kfifo_alloc(&ras_core->de_seqno_fifo, + RAS_SEQNO_FIFO_SIZE, GFP_KERNEL); + if (ret) + return ret; + + ret = kfifo_alloc(&ras_core->consumption_seqno_fifo, + RAS_SEQNO_FIFO_SIZE, GFP_KERNEL); + if (ret) + return ret; + + spin_lock_init(&ras_core->seqno_lock); + + ret = ras_aca_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_umc_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_cmd_init(ras_core); + if (ret) + return ret; + + ret = ras_log_ring_sw_init(ras_core); + if (ret) + return ret; + + ret = ras_psp_sw_init(ras_core); + if (ret) + return ret; + + return 0; +} + +int ras_core_sw_fini(struct ras_core_context *ras_core) +{ + kfifo_free(&ras_core->de_seqno_fifo); + kfifo_free(&ras_core->consumption_seqno_fifo); + + ras_psp_sw_fini(ras_core); + ras_log_ring_sw_fini(ras_core); + ras_cmd_fini(ras_core); + ras_umc_sw_fini(ras_core); + ras_aca_sw_fini(ras_core); + + return 0; +} + +int ras_core_hw_init(struct ras_core_context *ras_core) +{ + int ret; + + ras_core->ras_eeprom_supported = + ras_core->config->ras_eeprom_supported; + + ras_core->poison_supported = ras_core->config->poison_supported; + + ret = ras_psp_hw_init(ras_core); + if (ret) + return ret; + + ret = ras_aca_hw_init(ras_core); + if (ret) + goto init_err1; + + ret = 
ras_mp1_hw_init(ras_core); + if (ret) + goto init_err2; + + ret = ras_nbio_hw_init(ras_core); + if (ret) + goto init_err3; + + ret = ras_umc_hw_init(ras_core); + if (ret) + goto init_err4; + + ret = ras_gfx_hw_init(ras_core); + if (ret) + goto init_err5; + + ret = ras_eeprom_hw_init(ras_core); + if (ret) + goto init_err6; + + ret = ras_core_eeprom_recovery(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, + "Failed to recovery ras core, ret:%d\n", ret); + goto init_err6; + } + + ret = ras_eeprom_check_storage_status(ras_core); + if (ret) + goto init_err6; + + ret = ras_process_init(ras_core); + if (ret) + goto init_err7; + + ras_core->is_initialized = true; + + return 0; + +init_err7: + ras_eeprom_hw_fini(ras_core); +init_err6: + ras_gfx_hw_fini(ras_core); +init_err5: + ras_umc_hw_fini(ras_core); +init_err4: + ras_nbio_hw_fini(ras_core); +init_err3: + ras_mp1_hw_fini(ras_core); +init_err2: + ras_aca_hw_fini(ras_core); +init_err1: + ras_psp_hw_fini(ras_core); + return ret; +} + +int ras_core_hw_fini(struct ras_core_context *ras_core) +{ + ras_core->is_initialized = false; + + ras_process_fini(ras_core); + ras_eeprom_hw_fini(ras_core); + ras_gfx_hw_fini(ras_core); + ras_nbio_hw_fini(ras_core); + ras_umc_hw_fini(ras_core); + ras_mp1_hw_fini(ras_core); + ras_aca_hw_fini(ras_core); + ras_psp_hw_fini(ras_core); + + return 0; +} + +bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data) +{ + return ras_nbio_handle_irq_error(ras_core, data); +} + +int ras_core_handle_fatal_error(struct ras_core_context *ras_core) +{ + int ret = 0; + + ras_aca_mark_fatal_flag(ras_core); + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL); + + return ret; +} + +uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core) +{ + if (ras_core->ras_nbio.ip_func && + ras_core->ras_nbio.ip_func->get_memory_partition_mode) + return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core); + + RAS_DEV_ERR(ras_core->dev, 
"Failed to get gpu memory nps mode!\n"); + return 0; +} + +int ras_core_update_ecc_info(struct ras_core_context *ras_core) +{ + int ret; + + ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL); + if (!ret) + ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL); + + return ret; +} + +int ras_core_query_block_ecc_data(struct ras_core_context *ras_core, + enum ras_block_id block, struct ras_ecc_count *ecc_count) +{ + int ret; + + if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core) + return -EINVAL; + + ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count); + if (!ret) + ras_aca_clear_block_new_ecc_count(ras_core, block); + + return ret; +} + +int ras_core_set_status(struct ras_core_context *ras_core, bool enable) +{ + ras_core->ras_core_enabled = enable; + + return 0; +} + +bool ras_core_is_enabled(struct ras_core_context *ras_core) +{ + return ras_core->ras_core_enabled; +} + +uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->get_utc_second_timestamp) + return ras_core->sys_fn->get_utc_second_timestamp(ras_core); + + RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n"); + return 0; +} + +int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa) +{ + if (!ras_core || !soc_pa || !bank_addr) + return -EINVAL; + + return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa); +} + +bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->detect_ras_interrupt) + return ras_core->sys_fn->detect_ras_interrupt(ras_core); + + RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n"); + return false; +} + +int ras_core_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + if (ras_core->sys_fn && 
ras_core->sys_fn->get_gpu_mem) + return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem); + + RAS_DEV_ERR(ras_core->dev, "Not config get gpu memory API!\n"); + return -EACCES; +} + +int ras_core_put_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem) +{ + if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem) + return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem); + + RAS_DEV_ERR(ras_core->dev, "Not config put gpu memory API!!\n"); + return -EACCES; +} + +bool ras_core_is_ready(struct ras_core_context *ras_core) +{ + return ras_core ? ras_core->is_initialized : false; +} + +bool ras_core_check_safety_watermark(struct ras_core_context *ras_core) +{ + return ras_eeprom_check_safety_watermark(ras_core); +} + +int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true); + + return 1; +} + +void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + ras_core->sys_fn->gpu_reset_lock(ras_core, true, false); +} + +void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core) +{ + if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock) + ras_core->sys_fn->gpu_reset_lock(ras_core, false, false); +} + +int ras_core_event_notify(struct ras_core_context *ras_core, + enum ras_notify_event event_id, void *data) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->ras_notifier) + return ras_core->sys_fn->ras_notifier(ras_core, event_id, data); + + return -RAS_CORE_NOT_SUPPORTED; +} + +int ras_core_get_device_system_info(struct ras_core_context *ras_core, + struct device_system_info *dev_info) +{ + if (ras_core && ras_core->sys_fn && + ras_core->sys_fn->get_device_system_info) + return ras_core->sys_fn->get_device_system_info(ras_core, dev_info); + + return 
-RAS_CORE_NOT_SUPPORTED; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c new file mode 100644 index 000000000000..2343991adccf --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_core_status.h" +#include "ras_log_ring.h" +#include "ras_cper.h" + +static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE; +static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC; +static const struct ras_cper_guid BOOT = BOOT__TYPE; + +static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP; +static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR; + +static void cper_get_timestamp(struct ras_core_context *ras_core, + struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp) +{ + struct ras_time tm = {0}; + + ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm); + timestamp->seconds = tm.tm_sec; + timestamp->minutes = tm.tm_min; + timestamp->hours = tm.tm_hour; + timestamp->flag = 0; + timestamp->day = tm.tm_mday; + timestamp->month = tm.tm_mon; + timestamp->year = tm.tm_year % 100; + timestamp->century = tm.tm_year / 100; +} + +static void fill_section_hdr(struct ras_core_context *ras_core, + struct cper_section_hdr *hdr, enum ras_cper_type type, + enum ras_cper_severity sev, struct ras_log_info *trace) +{ + struct device_system_info dev_info = {0}; + char record_id[16]; + + hdr->signature[0] = 'C'; + hdr->signature[1] = 'P'; + hdr->signature[2] = 'E'; + hdr->signature[3] = 'R'; + hdr->revision = CPER_HDR__REV_1; + hdr->signature_end = 0xFFFFFFFF; + hdr->error_severity = sev; + + hdr->valid_bits.platform_id = 1; + hdr->valid_bits.partition_id = 1; + hdr->valid_bits.timestamp = 1; + + ras_core_get_device_system_info(ras_core, &dev_info); + + cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp); + + snprintf(record_id, 9, "%d:%llX", dev_info.socket_id, + RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno)); + memcpy(hdr->record_id, record_id, 8); + + snprintf(hdr->platform_id, 16, "0x%04X:0x%04X", + dev_info.vendor_id, dev_info.device_id); + /* pmfw version should be part of creator_id according to CPER spec */ + snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU); 
+ + switch (type) { + case RAS_CPER_TYPE_BOOT: + hdr->notify_type = BOOT; + break; + case RAS_CPER_TYPE_FATAL: + case RAS_CPER_TYPE_RMA: + hdr->notify_type = MCE; + break; + case RAS_CPER_TYPE_RUNTIME: + if (sev == RAS_CPER_SEV_NON_FATAL_CE) + hdr->notify_type = CMC; + else + hdr->notify_type = MCE; + break; + default: + RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n"); + break; + } +} + +static int fill_section_descriptor(struct ras_core_context *ras_core, + struct cper_section_descriptor *descriptor, + enum ras_cper_severity sev, + struct ras_cper_guid sec_type, + uint32_t section_offset, + uint32_t section_length) +{ + struct device_system_info dev_info = {0}; + + descriptor->revision_minor = CPER_SEC__MINOR_REV_1; + descriptor->revision_major = CPER_SEC__MAJOR_REV_22; + descriptor->sec_offset = section_offset; + descriptor->sec_length = section_length; + descriptor->valid_bits.fru_text = 1; + descriptor->flag_bits.primary = 1; + descriptor->severity = sev; + descriptor->sec_type = sec_type; + + ras_core_get_device_system_info(ras_core, &dev_info); + + snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id); + + if (sev == RAS_CPER_SEV_RMA) + descriptor->flag_bits.exceed_err_threshold = 1; + + if (sev == RAS_CPER_SEV_NON_FATAL_UE) + descriptor->flag_bits.latent_err = 1; + + return 0; +} + +static int fill_section_fatal(struct ras_core_context *ras_core, + struct cper_section_fatal *fatal, struct ras_log_info *trace) +{ + fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH; + fatal->data.reg_arr_size = sizeof(fatal->data.reg); + + fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS]; + fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR]; + fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID]; + fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND]; + + return 0; +} + +static int fill_section_runtime(struct ras_core_context *ras_core, + struct cper_section_runtime *runtime, struct ras_log_info *trace) +{ 
+ runtime->hdr.valid_bits.err_info_cnt = 1; + runtime->hdr.valid_bits.err_context_cnt = 1; + + runtime->descriptor.error_type = RUNTIME; + runtime->descriptor.ms_chk_bits.err_type_valid = 1; + + runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH; + runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump); + + runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID]; + runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND]; + + return 0; +} + +static int cper_generate_runtime_record(struct ras_core_context *ras_core, + struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num, + enum ras_cper_severity sev) +{ + struct cper_section_descriptor *descriptor; + struct cper_section_runtime *runtime; + int i; + + fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]); + hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num); + hdr->sec_cnt = arr_num; + for (i = 0; i < arr_num; i++) { + descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr + + RAS_SEC_DESC_OFFSET(i)); + runtime = (struct cper_section_runtime *)((uint8_t *)hdr + + RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i)); + + fill_section_descriptor(ras_core, descriptor, sev, RUNTIME, + RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i), + sizeof(struct cper_section_runtime)); + fill_section_runtime(ras_core, runtime, trace_arr[i]); + } + + return 0; +} + +static int cper_generate_fatal_record(struct ras_core_context *ras_core, + uint8_t *buffer, struct 
ras_log_info **trace_arr, uint32_t arr_num) +{ + struct ras_cper_fatal_record record = {0}; + int i = 0; + + for (i = 0; i < arr_num; i++) { + fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL, + RAS_CPER_SEV_FATAL_UE, trace_arr[i]); + record.hdr.record_length = RAS_HDR_LEN + RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN; + record.hdr.sec_cnt = 1; + + fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE, + CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal), + sizeof(struct cper_section_fatal)); + + fill_section_fatal(ras_core, &record.fatal, trace_arr[i]); + + memcpy(buffer + (i * record.hdr.record_length), + &record, record.hdr.record_length); + } + + return 0; +} + +static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count) +{ + int size = 0; + + size += RAS_HDR_LEN; + size += (RAS_SEC_DESC_LEN * section_count); + + switch (type) { + case RAS_CPER_TYPE_RUNTIME: + case RAS_CPER_TYPE_RMA: + size += (RAS_NONSTD_SEC_LEN * section_count); + break; + case RAS_CPER_TYPE_FATAL: + size += (RAS_FATAL_SEC_LEN * section_count); + size += (RAS_HDR_LEN * (section_count - 1)); + break; + case RAS_CPER_TYPE_BOOT: + size += (RAS_BOOT_SEC_LEN * section_count); + break; + default: + /* should never reach here */ + break; + } + + return size; +} + +static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event) +{ + switch (event) { + case RAS_LOG_EVENT_UE: + return RAS_CPER_TYPE_FATAL; + case RAS_LOG_EVENT_DE: + case RAS_LOG_EVENT_CE: + case RAS_LOG_EVENT_POISON_CREATION: + case RAS_LOG_EVENT_POISON_CONSUMPTION: + return RAS_CPER_TYPE_RUNTIME; + case RAS_LOG_EVENT_RMA: + return RAS_CPER_TYPE_RMA; + default: + /* should never reach here */ + return RAS_CPER_TYPE_RUNTIME; + } +} + +int ras_cper_generate_cper(struct ras_core_context *ras_core, + struct ras_log_info **trace_list, uint32_t count, + uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len) +{ + uint8_t *buffer = buf; + uint64_t buf_size = buf_len; + 
int record_size, saved_size = 0; + struct cper_section_hdr *hdr; + + /* All the batch traces share the same event */ + record_size = cper_get_record_size( + cper_ras_log_event_to_cper_type(trace_list[0]->event), count); + + if ((record_size + saved_size) > buf_size) + return -ENOMEM; + + hdr = (struct cper_section_hdr *)(buffer + saved_size); + + switch (trace_list[0]->event) { + case RAS_LOG_EVENT_RMA: + cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA); + break; + case RAS_LOG_EVENT_DE: + cper_generate_runtime_record(ras_core, + hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE); + break; + case RAS_LOG_EVENT_CE: + cper_generate_runtime_record(ras_core, + hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE); + break; + case RAS_LOG_EVENT_UE: + cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count); + break; + default: + RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event); + break; + } + + saved_size += record_size; + + *real_data_len = saved_size; + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h new file mode 100644 index 000000000000..076c1883c1ce --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_CPER_H__ +#define __RAS_CPER_H__ + +#define CPER_UUID_MAX_SIZE 16 +struct ras_cper_guid { + uint8_t b[CPER_UUID_MAX_SIZE]; +}; + +#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ + ((struct ras_cper_guid) \ + {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ + (b) & 0xff, ((b) >> 8) & 0xff, \ + (c) & 0xff, ((c) >> 8) & 0xff, \ + (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) + +#define CPER_HDR__REV_1 (0x100) +#define CPER_SEC__MINOR_REV_1 (0x01) +#define CPER_SEC__MAJOR_REV_22 (0x22) +#define CPER_OAM_MAX_COUNT (8) + +#define CPER_CTX_TYPE__CRASH (1) +#define CPER_CTX_TYPE__BOOT (9) + +#define CPER_CREATOR_ID__AMDGPU "amdgpu" + +#define CPER_NOTIFY__MCE \ + CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \ + 0xE1, 0x49, 0x13, 0xBB) +#define CPER_NOTIFY__CMC \ + CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ + 0xEB, 0xD4, 0xF8, 0x90) +#define BOOT__TYPE \ + CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \ + 0xD4, 0x64, 0xB3, 0x8F) + +#define GPU__CRASHDUMP \ + CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \ + 0x72, 0x5F, 0xD6, 0xAE) +#define GPU__NONSTANDARD_ERROR \ + CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \ + 0x17, 0x80, 0x55, 0x1D) +#define PROC_ERR__SECTION_TYPE \ + CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \ + 0x24, 0x2B, 0x6E, 0x1D) + +enum ras_cper_type { + 
RAS_CPER_TYPE_RUNTIME, + RAS_CPER_TYPE_FATAL, + RAS_CPER_TYPE_BOOT, + RAS_CPER_TYPE_RMA, +}; + +enum ras_cper_severity { + RAS_CPER_SEV_NON_FATAL_UE = 0, + RAS_CPER_SEV_FATAL_UE = 1, + RAS_CPER_SEV_NON_FATAL_CE = 2, + RAS_CPER_SEV_RMA = 3, + + RAS_CPER_SEV_UNUSED = 10, +}; + +enum ras_cper_aca_reg { + RAS_CPER_ACA_REG_CTL = 0, + RAS_CPER_ACA_REG_STATUS = 1, + RAS_CPER_ACA_REG_ADDR = 2, + RAS_CPER_ACA_REG_MISC0 = 3, + RAS_CPER_ACA_REG_CONFIG = 4, + RAS_CPER_ACA_REG_IPID = 5, + RAS_CPER_ACA_REG_SYND = 6, + RAS_CPER_ACA_REG_DESTAT = 8, + RAS_CPER_ACA_REG_DEADDR = 9, + RAS_CPER_ACA_REG_MASK = 10, + + RAS_CPER_ACA_REG_COUNT = 16, +}; + +#pragma pack(push, 1) + +struct ras_cper_timestamp { + uint8_t seconds; + uint8_t minutes; + uint8_t hours; + uint8_t flag; + uint8_t day; + uint8_t month; + uint8_t year; + uint8_t century; +}; + +struct cper_section_hdr { + char signature[4]; /* "CPER" */ + uint16_t revision; + uint32_t signature_end; /* 0xFFFFFFFF */ + uint16_t sec_cnt; + enum ras_cper_severity error_severity; + union { + struct { + uint32_t platform_id : 1; + uint32_t timestamp : 1; + uint32_t partition_id : 1; + uint32_t reserved : 29; + } valid_bits; + uint32_t valid_mask; + }; + uint32_t record_length; /* Total size of CPER Entry */ + struct ras_cper_timestamp timestamp; + char platform_id[16]; + struct ras_cper_guid partition_id; /* Reserved */ + char creator_id[16]; + struct ras_cper_guid notify_type; /* CMC, MCE */ + char record_id[8]; /* Unique CPER Entry ID */ + uint32_t flags; /* Reserved */ + uint64_t persistence_info; /* Reserved */ + uint8_t reserved[12]; /* Reserved */ +}; + +struct cper_section_descriptor { + uint32_t sec_offset; /* Offset from the start of CPER entry */ + uint32_t sec_length; + uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */ + uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */ + union { + struct { + uint8_t fru_id : 1; + uint8_t fru_text : 1; + uint8_t reserved : 6; + } valid_bits; + uint8_t valid_mask; + }; + uint8_t reserved; + 
union { + struct { + uint32_t primary : 1; + uint32_t reserved1 : 2; + uint32_t exceed_err_threshold : 1; + uint32_t latent_err : 1; + uint32_t reserved2 : 27; + } flag_bits; + uint32_t flag_mask; + }; + struct ras_cper_guid sec_type; + char fru_id[16]; + enum ras_cper_severity severity; + char fru_text[20]; +}; + +struct runtime_hdr { + union { + struct { + uint64_t apic_id : 1; + uint64_t fw_id : 1; + uint64_t err_info_cnt : 6; + uint64_t err_context_cnt : 6; + } valid_bits; + uint64_t valid_mask; + }; + uint64_t apic_id; + char fw_id[48]; +}; + +struct runtime_descriptor { + struct ras_cper_guid error_type; + union { + struct { + uint64_t ms_chk : 1; + uint64_t target_addr_id : 1; + uint64_t req_id : 1; + uint64_t resp_id : 1; + uint64_t instr_ptr : 1; + uint64_t reserved : 59; + } valid_bits; + uint64_t valid_mask; + }; + union { + struct { + uint64_t err_type_valid : 1; + uint64_t pcc_valid : 1; + uint64_t uncorr_valid : 1; + uint64_t precise_ip_valid : 1; + uint64_t restartable_ip_valid : 1; + uint64_t overflow_valid : 1; + uint64_t reserved1 : 10; + uint64_t err_type : 2; + uint64_t pcc : 1; + uint64_t uncorr : 1; + uint64_t precised_ip : 1; + uint64_t restartable_ip : 1; + uint64_t overflow : 1; + uint64_t reserved2 : 41; + } ms_chk_bits; + uint64_t ms_chk_mask; + }; + uint64_t target_addr_id; + uint64_t req_id; + uint64_t resp_id; + uint64_t instr_ptr; +}; + +struct runtime_error_reg { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t msr_addr; + uint64_t mm_reg_addr; + uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT]; +}; + +struct cper_section_runtime { + struct runtime_hdr hdr; + struct runtime_descriptor descriptor; + struct runtime_error_reg reg; +}; + +struct crashdump_hdr { + uint64_t reserved1; + uint64_t reserved2; + char fw_id[48]; + uint64_t reserved3[8]; +}; + +struct fatal_reg_info { + uint64_t status; + uint64_t addr; + uint64_t ipid; + uint64_t synd; +}; + +struct crashdump_fatal { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + 
uint32_t reserved1; + uint64_t reserved2; + struct fatal_reg_info reg; +}; + +struct crashdump_boot { + uint16_t reg_ctx_type; + uint16_t reg_arr_size; + uint32_t reserved1; + uint64_t reserved2; + uint64_t msg[CPER_OAM_MAX_COUNT]; +}; + +struct cper_section_fatal { + struct crashdump_hdr hdr; + struct crashdump_fatal data; +}; + +struct cper_section_boot { + struct crashdump_hdr hdr; + struct crashdump_boot data; +}; + +struct ras_cper_fatal_record { + struct cper_section_hdr hdr; + struct cper_section_descriptor descriptor; + struct cper_section_fatal fatal; +}; +#pragma pack(pop) + +#define RAS_HDR_LEN (sizeof(struct cper_section_hdr)) +#define RAS_SEC_DESC_LEN (sizeof(struct cper_sec_desc)) + +#define RAS_BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot)) +#define RAS_FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal)) +#define RAS_NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err)) + +#define RAS_SEC_DESC_OFFSET(idx) (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * idx)) + +#define RAS_BOOT_SEC_OFFSET(count, idx) \ + (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_BOOT_SEC_LEN * idx)) +#define RAS_FATAL_SEC_OFFSET(count, idx) \ + (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_FATAL_SEC_LEN * idx)) +#define RAS_NONSTD_SEC_OFFSET(count, idx) \ + (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * count) + (RAS_NONSTD_SEC_LEN * idx)) + +struct ras_core_context; +struct ras_log_info; +int ras_cper_generate_cper(struct ras_core_context *ras_core, + struct ras_log_info **trace_list, uint32_t count, + uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c new file mode 100644 index 000000000000..cd6b057bdaf3 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c @@ -0,0 +1,1339 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras_eeprom.h" +#include "ras.h" + +/* These are memory addresses as would be seen by one or more EEPROM + * chips strung on the I2C bus, usually by manipulating pins 1-3 of a + * set of EEPROM devices. They form a continuous memory space. + * + * The I2C device address includes the device type identifier, 1010b, + * which is a reserved value and indicates that this is an I2C EEPROM + * device. It also includes the top 3 bits of the 19 bit EEPROM memory + * address, namely bits 18, 17, and 16. This makes up the 7 bit + * address sent on the I2C bus with bit 0 being the direction bit, + * which is not represented here, and sent by the hardware directly. + * + * For instance, + * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0. + * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h. 
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h. + * Depending on the size of the I2C EEPROM device(s), bits 18:16 may + * address memory in a device or a device on the I2C bus, depending on + * the status of pins 1-3. + * + * The RAS table lives either at address 0 or address 40000h of EEPROM. + */ +#define EEPROM_I2C_MADDR_0 0x0 +#define EEPROM_I2C_MADDR_4 0x40000 + +#define EEPROM_PAGE_BITS 8 +#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS) +#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1) + +#define EEPROM_OFFSET_SIZE 2 +#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF)) + +/* + * The 2 macros bellow represent the actual size in bytes that + * those entities occupy in the EEPROM memory. + * RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_umc_record) which + * uses uint64 to store 6b fields such as retired_page. + */ +#define RAS_TABLE_HEADER_SIZE 20 +#define RAS_TABLE_RECORD_SIZE 24 + +/* Table hdr is 'AMDR' */ +#define RAS_TABLE_HDR_VAL 0x414d4452 + +/* Bad GPU tag ‘BADG’ */ +#define RAS_TABLE_HDR_BAD 0x42414447 + +/* + * EEPROM Table structure v1 + * --------------------------------- + * | | + * | EEPROM TABLE HEADER | + * | ( size 20 Bytes ) | + * | | + * --------------------------------- + * | | + * | BAD PAGE RECORD AREA | + * | | + * --------------------------------- + */ + +/* Assume 2-Mbit size EEPROM and take up the whole space. 
 */
+#define RAS_TBL_SIZE_BYTES (256 * 1024)
+#define RAS_TABLE_START 0
+#define RAS_HDR_START RAS_TABLE_START
+#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+		/ RAS_TABLE_RECORD_SIZE)
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * |                               |
+ * |     EEPROM TABLE HEADER       |
+ * |      ( size 20 Bytes )        |
+ * |                               |
+ * ---------------------------------
+ * |                               |
+ * |     EEPROM TABLE RAS INFO     |
+ * | (available info size 4 Bytes) |
+ * |  ( reserved size 252 Bytes )  |
+ * |                               |
+ * ---------------------------------
+ * |                               |
+ * |    BAD PAGE RECORD AREA       |
+ * |                               |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+				RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+				    RAS_TABLE_V2_1_INFO_SIZE) \
+				    / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell I2C layer to read
+ * from/write to there. _N is the so-called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+				     (_N) * RAS_TABLE_RECORD_SIZE)
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+				     (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
+ */ +#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \ + (_C)->ras_max_record_count) + +#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE) + +#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \ + RAS_TABLE_HEADER_SIZE - \ + RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE) + +#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom)) + +static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core) +{ + return ras_core->ras_eeprom_supported; +} + +static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core, + struct ras_eeprom_control *control) +{ + int ret = -EINVAL; + + if (control->sys_func && + control->sys_func->update_eeprom_i2c_config) + ret = control->sys_func->update_eeprom_i2c_config(ras_core); + else + RAS_DEV_WARN(ras_core->dev, + "No eeprom i2c system config!\n"); + + return !ret ? true : false; +} + +static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int ret; + + if (control->sys_func && control->sys_func->eeprom_i2c_xfer) { + ret = control->sys_func->eeprom_i2c_xfer(ras_core, + eeprom_addr, eeprom_buf, buf_size, read); + + if ((ret > 0) && !read) { + /* According to EEPROM specs the length of the + * self-writing cycle, tWR (tW), is 10 ms. + * + * TODO: Use polling on ACK, aka Acknowledge + * Polling, to minimize waiting for the + * internal write cycle to complete, as it is + * usually smaller than tWR (tW). 
+ */ + msleep(10); + } + + return ret; + } + + RAS_DEV_ERR(ras_core->dev, "Error: No eeprom i2c system xfer function!\n"); + return -EINVAL; +} + +static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr, + u8 *eeprom_buf, u32 buf_size, bool read) +{ + u16 limit; + u16 ps; /* Partial size */ + int res = 0, r; + + if (read) + limit = ras_core->ras_eeprom.max_read_len; + else + limit = ras_core->ras_eeprom.max_write_len; + + if (limit && (limit <= EEPROM_OFFSET_SIZE)) { + RAS_DEV_ERR(ras_core->dev, + "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d", + eeprom_addr, buf_size, + read ? "read" : "write", EEPROM_OFFSET_SIZE); + return -EINVAL; + } + + ras_core_down_gpu_reset_lock(ras_core); + + if (limit == 0) { + res = __ras_eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, buf_size, read); + } else { + /* The "limit" includes all data bytes sent/received, + * which would include the EEPROM_OFFSET_SIZE bytes. + * Account for them here. + */ + limit -= EEPROM_OFFSET_SIZE; + for ( ; buf_size > 0; + buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) { + ps = (buf_size < limit) ? 
buf_size : limit; + + r = __ras_eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, ps, read); + if (r < 0) + break; + + res += r; + } + } + + ras_core_up_gpu_reset_lock(ras_core); + + return res; +} + +static int __eeprom_read(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 bytes) +{ + return __eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, bytes, true); +} + +static int __eeprom_write(struct ras_core_context *ras_core, + u32 eeprom_addr, u8 *eeprom_buf, u32 bytes) +{ + return __eeprom_xfer(ras_core, eeprom_addr, + eeprom_buf, bytes, false); +} + +static void +__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + + pp[0] = cpu_to_le32(hdr->header); + pp[1] = cpu_to_le32(hdr->version); + pp[2] = cpu_to_le32(hdr->first_rec_offset); + pp[3] = cpu_to_le32(hdr->tbl_size); + pp[4] = cpu_to_le32(hdr->checksum); +} + +static void +__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + + hdr->header = le32_to_cpu(pp[0]); + hdr->version = le32_to_cpu(pp[1]); + hdr->first_rec_offset = le32_to_cpu(pp[2]); + hdr->tbl_size = le32_to_cpu(pp[3]); + hdr->checksum = le32_to_cpu(pp[4]); +} + +static int __write_table_header(struct ras_eeprom_control *control) +{ + u8 buf[RAS_TABLE_HEADER_SIZE]; + struct ras_core_context *ras_core = to_ras_core_context(control); + int res; + + memset(buf, 0, sizeof(buf)); + __encode_table_header_to_buf(&control->tbl_hdr, buf); + + /* i2c may be unstable in gpu reset */ + res = __eeprom_write(ras_core, + control->i2c_address + + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Failed to write EEPROM table header:%d\n", res); + } else if (res < RAS_TABLE_HEADER_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +static void 
+__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = ((uint32_t)(rai->rma_status) & 0xFF) | + (((uint32_t)(rai->health_percent) << 8) & 0xFF00) | + (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000); + pp[0] = cpu_to_le32(tmp); +} + +static void +__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai, + unsigned char *buf) +{ + u32 *pp = (uint32_t *)buf; + u32 tmp; + + tmp = le32_to_cpu(pp[0]); + rai->rma_status = tmp & 0xFF; + rai->health_percent = (tmp >> 8) & 0xFF; + rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF; +} + +static int __write_table_ras_info(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u8 *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Failed to alloc buf to write table ras info\n"); + return -ENOMEM; + } + + __encode_table_ras_info_to_buf(&control->tbl_rai, buf); + + /* i2c may be unstable in gpu reset */ + res = __eeprom_write(ras_core, + control->i2c_address + + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Failed to write EEPROM table ras info:%d\n", res); + } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE); + res = -EIO; + } else { + res = 0; + } + + kfree(buf); + + return res; +} + +static u8 __calc_hdr_byte_sum(const struct ras_eeprom_control *control) +{ + int ii; + u8 *pp, csum; + u32 sz; + + /* Header checksum, skip checksum field in the calculation */ + sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); + pp = (u8 *) &control->tbl_hdr; + csum = 0; + for (ii = 0; ii < sz; ii++, pp++) + csum += *pp; + + return csum; +} + +static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control) +{ + int ii; + u8 *pp, csum; + 
u32 sz; + + sz = sizeof(control->tbl_rai); + pp = (u8 *) &control->tbl_rai; + csum = 0; + for (ii = 0; ii < sz; ii++, pp++) + csum += *pp; + + return csum; +} + +static int ras_eeprom_correct_header_tag( + struct ras_eeprom_control *control, + uint32_t header) +{ + struct ras_eeprom_table_header *hdr = &control->tbl_hdr; + u8 *hh; + int res; + u8 csum; + + csum = -hdr->checksum; + + hh = (void *) &hdr->header; + csum -= (hh[0] + hh[1] + hh[2] + hh[3]); + hh = (void *) &header; + csum += hh[0] + hh[1] + hh[2] + hh[3]; + csum = -csum; + mutex_lock(&control->ras_tbl_mutex); + hdr->header = header; + hdr->checksum = csum; + res = __write_table_header(control); + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +static void ras_set_eeprom_table_version(struct ras_eeprom_control *control) +{ + struct ras_eeprom_table_header *hdr = &control->tbl_hdr; + + hdr->version = RAS_TABLE_VER_V3; +} + +int ras_eeprom_reset_table(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_header *hdr = &control->tbl_hdr; + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + u8 csum; + int res; + + mutex_lock(&control->ras_tbl_mutex); + + hdr->header = RAS_TABLE_HDR_VAL; + ras_set_eeprom_table_version(control); + + if (hdr->version >= RAS_TABLE_VER_V2_1) { + hdr->first_rec_offset = RAS_RECORD_START_V2_1; + hdr->tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE; + rai->rma_status = RAS_GPU_HEALTH_USABLE; + /** + * GPU health represented as a percentage. + * 0 means worst health, 100 means fully health. 
+	 */
+		rai->health_percent = 100;
+		/* ecc_page_threshold = 0 means disable bad page retirement */
+		rai->ecc_page_threshold = control->record_threshold_count;
+	} else {
+		hdr->first_rec_offset = RAS_RECORD_START;
+		hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+	}
+
+	csum = __calc_hdr_byte_sum(control);
+	if (hdr->version >= RAS_TABLE_VER_V2_1)
+		csum += __calc_ras_info_byte_sum(control);
+	csum = -csum;
+	hdr->checksum = csum;
+	res = __write_table_header(control);
+	if (!res && hdr->version > RAS_TABLE_VER_V1)
+		res = __write_table_ras_info(control);
+
+	control->ras_num_recs = 0;
+	control->ras_fri = 0;
+
+	control->bad_channel_bitmap = 0;
+	ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+			      &control->ras_num_recs);
+	ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+			      &control->bad_channel_bitmap);
+	control->update_channel_flag = false;
+
+	mutex_unlock(&control->ras_tbl_mutex);
+
+	return res;
+}
+
+static void
+__encode_table_record_to_buf(struct ras_eeprom_control *control,
+			     struct eeprom_umc_record *record,
+			     unsigned char *buf)
+{
+	__le64 tmp = 0;
+	int i = 0;
+
+	/* Next are all record fields according to EEPROM page spec in LE format */
+	buf[i++] = record->err_type;
+
+	buf[i++] = record->bank;
+
+	tmp = cpu_to_le64(record->ts);
+	memcpy(buf + i, &tmp, 8);
+	i += 8;
+
+	tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+	memcpy(buf + i, &tmp, 6);
+	i += 6;
+
+	buf[i++] = record->mem_channel;
+	buf[i++] = record->mcumc_id;
+
+	tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff));
+	memcpy(buf + i, &tmp, 6);
+}
+
+static void
+__decode_table_record_from_buf(struct ras_eeprom_control *control,
+			       struct eeprom_umc_record *record,
+			       unsigned char *buf)
+{
+	__le64 tmp = 0;
+	int i = 0;
+
+	/* Next are all record fields according to EEPROM page spec in LE format */
+	record->err_type = buf[i++];
+
+	record->bank = buf[i++];
+
+	memcpy(&tmp, buf + i, 8);
+	record->ts = le64_to_cpu(tmp);
+	i += 8;
+
+	
memcpy(&tmp, buf + i, 6); + record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); + i += 6; + + record->mem_channel = buf[i++]; + record->mcumc_id = buf[i++]; + + memcpy(&tmp, buf + i, 6); + record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff); +} + +bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + bool ret = false; + int bad_page_count; + + if (!__is_ras_eeprom_supported(ras_core) || + !control->record_threshold_config) + return false; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) { + if (bad_page_count > control->record_threshold_count) + RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d", + bad_page_count, control->record_threshold_count); + + if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) || + (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) { + RAS_DEV_WARN(ras_core->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n"); + ret = false; + } else { + ras_core->is_rma = true; + RAS_DEV_WARN(ras_core->dev, + "Please consider adjusting the customized threshold.\n"); + ret = true; + } + } + + return ret; +} + +/** + * __ras_eeprom_write -- write indexed from buffer to EEPROM + * @control: pointer to control structure + * @buf: pointer to buffer containing data to write + * @fri: start writing at this index + * @num: number of records to write + * + * The caller must hold the table mutex in @control. + * Return 0 on success, -errno otherwise. 
+ */ +static int __ras_eeprom_write(struct ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = __eeprom_write(ras_core, + control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Writing %d EEPROM table records error:%d\n", num, res); + } else if (res < buf_size) { + /* Short write, return error.*/ + RAS_DEV_ERR(ras_core->dev, + "Wrote %d records out of %d\n", + (res/RAS_TABLE_RECORD_SIZE), num); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +static int ras_eeprom_append_table(struct ras_eeprom_control *control, + struct eeprom_umc_record *record, + const u32 num) +{ + u32 a, b, i; + u8 *buf, *pp; + int res; + + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Encode all of them in one go. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) { + __encode_table_record_to_buf(control, &record[i], pp); + + /* update bad channel bitmap */ + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + control->bad_channel_bitmap |= 1 << record[i].mem_channel; + control->update_channel_flag = true; + } + } + + /* a, first record index to write into. + * b, last record index to write into. + * a = first index to read (fri) + number of records in the table, + * b = a + @num - 1. + * Let N = control->ras_max_num_record_count, then we have, + * case 0: 0 <= a <= b < N, + * just append @num records starting at a; + * case 1: 0 <= a < N <= b, + * append (N - a) records starting at a, and + * append the remainder, b % N + 1, starting at 0. 
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases, + * case 2a: 0 <= a <= b < N + * append num records starting at a; and fix fri if b overwrote it, + * and since a <= b, if b overwrote it then a must've also, + * and if b didn't overwrite it, then a didn't also. + * case 2b: 0 <= b < a < N + * write num records starting at a, which wraps around 0=N + * and overwrite fri unconditionally. Now from case 2a, + * this means that b eclipsed fri to overwrite it and wrap + * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally + * set fri = b + 1 (mod N). + * Now, since fri is updated in every case, except the trivial case 0, + * the number of records present in the table after writing, is, + * num_recs - 1 = b - fri (mod N), and we take the positive value, + * by adding an arbitrary multiple of N before taking the modulo N + * as shown below. + */ + a = control->ras_fri + control->ras_num_recs; + b = a + num - 1; + if (b < control->ras_max_record_count) { + res = __ras_eeprom_write(control, buf, a, num); + } else if (a < control->ras_max_record_count) { + u32 g0, g1; + + g0 = control->ras_max_record_count - a; + g1 = b % control->ras_max_record_count + 1; + res = __ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, + 0, g1); + if (res) + goto Out; + if (g1 > control->ras_fri) + control->ras_fri = g1 % control->ras_max_record_count; + } else { + a %= control->ras_max_record_count; + b %= control->ras_max_record_count; + + if (a <= b) { + /* Note that, b - a + 1 = num. */ + res = __ras_eeprom_write(control, buf, a, num); + if (res) + goto Out; + if (b >= control->ras_fri) + control->ras_fri = (b + 1) % control->ras_max_record_count; + } else { + u32 g0, g1; + + /* b < a, which means, we write from + * a to the end of the table, and from + * the start of the table to b. 
+ */ + g0 = control->ras_max_record_count - a; + g1 = b + 1; + res = __ras_eeprom_write(control, buf, a, g0); + if (res) + goto Out; + res = __ras_eeprom_write(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1); + if (res) + goto Out; + control->ras_fri = g1 % control->ras_max_record_count; + } + } + control->ras_num_recs = 1 + + (control->ras_max_record_count + b - control->ras_fri) + % control->ras_max_record_count; +Out: + kfree(buf); + return res; +} + +static int ras_eeprom_update_header(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + int threshold_config = control->record_threshold_config; + u8 *buf, *pp, csum; + u32 buf_size; + int bad_page_count; + int res; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + /* Modify the header if it exceeds. + */ + if (threshold_config != 0 && + bad_page_count > control->record_threshold_count) { + RAS_DEV_WARN(ras_core->dev, + "Saved bad pages %d reaches threshold value %d\n", + bad_page_count, control->record_threshold_count); + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } + + if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) && + (threshold_config != NONSTOP_OVER_THRESHOLD)) + ras_core->is_rma = true; + + /* ignore the -ENOTSUPP return value */ + ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL); + } + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + control->tbl_hdr.checksum = 0; + + buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if 
(!buf) { + RAS_DEV_ERR(ras_core->dev, + "allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); + res = -ENOMEM; + goto Out; + } + + res = __eeprom_read(ras_core, + control->i2c_address + + control->ras_record_offset, + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "EEPROM failed reading records:%d\n", res); + goto Out; + } else if (res < buf_size) { + RAS_DEV_ERR(ras_core->dev, + "EEPROM read %d out of %d bytes\n", res, buf_size); + res = -EIO; + goto Out; + } + + /** + * bad page records have been stored in eeprom, + * now calculate gpu health percent + */ + if (threshold_config != 0 && + control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 && + bad_page_count <= control->record_threshold_count) + control->tbl_rai.health_percent = ((control->record_threshold_count - + bad_page_count) * 100) / control->record_threshold_count; + + /* Recalc the checksum. + */ + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; + + csum += __calc_hdr_byte_sum(control); + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + csum += __calc_ras_info_byte_sum(control); + /* avoid sign extension when assigning to "checksum" */ + csum = -csum; + control->tbl_hdr.checksum = csum; + res = __write_table_header(control); + if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1) + res = __write_table_ras_info(control); +Out: + kfree(buf); + return res; +} + +/** + * ras_core_eeprom_append -- append records to the EEPROM RAS table + * @control: pointer to control structure + * @record: array of records to append + * @num: number of records in @record array + * + * Append @num records to the table, calculate the checksum and write + * the table back to EEPROM. The maximum number of records that + * can be appended is between 1 and control->ras_max_record_count, + * regardless of how many records are already stored in the table. + * + * Return 0 on success or if EEPROM is not supported, -errno on error. 
+ */ +int ras_eeprom_append(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, const u32 num) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int res; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (num == 0) { + RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n"); + return -EINVAL; + } else if ((num + control->ras_num_recs) > control->ras_max_record_count) { + RAS_DEV_ERR(ras_core->dev, + "cannot append %d records than the size of table %d\n", + num, control->ras_max_record_count); + return -EINVAL; + } + + mutex_lock(&control->ras_tbl_mutex); + res = ras_eeprom_append_table(control, record, num); + if (!res) + res = ras_eeprom_update_header(control); + + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +/** + * __ras_eeprom_read -- read indexed from EEPROM into buffer + * @control: pointer to control structure + * @buf: pointer to buffer to read into + * @fri: first record index, start reading at this index, absolute index + * @num: number of records to read + * + * The caller must hold the table mutex in @control. + * Return 0 on success, -errno otherwise. + */ +static int __ras_eeprom_read(struct ras_eeprom_control *control, + u8 *buf, const u32 fri, const u32 num) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + u32 buf_size; + int res; + + /* i2c may be unstable in gpu reset */ + buf_size = num * RAS_TABLE_RECORD_SIZE; + res = __eeprom_read(ras_core, + control->i2c_address + + RAS_INDEX_TO_OFFSET(control, fri), + buf, buf_size); + if (res < 0) { + RAS_DEV_ERR(ras_core->dev, + "Reading %d EEPROM table records error:%d\n", num, res); + } else if (res < buf_size) { + /* Short read, return error. 
+ */ + RAS_DEV_ERR(ras_core->dev, + "Read %d records out of %d\n", + (res/RAS_TABLE_RECORD_SIZE), num); + res = -EIO; + } else { + res = 0; + } + + return res; +} + +int ras_eeprom_read(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, const u32 num) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + int i, res; + u8 *buf, *pp; + u32 g0, g1; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (num == 0) { + RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n"); + return -EINVAL; + } else if (num > control->ras_num_recs) { + RAS_DEV_ERR(ras_core->dev, + "too many records to read:%d available:%d\n", + num, control->ras_num_recs); + return -EINVAL; + } + + buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Determine how many records to read, from the first record + * index, fri, to the end of the table, and from the beginning + * of the table, such that the total number of records is + * @num, and we handle wrap around when fri > 0 and + * fri + num > RAS_MAX_RECORD_COUNT. + * + * First we compute the index of the last element + * which would be fetched from each region, + * g0 is in [fri, fri + num - 1], and + * g1 is in [0, RAS_MAX_RECORD_COUNT - 1]. + * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of + * the last element to fetch, we set g0 to _the number_ + * of elements to fetch, @num, since we know that the last + * indexed to be fetched does not exceed the table. + * + * If, however, g0 >= RAS_MAX_RECORD_COUNT, then + * we set g0 to the number of elements to read + * until the end of the table, and g1 to the number of + * elements to read from the beginning of the table. 
+ */ + g0 = control->ras_fri + num - 1; + g1 = g0 % control->ras_max_record_count; + if (g0 < control->ras_max_record_count) { + g0 = num; + g1 = 0; + } else { + g0 = control->ras_max_record_count - control->ras_fri; + g1 += 1; + } + + mutex_lock(&control->ras_tbl_mutex); + res = __ras_eeprom_read(control, buf, control->ras_fri, g0); + if (res) + goto Out; + if (g1) { + res = __ras_eeprom_read(control, + buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1); + if (res) + goto Out; + } + + res = 0; + + /* Read up everything? Then transform. + */ + pp = buf; + for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) { + __decode_table_record_from_buf(control, &record[i], pp); + + /* update bad channel bitmap */ + if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) && + !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) { + control->bad_channel_bitmap |= 1 << record[i].mem_channel; + control->update_channel_flag = true; + } + } +Out: + kfree(buf); + mutex_unlock(&control->ras_tbl_mutex); + + return res; +} + +uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + + /* get available eeprom table version first before eeprom table init */ + ras_set_eeprom_table_version(control); + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + return RAS_MAX_RECORD_COUNT_V2_1; + else + return RAS_MAX_RECORD_COUNT; +} + +/** + * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum + * @control: pointer to control structure + * + * Check the checksum of the stored in EEPROM RAS table. + * + * Return 0 if the checksum is correct, + * positive if it is not correct, and + * -errno on I/O error. 
+ */ +static int __verify_ras_table_checksum(struct ras_eeprom_control *control) +{ + struct ras_core_context *ras_core = to_ras_core_context(control); + int buf_size, res; + u8 csum, *buf, *pp; + + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) + buf_size = RAS_TABLE_HEADER_SIZE + + RAS_TABLE_V2_1_INFO_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + else + buf_size = RAS_TABLE_HEADER_SIZE + + control->ras_num_recs * RAS_TABLE_RECORD_SIZE; + + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Out of memory checking RAS table checksum.\n"); + return -ENOMEM; + } + + res = __eeprom_read(ras_core, + control->i2c_address + + control->ras_header_offset, + buf, buf_size); + if (res < buf_size) { + RAS_DEV_ERR(ras_core->dev, + "Partial read for checksum, res:%d\n", res); + /* On partial reads, return -EIO. + */ + if (res >= 0) + res = -EIO; + goto Out; + } + + csum = 0; + for (pp = buf; pp < buf + buf_size; pp++) + csum += *pp; +Out: + kfree(buf); + return res < 0 ? res : csum; +} + +static int __read_table_ras_info(struct ras_eeprom_control *control) +{ + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + struct ras_core_context *ras_core = to_ras_core_context(control); + unsigned char *buf; + int res; + + buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); + if (!buf) { + RAS_DEV_ERR(ras_core->dev, + "Failed to alloc buf to read EEPROM table ras info\n"); + return -ENOMEM; + } + + /** + * EEPROM table V2_1 supports ras info, + * read EEPROM table ras info + */ + res = __eeprom_read(ras_core, + control->i2c_address + control->ras_info_offset, + buf, RAS_TABLE_V2_1_INFO_SIZE); + if (res < RAS_TABLE_V2_1_INFO_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Failed to read EEPROM table ras info, res:%d\n", res); + res = res >= 0 ? -EIO : res; + goto Out; + } + + __decode_table_ras_info_from_buf(rai, buf); + +Out: + kfree(buf); + return res == RAS_TABLE_V2_1_INFO_SIZE ? 
0 : res; +} + +static int __check_ras_table_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 }; + struct ras_eeprom_table_header *hdr; + int res; + + hdr = &control->tbl_hdr; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (!__get_eeprom_i2c_addr(ras_core, control)) + return -EINVAL; + + control->ras_header_offset = RAS_HDR_START; + control->ras_info_offset = RAS_TABLE_V2_1_INFO_START; + mutex_init(&control->ras_tbl_mutex); + + /* Read the table header from EEPROM address */ + res = __eeprom_read(ras_core, + control->i2c_address + control->ras_header_offset, + buf, RAS_TABLE_HEADER_SIZE); + if (res < RAS_TABLE_HEADER_SIZE) { + RAS_DEV_ERR(ras_core->dev, + "Failed to read EEPROM table header, res:%d\n", res); + return res >= 0 ? -EIO : res; + } + + __decode_table_header_from_buf(hdr, buf); + + if (hdr->header != RAS_TABLE_HDR_VAL && + hdr->header != RAS_TABLE_HDR_BAD) { + RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table"); + return ras_eeprom_reset_table(ras_core); + } + + switch (hdr->version) { + case RAS_TABLE_VER_V2_1: + case RAS_TABLE_VER_V3: + control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr); + control->ras_record_offset = RAS_RECORD_START_V2_1; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1; + break; + case RAS_TABLE_VER_V1: + control->ras_num_recs = RAS_NUM_RECS(hdr); + control->ras_record_offset = RAS_RECORD_START; + control->ras_max_record_count = RAS_MAX_RECORD_COUNT; + break; + default: + RAS_DEV_ERR(ras_core->dev, + "RAS header invalid, unsupported version: %u", + hdr->version); + return -EINVAL; + } + + if (control->ras_num_recs > control->ras_max_record_count) { + RAS_DEV_ERR(ras_core->dev, + "RAS header invalid, records in header: %u max allowed :%u", + control->ras_num_recs, control->ras_max_record_count); + return -EINVAL; + } + + control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset); + + 
return 0; +} + +int ras_eeprom_check_storage_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_header *hdr; + int bad_page_count; + int res = 0; + + if (!__is_ras_eeprom_supported(ras_core)) + return 0; + + if (!__get_eeprom_i2c_addr(ras_core, control)) + return -EINVAL; + + hdr = &control->tbl_hdr; + + bad_page_count = ras_umc_get_badpage_count(ras_core); + if (hdr->header == RAS_TABLE_HDR_VAL) { + RAS_DEV_INFO(ras_core->dev, + "Found existing EEPROM table with %d records\n", + bad_page_count); + + if (hdr->version >= RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + + res = __verify_ras_table_checksum(control); + if (res) + RAS_DEV_ERR(ras_core->dev, + "RAS table incorrect checksum or error:%d\n", res); + + /* Warn if we are at 90% of the threshold or above + */ + if (10 * bad_page_count >= 9 * control->record_threshold_count) + RAS_DEV_WARN(ras_core->dev, + "RAS records:%u exceeds 90%% of threshold:%d\n", + bad_page_count, + control->record_threshold_count); + + } else if (hdr->header == RAS_TABLE_HDR_BAD && + control->record_threshold_config != 0) { + if (hdr->version >= RAS_TABLE_VER_V2_1) { + res = __read_table_ras_info(control); + if (res) + return res; + } + + res = __verify_ras_table_checksum(control); + if (res) + RAS_DEV_ERR(ras_core->dev, + "RAS Table incorrect checksum or error:%d\n", res); + + if (control->record_threshold_count >= bad_page_count) { + /* This means that, the threshold was increased since + * the last time the system was booted, and now, + * ras->record_threshold_count - control->num_recs > 0, + * so that at least one more record can be saved, + * before the page count threshold is reached. 
+ */ + RAS_DEV_INFO(ras_core->dev, + "records:%d threshold:%d, resetting RAS table header signature", + bad_page_count, + control->record_threshold_count); + res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL); + } else { + RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d", + bad_page_count, control->record_threshold_count); + if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) || + (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) { + RAS_DEV_WARN(ras_core->dev, + "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n"); + res = 0; + } else { + ras_core->is_rma = true; + RAS_DEV_ERR(ras_core->dev, + "User defined threshold is set, runtime service will be halt when threshold is reached\n"); + } + } + } + + return res < 0 ? res : 0; +} + +int ras_eeprom_hw_init(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + struct ras_eeprom_config *eeprom_cfg; + + if (!ras_core) + return -EINVAL; + + ras_core->is_rma = false; + + control = &ras_core->ras_eeprom; + + memset(control, 0, sizeof(*control)); + + eeprom_cfg = &ras_core->config->eeprom_cfg; + control->record_threshold_config = + eeprom_cfg->eeprom_record_threshold_config; + + control->record_threshold_count = ras_eeprom_max_record_count(ras_core); + if (eeprom_cfg->eeprom_record_threshold_count < + control->record_threshold_count) + control->record_threshold_count = + eeprom_cfg->eeprom_record_threshold_count; + + control->sys_func = eeprom_cfg->eeprom_sys_fn; + control->max_read_len = eeprom_cfg->max_i2c_read_len; + control->max_write_len = eeprom_cfg->max_i2c_write_len; + control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter; + control->i2c_port = eeprom_cfg->eeprom_i2c_port; + control->i2c_address = eeprom_cfg->eeprom_i2c_addr; + + control->update_channel_flag = false; + + return __check_ras_table_status(ras_core); +} + +int ras_eeprom_hw_fini(struct ras_core_context *ras_core) +{ + struct 
ras_eeprom_control *control; + + if (!ras_core) + return -EINVAL; + + control = &ras_core->ras_eeprom; + mutex_destroy(&control->ras_tbl_mutex); + + return 0; +} + +uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core) +{ + if (!ras_core) + return 0; + + return ras_core->ras_eeprom.ras_num_recs; +} + +void ras_eeprom_sync_info(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control; + + if (!ras_core) + return; + + control = &ras_core->ras_eeprom; + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM, + &control->ras_num_recs); + ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP, + &control->bad_channel_bitmap); +} + +enum ras_gpu_health_status + ras_eeprom_check_gpu_status(struct ras_core_context *ras_core) +{ + struct ras_eeprom_control *control = &ras_core->ras_eeprom; + struct ras_eeprom_table_ras_info *rai = &control->tbl_rai; + + if (!__is_ras_eeprom_supported(ras_core) || + !control->record_threshold_config) + return RAS_GPU_HEALTH_NONE; + + if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) + return RAS_GPU_IN_BAD_STATUS; + + return rai->rma_status; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h new file mode 100644 index 000000000000..2abe566c18b6 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_EEPROM_H__ +#define __RAS_EEPROM_H__ +#include "ras_sys.h" + +#define RAS_TABLE_VER_V1 0x00010000 +#define RAS_TABLE_VER_V2_1 0x00021000 +#define RAS_TABLE_VER_V3 0x00030000 + +#define NONSTOP_OVER_THRESHOLD -2 +#define WARN_NONSTOP_OVER_THRESHOLD -1 +#define DISABLE_RETIRE_PAGE 0 + +/* + * Bad address pfn : eeprom_umc_record.retired_row_pfn[39:0], + * nps mode: eeprom_umc_record.retired_row_pfn[47:40] + */ +#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL +#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL +#define EEPROM_RECORD_UMC_NPS_SHIFT 40 + +#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \ + (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \ + EEPROM_RECORD_UMC_NPS_SHIFT) + +#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \ + ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK) + +#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \ +do { \ + uint64_t tmp = (NPS); \ + tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \ + tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \ + (RECORD)->retired_row_pfn = tmp; \ +} while (0) + +enum ras_gpu_health_status { + RAS_GPU_HEALTH_NONE = 0, + RAS_GPU_HEALTH_USABLE = 1, + RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2, + RAS_GPU_IN_BAD_STATUS = 3, +}; + +enum ras_eeprom_err_type { + RAS_EEPROM_ERR_NA, + RAS_EEPROM_ERR_RECOVERABLE, + RAS_EEPROM_ERR_NON_RECOVERABLE, + RAS_EEPROM_ERR_COUNT, +}; + +struct ras_eeprom_table_header { + uint32_t header; + uint32_t version; + uint32_t first_rec_offset; + uint32_t tbl_size; + uint32_t checksum; +} __packed; + +struct ras_eeprom_table_ras_info { + u8 rma_status; + u8 health_percent; + u16 ecc_page_threshold; + u32 padding[64 - 1]; +} __packed; + +struct ras_eeprom_control { + struct ras_eeprom_table_header tbl_hdr; + struct ras_eeprom_table_ras_info tbl_rai; + + /* record threshold */ + int record_threshold_config; + uint32_t record_threshold_count; + bool update_channel_flag; + + const struct ras_eeprom_sys_func 
*sys_func; + void *i2c_adapter; + u32 i2c_port; + u16 max_read_len; + u16 max_write_len; + + /* Base I2C EEPPROM 19-bit memory address, + * where the table is located. For more information, + * see top of amdgpu_eeprom.c. + */ + u32 i2c_address; + + /* The byte offset off of @i2c_address + * where the table header is found, + * and where the records start--always + * right after the header. + */ + u32 ras_header_offset; + u32 ras_info_offset; + u32 ras_record_offset; + + /* Number of records in the table. + */ + u32 ras_num_recs; + + /* First record index to read, 0-based. + * Range is [0, num_recs-1]. This is + * an absolute index, starting right after + * the table header. + */ + u32 ras_fri; + + /* Maximum possible number of records + * we could store, i.e. the maximum capacity + * of the table. + */ + u32 ras_max_record_count; + + /* Protect table access via this mutex. + */ + struct mutex ras_tbl_mutex; + + /* Record channel info which occurred bad pages + */ + u32 bad_channel_bitmap; +}; + +/* + * Represents single table record. Packed to be easily serialized into byte + * stream. + */ +struct eeprom_umc_record { + + union { + uint64_t address; + uint64_t offset; + }; + + uint64_t retired_row_pfn; + uint64_t ts; + + enum ras_eeprom_err_type err_type; + + union { + unsigned char bank; + unsigned char cu; + }; + + unsigned char mem_channel; + unsigned char mcumc_id; + + /* The following variables will not be saved to eeprom. 
+ */ + uint64_t cur_nps_retired_row_pfn; + uint32_t cur_nps_bank; + uint32_t cur_nps; +}; + +struct ras_core_context; +int ras_eeprom_hw_init(struct ras_core_context *ras_core); +int ras_eeprom_hw_fini(struct ras_core_context *ras_core); + +int ras_eeprom_reset_table(struct ras_core_context *ras_core); + +bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core); + +int ras_eeprom_read(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, const u32 num); + +int ras_eeprom_append(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, const u32 num); + +uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core); +uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core); +void ras_eeprom_sync_info(struct ras_core_context *ras_core); + +int ras_eeprom_check_storage_status(struct ras_core_context *ras_core); +enum ras_gpu_health_status + ras_eeprom_check_gpu_status(struct ras_core_context *ras_core); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c new file mode 100644 index 000000000000..f5ce28777705 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_gfx_v9_0.h" +#include "ras_gfx.h" +#include "ras_core_status.h" + +static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(9, 4, 3): + case IP_VERSION(9, 4, 4): + case IP_VERSION(9, 5, 0): + return &gfx_ras_func_v9_0; + default: + RAS_DEV_ERR(ras_core->dev, + "GFX ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock) +{ + struct ras_gfx *gfx = &ras_core->ras_gfx; + + return gfx->ip_func->get_ta_subblock(ras_core, + error_type, subblock, ta_subblock); +} + +int ras_gfx_hw_init(struct ras_core_context *ras_core) +{ + struct ras_gfx *gfx = &ras_core->ras_gfx; + + gfx->gfx_ip_version = ras_core->config->gfx_ip_version; + + gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version); + + return gfx->ip_func ? RAS_CORE_OK : -EINVAL; +} + +int ras_gfx_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h new file mode 100644 index 000000000000..8a42d69fb0ad --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_GFX_H__ +#define __RAS_GFX_H__ + +struct ras_gfx_ip_func { + int (*get_ta_subblock)(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock); +}; + +struct ras_gfx { + uint32_t gfx_ip_version; + const struct ras_gfx_ip_func *ip_func; +}; + +int ras_gfx_hw_init(struct ras_core_context *ras_core); +int ras_gfx_hw_fini(struct ras_core_context *ras_core); + +int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock); + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c new file mode 100644 index 000000000000..6213d3f125be --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_gfx_v9_0.h" +#include "ras_core_status.h" + +enum ta_gfx_v9_subblock { + /*CPC*/ + TA_GFX_V9__GFX_CPC_INDEX_START = 0, + TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START, + TA_GFX_V9__GFX_CPC_UCODE, + TA_GFX_V9__GFX_DC_STATE_ME1, + TA_GFX_V9__GFX_DC_CSINVOC_ME1, + TA_GFX_V9__GFX_DC_RESTORE_ME1, + TA_GFX_V9__GFX_DC_STATE_ME2, + TA_GFX_V9__GFX_DC_CSINVOC_ME2, + TA_GFX_V9__GFX_DC_RESTORE_ME2, + TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2, + /* CPF*/ + TA_GFX_V9__GFX_CPF_INDEX_START, + TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START, + TA_GFX_V9__GFX_CPF_ROQ_ME1, + TA_GFX_V9__GFX_CPF_TAG, + TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG, + /* CPG*/ + TA_GFX_V9__GFX_CPG_INDEX_START, + TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START, + TA_GFX_V9__GFX_CPG_DMA_TAG, + TA_GFX_V9__GFX_CPG_TAG, + TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG, + /* GDS*/ + TA_GFX_V9__GFX_GDS_INDEX_START, + TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START, + TA_GFX_V9__GFX_GDS_INPUT_QUEUE, + TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM, + TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM, + TA_GFX_V9__GFX_GDS_OA_PIPE_MEM, + TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM, + /* SPI*/ + TA_GFX_V9__GFX_SPI_SR_MEM, + /* SQ*/ + TA_GFX_V9__GFX_SQ_INDEX_START, + TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START, + TA_GFX_V9__GFX_SQ_LDS_D, + TA_GFX_V9__GFX_SQ_LDS_I, + TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/ + TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR, + /* SQC (3 ranges)*/ + TA_GFX_V9__GFX_SQC_INDEX_START, + /* SQC range 0*/ + TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START, + TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO = + TA_GFX_V9__GFX_SQC_INDEX0_START, + TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + 
TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + TA_GFX_V9__GFX_SQC_INDEX0_END = + TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1*/ + TA_GFX_V9__GFX_SQC_INDEX1_START, + TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM = + TA_GFX_V9__GFX_SQC_INDEX1_START, + TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX1_END = + TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2*/ + TA_GFX_V9__GFX_SQC_INDEX2_START, + TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM = + TA_GFX_V9__GFX_SQC_INDEX2_START, + TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO, + TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO, + TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX2_END = + TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END, + /* TA*/ + TA_GFX_V9__GFX_TA_INDEX_START, + TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START, + TA_GFX_V9__GFX_TA_FS_AFIFO, + TA_GFX_V9__GFX_TA_FL_LFIFO, + TA_GFX_V9__GFX_TA_FX_LFIFO, + TA_GFX_V9__GFX_TA_FS_CFIFO, + TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO, + /* TCA*/ + TA_GFX_V9__GFX_TCA_INDEX_START, + TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START, + TA_GFX_V9__GFX_TCA_REQ_FIFO, + TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges)*/ + TA_GFX_V9__GFX_TCC_INDEX_START, + /* TCC range 0*/ + TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START, + 
TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0, + TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1, + TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0, + TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1, + TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG, + TA_GFX_V9__GFX_TCC_LOW_RATE_TAG, + TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1*/ + TA_GFX_V9__GFX_TCC_INDEX1_START, + TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START, + TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + TA_GFX_V9__GFX_TCC_INDEX1_END = + TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2*/ + TA_GFX_V9__GFX_TCC_INDEX2_START, + TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START, + TA_GFX_V9__GFX_TCC_RETURN_CONTROL, + TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO, + TA_GFX_V9__GFX_TCC_WRITE_RETURN, + TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ, + TA_GFX_V9__GFX_TCC_SRC_FIFO, + TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM, + TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + TA_GFX_V9__GFX_TCC_INDEX2_END = + TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3*/ + TA_GFX_V9__GFX_TCC_INDEX3_START, + TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START, + TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + TA_GFX_V9__GFX_TCC_INDEX3_END = + TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4*/ + TA_GFX_V9__GFX_TCC_INDEX4_START, + TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN = + TA_GFX_V9__GFX_TCC_INDEX4_START, + TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_GFX_V9__GFX_TCC_INDEX4_END = + TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END, + /* TCI*/ + TA_GFX_V9__GFX_TCI_WRITE_RAM, + /* TCP*/ + TA_GFX_V9__GFX_TCP_INDEX_START, + TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START, + TA_GFX_V9__GFX_TCP_LFIFO_RAM, + TA_GFX_V9__GFX_TCP_CMD_FIFO, + TA_GFX_V9__GFX_TCP_VM_FIFO, + TA_GFX_V9__GFX_TCP_DB_RAM, + TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0, + 
TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + TA_GFX_V9__GFX_TCP_INDEX_END = TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + /* TD*/ + TA_GFX_V9__GFX_TD_INDEX_START, + TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START, + TA_GFX_V9__GFX_TD_SS_FIFO_HI, + TA_GFX_V9__GFX_TD_CS_FIFO, + TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges)*/ + TA_GFX_V9__GFX_EA_INDEX_START, + /* EA range 0*/ + TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START, + TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START, + TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM, + TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM, + TA_GFX_V9__GFX_EA_RRET_TAGMEM, + TA_GFX_V9__GFX_EA_WRET_TAGMEM, + TA_GFX_V9__GFX_EA_GMIRD_CMDMEM, + TA_GFX_V9__GFX_EA_GMIWR_CMDMEM, + TA_GFX_V9__GFX_EA_GMIWR_DATAMEM, + TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM, + /* EA range 1*/ + TA_GFX_V9__GFX_EA_INDEX1_START, + TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START, + TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM, + TA_GFX_V9__GFX_EA_IORD_CMDMEM, + TA_GFX_V9__GFX_EA_IOWR_CMDMEM, + TA_GFX_V9__GFX_EA_IOWR_DATAMEM, + TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM, + TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2*/ + TA_GFX_V9__GFX_EA_INDEX2_START, + TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START, + TA_GFX_V9__GFX_EA_MAM_D1MEM, + TA_GFX_V9__GFX_EA_MAM_D2MEM, + TA_GFX_V9__GFX_EA_MAM_D3MEM, + TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM, + TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END, + /* UTC VM L2 bank*/ + TA_GFX_V9__UTC_VML2_BANK_CACHE, + /* UTC VM walker*/ + TA_GFX_V9__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache*/ + TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache*/ + TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK, + TA_GFX_V9__GFX_MAX +}; + +struct ras_gfx_subblock_t { + unsigned char *name; + int ta_subblock; + int hw_supported_error_type; + int sw_supported_error_type; +}; + +#define 
RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \ + [RAS_GFX_V9__##subblock] = { \ + #subblock, \ + TA_GFX_V9__##subblock, \ + ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \ + (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \ + } + +const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = { + RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + 
RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + 
RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0, + 1), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0, + 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0, + 0), + RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 
0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0), + RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0), + 
RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0), +}; + +static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core, + uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock) +{ + const struct ras_gfx_subblock_t *gfx_subblock; + + if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks)) + return -EINVAL; + + gfx_subblock = &ras_gfx_v9_0_subblocks[subblock]; + if (!gfx_subblock->name) + return -EPERM; + + if (!(gfx_subblock->hw_supported_error_type & error_type)) { + RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware do not support type 0x%x\n", + gfx_subblock->name, error_type); + return -EPERM; + } + + if (!(gfx_subblock->sw_supported_error_type & error_type)) { + RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver do not support type 0x%x\n", + gfx_subblock->name, error_type); + return -EPERM; + } + + *ta_subblock = gfx_subblock->ta_subblock; + + return 0; +} + +const struct ras_gfx_ip_func gfx_ras_func_v9_0 = { + .get_ta_subblock = gfx_v9_0_get_ta_subblock, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h new file mode 100644 index 000000000000..659b56619747 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_GFX_V9_0_H__ +#define __RAS_GFX_V9_0_H__ + +enum ras_gfx_v9_subblock { + /* CPC */ + RAS_GFX_V9__GFX_CPC_INDEX_START = 0, + RAS_GFX_V9__GFX_CPC_SCRATCH = + RAS_GFX_V9__GFX_CPC_INDEX_START, + RAS_GFX_V9__GFX_CPC_UCODE, + RAS_GFX_V9__GFX_DC_STATE_ME1, + RAS_GFX_V9__GFX_DC_CSINVOC_ME1, + RAS_GFX_V9__GFX_DC_RESTORE_ME1, + RAS_GFX_V9__GFX_DC_STATE_ME2, + RAS_GFX_V9__GFX_DC_CSINVOC_ME2, + RAS_GFX_V9__GFX_DC_RESTORE_ME2, + RAS_GFX_V9__GFX_CPC_INDEX_END = + RAS_GFX_V9__GFX_DC_RESTORE_ME2, + /* CPF */ + RAS_GFX_V9__GFX_CPF_INDEX_START, + RAS_GFX_V9__GFX_CPF_ROQ_ME2 = + RAS_GFX_V9__GFX_CPF_INDEX_START, + RAS_GFX_V9__GFX_CPF_ROQ_ME1, + RAS_GFX_V9__GFX_CPF_TAG, + RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG, + /* CPG */ + RAS_GFX_V9__GFX_CPG_INDEX_START, + RAS_GFX_V9__GFX_CPG_DMA_ROQ = + RAS_GFX_V9__GFX_CPG_INDEX_START, + RAS_GFX_V9__GFX_CPG_DMA_TAG, + RAS_GFX_V9__GFX_CPG_TAG, + RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG, + /* GDS */ + RAS_GFX_V9__GFX_GDS_INDEX_START, + RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START, + RAS_GFX_V9__GFX_GDS_INPUT_QUEUE, + RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM, + RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM, + RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM, + RAS_GFX_V9__GFX_GDS_INDEX_END = + RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM, + /* SPI */ + RAS_GFX_V9__GFX_SPI_SR_MEM, + /* SQ */ + RAS_GFX_V9__GFX_SQ_INDEX_START, + RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START, + RAS_GFX_V9__GFX_SQ_LDS_D, + 
RAS_GFX_V9__GFX_SQ_LDS_I, + RAS_GFX_V9__GFX_SQ_VGPR, + RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR, + /* SQC (3 ranges) */ + RAS_GFX_V9__GFX_SQC_INDEX_START, + /* SQC range 0 */ + RAS_GFX_V9__GFX_SQC_INDEX0_START = + RAS_GFX_V9__GFX_SQC_INDEX_START, + RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO = + RAS_GFX_V9__GFX_SQC_INDEX0_START, + RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + RAS_GFX_V9__GFX_SQC_INDEX0_END = + RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1 */ + RAS_GFX_V9__GFX_SQC_INDEX1_START, + RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM = + RAS_GFX_V9__GFX_SQC_INDEX1_START, + RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX1_END = + RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2 */ + RAS_GFX_V9__GFX_SQC_INDEX2_START, + RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM = + RAS_GFX_V9__GFX_SQC_INDEX2_START, + RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX2_END = + RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM, + RAS_GFX_V9__GFX_SQC_INDEX_END = + RAS_GFX_V9__GFX_SQC_INDEX2_END, + /* TA */ + RAS_GFX_V9__GFX_TA_INDEX_START, + RAS_GFX_V9__GFX_TA_FS_DFIFO = + 
RAS_GFX_V9__GFX_TA_INDEX_START, + RAS_GFX_V9__GFX_TA_FS_AFIFO, + RAS_GFX_V9__GFX_TA_FL_LFIFO, + RAS_GFX_V9__GFX_TA_FX_LFIFO, + RAS_GFX_V9__GFX_TA_FS_CFIFO, + RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO, + /* TCA */ + RAS_GFX_V9__GFX_TCA_INDEX_START, + RAS_GFX_V9__GFX_TCA_HOLE_FIFO = + RAS_GFX_V9__GFX_TCA_INDEX_START, + RAS_GFX_V9__GFX_TCA_REQ_FIFO, + RAS_GFX_V9__GFX_TCA_INDEX_END = + RAS_GFX_V9__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges) */ + RAS_GFX_V9__GFX_TCC_INDEX_START, + /* TCC range 0 */ + RAS_GFX_V9__GFX_TCC_INDEX0_START = + RAS_GFX_V9__GFX_TCC_INDEX_START, + RAS_GFX_V9__GFX_TCC_CACHE_DATA = + RAS_GFX_V9__GFX_TCC_INDEX0_START, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0, + RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1, + RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0, + RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1, + RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG, + RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG, + RAS_GFX_V9__GFX_TCC_INDEX0_END = + RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1 */ + RAS_GFX_V9__GFX_TCC_INDEX1_START, + RAS_GFX_V9__GFX_TCC_IN_USE_DEC = + RAS_GFX_V9__GFX_TCC_INDEX1_START, + RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + RAS_GFX_V9__GFX_TCC_INDEX1_END = + RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2 */ + RAS_GFX_V9__GFX_TCC_INDEX2_START, + RAS_GFX_V9__GFX_TCC_RETURN_DATA = + RAS_GFX_V9__GFX_TCC_INDEX2_START, + RAS_GFX_V9__GFX_TCC_RETURN_CONTROL, + RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO, + RAS_GFX_V9__GFX_TCC_WRITE_RETURN, + RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ, + RAS_GFX_V9__GFX_TCC_SRC_FIFO, + RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM, + RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + RAS_GFX_V9__GFX_TCC_INDEX2_END = + RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3 */ + RAS_GFX_V9__GFX_TCC_INDEX3_START, + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO = + RAS_GFX_V9__GFX_TCC_INDEX3_START, + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + RAS_GFX_V9__GFX_TCC_INDEX3_END = + RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 
4 */ + RAS_GFX_V9__GFX_TCC_INDEX4_START, + RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN = + RAS_GFX_V9__GFX_TCC_INDEX4_START, + RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + RAS_GFX_V9__GFX_TCC_INDEX4_END = + RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER, + RAS_GFX_V9__GFX_TCC_INDEX_END = + RAS_GFX_V9__GFX_TCC_INDEX4_END, + /* TCI */ + RAS_GFX_V9__GFX_TCI_WRITE_RAM, + /* TCP */ + RAS_GFX_V9__GFX_TCP_INDEX_START, + RAS_GFX_V9__GFX_TCP_CACHE_RAM = + RAS_GFX_V9__GFX_TCP_INDEX_START, + RAS_GFX_V9__GFX_TCP_LFIFO_RAM, + RAS_GFX_V9__GFX_TCP_CMD_FIFO, + RAS_GFX_V9__GFX_TCP_VM_FIFO, + RAS_GFX_V9__GFX_TCP_DB_RAM, + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0, + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + RAS_GFX_V9__GFX_TCP_INDEX_END = + RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1, + /* TD */ + RAS_GFX_V9__GFX_TD_INDEX_START, + RAS_GFX_V9__GFX_TD_SS_FIFO_LO = + RAS_GFX_V9__GFX_TD_INDEX_START, + RAS_GFX_V9__GFX_TD_SS_FIFO_HI, + RAS_GFX_V9__GFX_TD_CS_FIFO, + RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges) */ + RAS_GFX_V9__GFX_EA_INDEX_START, + /* EA range 0 */ + RAS_GFX_V9__GFX_EA_INDEX0_START = + RAS_GFX_V9__GFX_EA_INDEX_START, + RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM = + RAS_GFX_V9__GFX_EA_INDEX0_START, + RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM, + RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM, + RAS_GFX_V9__GFX_EA_RRET_TAGMEM, + RAS_GFX_V9__GFX_EA_WRET_TAGMEM, + RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM, + RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM, + RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM, + RAS_GFX_V9__GFX_EA_INDEX0_END = + RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM, + /* EA range 1 */ + RAS_GFX_V9__GFX_EA_INDEX1_START, + RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = + RAS_GFX_V9__GFX_EA_INDEX1_START, + RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM, + RAS_GFX_V9__GFX_EA_IORD_CMDMEM, + RAS_GFX_V9__GFX_EA_IOWR_CMDMEM, + RAS_GFX_V9__GFX_EA_IOWR_DATAMEM, + RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM, + RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + RAS_GFX_V9__GFX_EA_INDEX1_END = + RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2 */ + RAS_GFX_V9__GFX_EA_INDEX2_START, + 
RAS_GFX_V9__GFX_EA_MAM_D0MEM = + RAS_GFX_V9__GFX_EA_INDEX2_START, + RAS_GFX_V9__GFX_EA_MAM_D1MEM, + RAS_GFX_V9__GFX_EA_MAM_D2MEM, + RAS_GFX_V9__GFX_EA_MAM_D3MEM, + RAS_GFX_V9__GFX_EA_INDEX2_END = + RAS_GFX_V9__GFX_EA_MAM_D3MEM, + RAS_GFX_V9__GFX_EA_INDEX_END = + RAS_GFX_V9__GFX_EA_INDEX2_END, + /* UTC VM L2 bank */ + RAS_GFX_V9__UTC_VML2_BANK_CACHE, + /* UTC VM walker */ + RAS_GFX_V9__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache */ + RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache */ + RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK, + RAS_GFX_V9__GFX_MAX +}; + +extern const struct ras_gfx_ip_func gfx_ras_func_v9_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c new file mode 100644 index 000000000000..d0621464f1a7 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_core_status.h" +#include "ras_log_ring.h" + +#define RAS_LOG_MAX_QUERY_SIZE 0xC000 +#define RAS_LOG_MEM_TEMP_SIZE 0x200 +#define RAS_LOG_MEMPOOL_SIZE \ + (RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE) + +#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn)) + +static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = { + [ACA_REG_IDX__CTL] = 0x1, + [ACA_REG_IDX__STATUS] = 0xB000000000000137, + [ACA_REG_IDX__ADDR] = 0x0, + [ACA_REG_IDX__MISC0] = 0x0, + [ACA_REG_IDX__CONFG] = 0x1ff00000002, + [ACA_REG_IDX__IPID] = 0x9600000000, + [ACA_REG_IDX__SYND] = 0x0, +}; + +static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint64_t count = 0; + + if (log_ring->logged_ecc_count < 0) { + RAS_DEV_WARN(ras_core->dev, + "Error: the logged ras count should not less than 0!\n"); + count = 0; + } else { + count = log_ring->logged_ecc_count; + } + + if (count > RAS_LOG_MEMPOOL_SIZE) + RAS_DEV_WARN(ras_core->dev, + "Error: the logged ras count is out of range!\n"); + + return count; +} + +static int ras_log_ring_add_data(struct ras_core_context *ras_core, + struct ras_log_info *log, struct ras_log_batch_tag *batch_tag) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + int ret = 0; + + if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) { + RAS_DEV_ERR(ras_core->dev, + "Invalid batch sub seqno:%d, batch:0x%llx\n", + batch_tag->sub_seqno, batch_tag->batch_id); + return -EINVAL; + } + + spin_lock_irqsave(&log_ring->spin_lock, flags); + if (batch_tag) { + log->seqno = + 
BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno); + batch_tag->sub_seqno++; + } else { + log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0); + log_ring->mono_upward_batch_id++; + } + ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log); + if (!ret) + log_ring->logged_ecc_count++; + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + if (ret) { + RAS_DEV_ERR(ras_core->dev, + "Failed to add ras log! seqno:0x%llx, ret:%d\n", + log->seqno, ret); + mempool_free(log, log_ring->ras_log_mempool); + } + + return ret; +} + +static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + uint32_t i = 0, j = 0; + uint64_t batch_id, idx; + void *data; + int ret = -ENODATA; + + if (count > ras_log_ring_get_logged_ecc_count(ras_core)) + return -EINVAL; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_id = log_ring->last_del_batch_id; + while (batch_id < log_ring->mono_upward_batch_id) { + for (j = 0; j < MAX_RECORD_PER_BATCH; j++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, j); + data = radix_tree_delete(&log_ring->ras_log_root, idx); + if (data) { + mempool_free(data, log_ring->ras_log_mempool); + log_ring->logged_ecc_count--; + i++; + } + } + batch_id = ++log_ring->last_del_batch_id; + if (i >= count) { + ret = 0; + break; + } + } + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + return ret; +} + +static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint64_t batch_id, idx; + unsigned long flags = 0; + void *data; + int j; + + if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) && + !log_ring->logged_ecc_count) + return; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_id = log_ring->last_del_batch_id; + while (batch_id < log_ring->mono_upward_batch_id) { + for (j = 0; j < 
MAX_RECORD_PER_BATCH; j++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, j); + data = radix_tree_delete(&log_ring->ras_log_root, idx); + if (data) { + mempool_free(data, log_ring->ras_log_mempool); + log_ring->logged_ecc_count--; + } + } + batch_id++; + } + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + +} + +int ras_log_ring_sw_init(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + memset(log_ring, 0, sizeof(*log_ring)); + + log_ring->ras_log_mempool = mempool_create_kmalloc_pool( + RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info)); + if (!log_ring->ras_log_mempool) + return -ENOMEM; + + INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL); + + spin_lock_init(&log_ring->spin_lock); + + return 0; +} + +int ras_log_ring_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + ras_log_ring_clear_log_tree(ras_core); + log_ring->logged_ecc_count = 0; + log_ring->last_del_batch_id = 0; + log_ring->mono_upward_batch_id = 0; + + mempool_destroy(log_ring->ras_log_mempool); + + return 0; +} + +struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + struct ras_log_batch_tag *batch_tag; + unsigned long flags = 0; + + batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL); + if (!batch_tag) + return NULL; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + batch_tag->batch_id = log_ring->mono_upward_batch_id; + log_ring->mono_upward_batch_id++; + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + batch_tag->sub_seqno = 0; + batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core); + return batch_tag; +} + +void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core, + struct ras_log_batch_tag *batch_tag) +{ + kfree(batch_tag); +} + +void ras_log_ring_add_log_event(struct ras_core_context *ras_core, + enum ras_log_event event, void *data, struct 
ras_log_batch_tag *batch_tag) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + struct ras_log_info *log; + void *obj; + + obj = mempool_alloc_preallocated(log_ring->ras_log_mempool); + if (!obj || + (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) { + ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE); + if (!obj) + obj = mempool_alloc_preallocated(log_ring->ras_log_mempool); + } + + if (!obj) { + RAS_DEV_ERR(ras_core->dev, "ERROR: Failed to alloc ras log buffer!\n"); + return; + } + + log = (struct ras_log_info *)obj; + + memset(log, 0, sizeof(*log)); + log->timestamp = + batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core); + log->event = event; + + if (data) + memcpy(&log->aca_reg, data, sizeof(log->aca_reg)); + + if (event == RAS_LOG_EVENT_RMA) + memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg)); + + ras_log_ring_add_data(ras_core, log, batch_tag); +} + +static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core, + uint64_t idx) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + unsigned long flags = 0; + void *data; + + spin_lock_irqsave(&log_ring->spin_lock, flags); + data = radix_tree_lookup(&log_ring->ras_log_root, idx); + spin_unlock_irqrestore(&log_ring->spin_lock, flags); + + return (struct ras_log_info *)data; +} + +int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id, + struct ras_log_info **log_arr, uint32_t arr_num) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + uint32_t i, idx, count = 0; + void *data; + + if ((batch_id >= log_ring->mono_upward_batch_id) || + (batch_id < log_ring->last_del_batch_id)) + return -EINVAL; + + for (i = 0; i < MAX_RECORD_PER_BATCH; i++) { + idx = BATCH_IDX_TO_TREE_IDX(batch_id, i); + data = ras_log_ring_lookup_data(ras_core, idx); + if (data) { + log_arr[count++] = data; + if (count >= arr_num) + break; + } + } + + return count; +} + +int 
ras_log_ring_get_batch_overview(struct ras_core_context *ras_core, + struct ras_log_batch_overview *overview) +{ + struct ras_log_ring *log_ring = &ras_core->ras_log_ring; + + overview->logged_batch_count = + log_ring->mono_upward_batch_id - log_ring->last_del_batch_id; + overview->last_batch_id = log_ring->mono_upward_batch_id; + overview->first_batch_id = log_ring->last_del_batch_id; + + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h new file mode 100644 index 000000000000..0ff6cc35678d --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_LOG_RING_H__ +#define __RAS_LOG_RING_H__ +#include "ras_aca.h" + +#define MAX_RECORD_PER_BATCH 32 + +#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8) + +enum ras_log_event { + RAS_LOG_EVENT_NONE, + RAS_LOG_EVENT_UE, + RAS_LOG_EVENT_DE, + RAS_LOG_EVENT_CE, + RAS_LOG_EVENT_POISON_CREATION, + RAS_LOG_EVENT_POISON_CONSUMPTION, + RAS_LOG_EVENT_RMA, + RAS_LOG_EVENT_COUNT_MAX, +}; + +struct ras_aca_reg { + uint64_t regs[ACA_REG_MAX_COUNT]; +}; + +struct ras_log_info { + uint64_t seqno; + uint64_t timestamp; + enum ras_log_event event; + union { + struct ras_aca_reg aca_reg; + }; +}; + +struct ras_log_batch_tag { + uint64_t batch_id; + uint64_t timestamp; + uint32_t sub_seqno; +}; + +struct ras_log_ring { + void *ras_log_mempool; + struct radix_tree_root ras_log_root; + spinlock_t spin_lock; + uint64_t mono_upward_batch_id; + uint64_t last_del_batch_id; + int logged_ecc_count; +}; + +struct ras_log_batch_overview { + uint64_t first_batch_id; + uint64_t last_batch_id; + uint32_t logged_batch_count; +}; + +struct ras_core_context; + +int ras_log_ring_sw_init(struct ras_core_context *ras_core); +int ras_log_ring_sw_fini(struct ras_core_context *ras_core); + +struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core); +void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core, + struct ras_log_batch_tag *tag); +void ras_log_ring_add_log_event(struct ras_core_context *ras_core, + enum ras_log_event event, void *data, struct ras_log_batch_tag *tag); + +int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx, + struct ras_log_info **log_arr, uint32_t arr_num); + +int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core, + struct ras_log_batch_overview *overview); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c new file mode 100644 index 000000000000..f3321df85021 --- /dev/null +++ 
b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "ras.h" +#include "ras_mp1.h" +#include "ras_mp1_v13_0.h" + +static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + return &mp1_ras_func_v13_0; + default: + RAS_DEV_ERR(ras_core->dev, + "MP1 ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_mp1_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + return mp1->ip_func->get_valid_bank_count(ras_core, type, count); +} + +int ras_mp1_dump_bank(struct ras_core_context *ras_core, + u32 type, u32 idx, u32 reg_idx, u64 *val) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val); +} + +int ras_mp1_hw_init(struct ras_core_context *ras_core) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + + mp1->mp1_ip_version = ras_core->config->mp1_ip_version; + mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn; + if (!mp1->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n"); + return -EINVAL; + } + + mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version); + + return mp1->ip_func ? RAS_CORE_OK : -EINVAL; +} + +int ras_mp1_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h new file mode 100644 index 000000000000..de1d08286f41 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_MP1_H__ +#define __RAS_MP1_H__ +#include "ras.h" + +enum ras_err_type; +struct ras_mp1_ip_func { + int (*get_valid_bank_count)(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count); + int (*dump_valid_bank)(struct ras_core_context *ras_core, + enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val); +}; + +struct ras_mp1 { + uint32_t mp1_ip_version; + const struct ras_mp1_ip_func *ip_func; + const struct ras_mp1_sys_func *sys_func; +}; + +int ras_mp1_hw_init(struct ras_core_context *ras_core); +int ras_mp1_hw_fini(struct ras_core_context *ras_core); + +int ras_mp1_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count); + +int ras_mp1_dump_bank(struct ras_core_context *ras_core, + u32 ecc_type, u32 idx, u32 reg_idx, u64 *val); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c new file mode 100644 index 000000000000..310d39fc816b --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_mp1.h" +#include "ras_core_status.h" +#include "ras_mp1_v13_0.h" + +#define RAS_MP1_MSG_QueryValidMcaCount 0x36 +#define RAS_MP1_MSG_McaBankDumpDW 0x37 +#define RAS_MP1_MSG_ClearMcaOnRead 0x39 +#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A +#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B + +#define MAX_UE_BANKS_PER_QUERY 12 +#define MAX_CE_BANKS_PER_QUERY 12 + +static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core, + enum ras_err_type type, u32 *count) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + const struct ras_mp1_sys_func *sys_func = mp1->sys_func; + uint32_t bank_count = 0; + u32 msg; + int ret; + + if (!count) + return -EINVAL; + + if (!sys_func || !sys_func->mp1_get_valid_bank_count) + return -RAS_CORE_NOT_SUPPORTED; + + switch (type) { + case RAS_ERR_TYPE__UE: + msg = RAS_MP1_MSG_QueryValidMcaCount; + break; + case RAS_ERR_TYPE__CE: + case RAS_ERR_TYPE__DE: + msg = RAS_MP1_MSG_QueryValidMcaCeCount; + break; + default: + return -EINVAL; + } + + ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count); + if (!ret) { + if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) || + ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY))) + return -EINVAL; + + *count = bank_count; + } + + return ret; +} + +static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core, + enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val) +{ + struct ras_mp1 *mp1 = &ras_core->ras_mp1; + const struct ras_mp1_sys_func *sys_func = mp1->sys_func; + u32 msg; + + if (!sys_func || !sys_func->mp1_dump_valid_bank) + return -RAS_CORE_NOT_SUPPORTED; + + switch (type) { + case RAS_ERR_TYPE__UE: + msg = 
RAS_MP1_MSG_McaBankDumpDW; + break; + case RAS_ERR_TYPE__CE: + case RAS_ERR_TYPE__DE: + msg = RAS_MP1_MSG_McaBankCeDumpDW; + break; + default: + return -EINVAL; + } + + return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val); +} + +const struct ras_mp1_ip_func mp1_ras_func_v13_0 = { + .get_valid_bank_count = mp1_v13_0_get_bank_count, + .dump_valid_bank = mp1_v13_0_dump_bank, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h new file mode 100644 index 000000000000..2edfdb5f6a75 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_MP1_V13_0_H__ +#define __RAS_MP1_V13_0_H__ +#include "ras_mp1.h" + +extern const struct ras_mp1_ip_func mp1_ras_func_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c new file mode 100644 index 000000000000..8bf1f35d595e --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio.h"
+#include "ras_nbio_v7_9.h"
+
+/* Map an NBIO hardware IP version to its per-version function table,
+ * or return NULL (with an error log) for unsupported versions.
+ */
+static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs(
+	struct ras_core_context *ras_core, uint32_t ip_version)
+{
+	switch (ip_version) {
+	case IP_VERSION(7, 9, 0):
+		return &ras_nbio_v7_9;
+	default:
+		RAS_DEV_ERR(ras_core->dev,
+			"NBIO ip version(0x%x) is not supported!\n", ip_version);
+		break;
+	}
+
+	return NULL;
+}
+
+/* Bind the system and per-IP function tables from the core config and
+ * enable the RAS controller/err_event_athub interrupts.
+ * Returns 0 on success, -EINVAL if either table is unavailable.
+ */
+int ras_nbio_hw_init(struct ras_core_context *ras_core)
+{
+	struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+	nbio->nbio_ip_version = ras_core->config->nbio_ip_version;
+	nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn;
+	if (!nbio->sys_func) {
+		RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n");
+		return -EINVAL;
+	}
+
+	nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version);
+	if (!nbio->ip_func)
+		return -EINVAL;
+
+	/* sys_func is known non-NULL here (checked above). */
+	if (nbio->sys_func->set_ras_controller_irq_state)
+		nbio->sys_func->set_ras_controller_irq_state(ras_core, true);
+	if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+		nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true);
+
+	return 0;
+}
+
+/* Disable the RAS controller/err_event_athub interrupts on teardown. */
+int ras_nbio_hw_fini(struct ras_core_context *ras_core)
+{
+	struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+	if (nbio->sys_func) {
+		if (nbio->sys_func->set_ras_controller_irq_state)
+			nbio->sys_func->set_ras_controller_irq_state(ras_core, false);
+		if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+			nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false);
+	}
+
+	return 0;
+}
+
+/* Dispatch a RAS NBIO interrupt to the per-version handlers, if present. */
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data)
+{
+	struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+	if (nbio->ip_func) {
+		if (nbio->ip_func->handle_ras_controller_intr_no_bifring)
+			nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core);
+		if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring)
+			nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core);
+	}
+
+	
return true; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h new file mode 100644 index 000000000000..0a1313e59a02 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_NBIO_H__ +#define __RAS_NBIO_H__ +#include "ras.h" + +struct ras_core_context; + +struct ras_nbio_ip_func { + int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core); + int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core); + uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core); +}; + +struct ras_nbio { + uint32_t nbio_ip_version; + const struct ras_nbio_ip_func *ip_func; + const struct ras_nbio_sys_func *sys_func; +}; + +int ras_nbio_hw_init(struct ras_core_context *ras_core); +int ras_nbio_hw_fini(struct ras_core_context *ras_core); +bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c new file mode 100644 index 000000000000..f17d708ec668 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "ras.h" +#include "ras_nbio_v7_9.h" + +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1 +#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L + +#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2 +#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe + +#define regBIF_BX0_BIF_INTR_CNTL 0x0101 +#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2 + +/* BIF_BX0_BIF_INTR_CNTL */ +#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0 +#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L + +#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164 +#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2 +/* BIF_BX_PF0_PARTITION_MEM_STATUS */ +#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0 +#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4 +#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL +#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L + + +static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core) +{ + uint32_t bif_doorbell_intr_cntl = 0; + + bif_doorbell_intr_cntl = + RAS_DEV_RREG32_SOC15(ras_core->dev, 
NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_CNTLR_INTERRUPT_CLEAR, 1); + + RAS_DEV_WREG32_SOC15(ras_core->dev, + NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + /* TODO: handle ras controller interrupt */ + } + + return 0; +} + +static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core) +{ + uint32_t bif_doorbell_intr_cntl = 0; + int ret = 0; + + bif_doorbell_intr_cntl = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); + + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_BX0_BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + + RAS_DEV_WREG32_SOC15(ras_core->dev, + NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + ret = ras_core_handle_fatal_error(ras_core); + } + + return ret; +} + +static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core) +{ + uint32_t mem_status; + uint32_t mem_mode; + + mem_status = + RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS); + + /* Each bit represents a mode 1-8*/ + mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE); + + return ffs(mem_mode); +} + +const struct ras_nbio_ip_func ras_nbio_v7_9 = { + .handle_ras_controller_intr_no_bifring = + nbio_v7_9_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = + nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring, + .get_memory_partition_mode = 
nbio_v7_9_get_memory_partition_mode, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h new file mode 100644 index 000000000000..8711c82a927f --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_NBIO_V7_9_H__ +#define __RAS_NBIO_V7_9_H__ +#include "ras_nbio.h" + +extern const struct ras_nbio_ip_func ras_nbio_v7_9; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c new file mode 100644 index 000000000000..02f0657f78a3 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_process.h" + +#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req)) + +#define RAS_POLLING_ECC_TIMEOUT 300 + +static int ras_process_put_event(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + int ret; + + ret = kfifo_in_spinlocked(&ras_proc->event_fifo, + req, sizeof(*req), &ras_proc->fifo_spinlock); + if (!ret) { + RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n"); + return -ENOSPC; + } + + return 0; +} + +static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core, + uint32_t reset_cause) +{ + struct ras_event_req req = {0}; + + req.reset = reset_cause; + + return ras_process_put_event(ras_core, &req); +} + +static int ras_process_get_event(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + + return kfifo_out_spinlocked(&ras_proc->event_fifo, + req, sizeof(*req), &ras_proc->fifo_spinlock); +} + +static void ras_process_clear_event_fifo(struct ras_core_context *ras_core) +{ + struct ras_event_req req; + int ret; + + do { + ret = ras_process_get_event(ras_core, &req); + } while (ret); +} + +#define AMDGPU_RAS_WAITING_DATA_READY 200 +static int ras_process_umc_event(struct ras_core_context *ras_core, + uint32_t event_count) +{ + struct ras_ecc_count ecc_data; + int ret = 0; + uint32_t timeout = 0; + uint32_t detected_de_count = 0; + + do { + memset(&ecc_data, 0, sizeof(ecc_data)); + ret = ras_core_update_ecc_info(ras_core); + if (ret) + return ret; + + ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data); + if (ret) + return ret; + + if (ecc_data.new_de_count) { + detected_de_count += ecc_data.new_de_count; + timeout = 0; + } else { + if (!timeout && event_count) + timeout = AMDGPU_RAS_WAITING_DATA_READY; + + if (timeout) { + if (!--timeout) + break; + + msleep(1); + } + } + } while (detected_de_count < event_count); + + 
if (detected_de_count && ras_core_gpu_is_rma(ras_core))
+		ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA);
+
+	return 0;
+}
+
+/* Drain the non-UMC poison event fifo, notify consumers, and issue a GPU
+ * reset if any event requested one. Returns 0, a notify error code, or
+ * -RAS_CORE_GPU_IN_MODE1_RESET when an RMA-caused reset was triggered.
+ */
+static int ras_process_non_umc_event(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	struct ras_event_req req;
+	uint32_t event_count = kfifo_len(&ras_proc->event_fifo);
+	uint32_t reset_flags = 0;
+	uint32_t i;
+	int ret = 0;
+
+	for (i = 0; i < event_count; i++) {
+		memset(&req, 0, sizeof(req));
+		/* kfifo returns the number of bytes copied; 0 means empty.
+		 * Do not keep that byte count in ret - it is not a status.
+		 */
+		if (!ras_process_get_event(ras_core, &req))
+			continue;
+
+		ras_core_event_notify(ras_core,
+			RAS_EVENT_ID__POISON_CONSUMPTION, &req);
+
+		reset_flags |= req.reset;
+
+		if (req.reset == GPU_RESET_CAUSE_RMA)
+			continue;
+
+		if (req.reset)
+			RAS_DEV_INFO(ras_core->dev,
+				"{%llu} GPU reset for %s RAS poison consumption is issued!\n",
+				req.seqno, ras_core_get_ras_block_name(req.block));
+		else
+			RAS_DEV_INFO(ras_core->dev,
+				"{%llu} %s RAS poison consumption is issued!\n",
+				req.seqno, ras_core_get_ras_block_name(req.block));
+	}
+
+	if (reset_flags) {
+		ret = ras_core_event_notify(ras_core,
+			RAS_EVENT_ID__RESET_GPU, &reset_flags);
+		if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA))
+			return -RAS_CORE_GPU_IN_MODE1_RESET;
+	}
+
+	return ret;
+}
+
+/* Top-level RAS event handler: process pending UMC interrupts first, then
+ * any queued non-UMC poison events. On mode-1 reset the pending state is
+ * discarded, since the reset invalidates it.
+ */
+int ras_process_handle_ras_event(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	uint32_t umc_event_count;
+	int ret;
+
+	ras_aca_clear_fatal_flag(ras_core);
+	ras_umc_log_pending_bad_bank(ras_core);
+
+	do {
+		umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+		ret = ras_process_umc_event(ras_core, umc_event_count);
+		if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+			break;
+
+		if (umc_event_count)
+			atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+	} while (atomic_read(&ras_proc->umc_interrupt_count));
+
+	if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+	    (kfifo_len(&ras_proc->event_fifo)))
+		ret = ras_process_non_umc_event(ras_core);
+
+	if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+		/* Clear poison fifo */
+		ras_process_clear_event_fifo(ras_core);
+		atomic_set(&ras_proc->umc_interrupt_count, 0);
+	}
+
+	return ret;
+}
+
+/* Wake-up predicate for the RAS worker: stop request or pending interrupt. */
+static int thread_wait_condition(void *param)
+{
+	struct ras_process *ras_proc = (struct ras_process *)param;
+
+	return (kthread_should_stop() ||
+		atomic_read(&ras_proc->ras_interrupt_req));
+}
+
+/* RAS worker thread: wakes on interrupt requests (or the polling timeout)
+ * and dispatches event processing, either through the system callback or
+ * the built-in handler.
+ */
+static int ras_process_thread(void *context)
+{
+	struct ras_core_context *ras_core = (struct ras_core_context *)context;
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	while (!kthread_should_stop()) {
+		ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+			thread_wait_condition, ras_proc,
+			msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+		if (kthread_should_stop())
+			break;
+
+		if (!ras_core->is_initialized)
+			continue;
+
+		atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+		if (ras_core_gpu_in_reset(ras_core))
+			continue;
+
+		if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+			ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+		else
+			ras_process_handle_ras_event(ras_core);
+	}
+
+	return 0;
+}
+
+/* Allocate the poison event fifo and start the RAS worker thread.
+ * Returns 0 on success or a negative error code; partially-initialized
+ * state is torn down via ras_process_fini().
+ */
+int ras_process_init(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+	int ret;
+
+	ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&ras_proc->fifo_spinlock);
+
+	init_waitqueue_head(&ras_proc->ras_process_wq);
+
+	ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+		(void *)ras_core, "ras_process_thread");
+	if (IS_ERR(ras_proc->ras_process_thread)) {
+		/* kthread_run() returns an ERR_PTR on failure, never NULL. */
+		RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+		ret = PTR_ERR(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	ras_process_fini(ras_core);
+	return ret;
+}
+
+/* Stop the worker thread and release the event fifo. Safe to call on a
+ * partially-initialized context.
+ */
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+	struct ras_process *ras_proc = &ras_core->ras_proc;
+
+	if (ras_proc->ras_process_thread) {
+		kthread_stop(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+	}
+
+	
kfifo_free(&ras_proc->event_fifo); + + return 0; +} + +static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + + atomic_inc(&ras_proc->umc_interrupt_count); + atomic_inc(&ras_proc->ras_interrupt_req); + + wake_up(&ras_proc->ras_process_wq); + return 0; +} + +static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core, + struct ras_event_req *req) +{ + struct ras_process *ras_proc = &ras_core->ras_proc; + int ret; + + ret = ras_process_put_event(ras_core, req); + if (!ret) { + atomic_inc(&ras_proc->ras_interrupt_req); + wake_up(&ras_proc->ras_process_wq); + } + + return ret; +} + +int ras_process_add_interrupt_req(struct ras_core_context *ras_core, + struct ras_event_req *req, bool is_umc) +{ + int ret; + + if (!ras_core) + return -EINVAL; + + if (!ras_core->is_initialized) + return -EPERM; + + if (is_umc) + ret = ras_process_add_umc_interrupt_req(ras_core, req); + else + ret = ras_process_add_non_umc_interrupt_req(ras_core, req); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h new file mode 100644 index 000000000000..28458b50510e --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_PROCESS_H__ +#define __RAS_PROCESS_H__ + +struct ras_event_req { + uint64_t seqno; + uint32_t idx_vf; + uint32_t block; + uint16_t pasid; + uint32_t reset; + void *pasid_fn; + void *data; +}; + +struct ras_process { + void *dev; + void *ras_process_thread; + wait_queue_head_t ras_process_wq; + atomic_t ras_interrupt_req; + atomic_t umc_interrupt_count; + struct kfifo event_fifo; + spinlock_t fifo_spinlock; +}; + +struct ras_core_context; +int ras_process_init(struct ras_core_context *ras_core); +int ras_process_fini(struct ras_core_context *ras_core); +int ras_process_handle_ras_event(struct ras_core_context *ras_core); +int ras_process_add_interrupt_req(struct ras_core_context *ras_core, + struct ras_event_req *req, bool is_umc); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c new file mode 100644 index 000000000000..ccdb42d2dd60 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c @@ -0,0 +1,750 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_ta_if.h" +#include "ras_psp.h" +#include "ras_psp_v13_0.h" + +/* position of instance value in sub_block_index of + * ta_ras_trigger_error_input, the sub block uses lower 12 bits + */ +#define RAS_TA_INST_MASK 0xfffff000 +#define RAS_TA_INST_SHIFT 0xc + +static const struct ras_psp_ip_func *ras_psp_get_ip_funcs( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(13, 0, 6): + case IP_VERSION(13, 0, 14): + case IP_VERSION(13, 0, 12): + return &ras_psp_v13_0; + default: + RAS_DEV_ERR(ras_core->dev, + "psp ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + struct ras_psp_sys_status status = {0}; + int ret; + + if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) { + ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status); + if (ret) + return ret; + + if (status.initialized) { + ta_ctx->preload_ras_ta_enabled = true; + ta_ctx->ras_ta_initialized = status.initialized; + ta_ctx->session_id = status.session_id; + } + + psp_ctx->external_mutex = status.psp_cmd_mutex; + } + + return 0; +} + +static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core, + struct ras_ta_init_param *ras_ta_param) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + if (psp->sys_func && psp->sys_func->get_ras_ta_init_param) + return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param); + + 
RAS_DEV_ERR(ras_core->dev, "Not config get_ras_ta_init_param API!!\n"); + return -EACCES; +} + +static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core, + enum gpu_mem_type mem_type) +{ + struct ras_psp *psp = &ras_core->ras_psp; + struct gpu_mem_block *gpu_mem = NULL; + int ret; + + switch (mem_type) { + case GPU_MEM_TYPE_RAS_PSP_RING: + gpu_mem = &psp->psp_ring.ras_ring_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_PSP_CMD: + gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_PSP_FENCE: + gpu_mem = &psp->psp_ctx.out_fence_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_TA_FW: + gpu_mem = &psp->ta_ctx.fw_gpu_mem; + break; + case GPU_MEM_TYPE_RAS_TA_CMD: + gpu_mem = &psp->ta_ctx.cmd_gpu_mem; + break; + default: + return NULL; + } + + if (!gpu_mem->ref_count) { + ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem); + if (ret) + return NULL; + gpu_mem->mem_type = mem_type; + } + + gpu_mem->ref_count++; + + return gpu_mem; +} + +static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core, + struct gpu_mem_block *gpu_mem) +{ + if (!gpu_mem) + return 0; + + gpu_mem->ref_count--; + + if (gpu_mem->ref_count > 0) { + return 0; + } else if (gpu_mem->ref_count < 0) { + RAS_DEV_WARN(ras_core->dev, + "Duplicate free gpu memory %u\n", gpu_mem->mem_type); + } else { + ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem); + memset(gpu_mem, 0, sizeof(*gpu_mem)); + } + + return 0; +} + +static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + + if (psp_ctx->external_mutex) + mutex_lock(psp_ctx->external_mutex); + else + mutex_lock(&psp_ctx->internal_mutex); +} + +static void __release_psp_cmd_lock(struct ras_core_context *ras_core) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + + if (psp_ctx->external_mutex) + mutex_unlock(psp_ctx->external_mutex); + else + mutex_unlock(&psp_ctx->internal_mutex); +} + +static uint32_t 
__get_ring_frame_slot(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + uint32_t ras_ring_wptr_dw; + + ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core); + + return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame)); +} + +static int __set_ring_frame_slot(struct ras_core_context *ras_core, + uint32_t slot) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + return psp->ip_func->psp_ras_ring_wptr_set(ras_core, + (slot * sizeof(struct psp_gfx_rb_frame)) >> 2); +} + +static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core, + struct psp_gfx_rb_frame *frame) +{ + struct gpu_mem_block *ring_mem; + struct psp_gfx_rb_frame *rb_frame; + uint32_t max_frame_slot; + uint32_t slot_idx; + uint32_t write_flush_read_back = 0; + int ret = 0; + + ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING); + if (!ring_mem) + return -ENOMEM; + + max_frame_slot = + div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame)); + + rb_frame = + (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr; + + slot_idx = __get_ring_frame_slot(ras_core); + if (slot_idx >= max_frame_slot) + slot_idx = 0; + + memcpy(&rb_frame[slot_idx], frame, sizeof(*frame)); + + /* Do a read to force the write of the frame before writing + * write pointer. + */ + write_flush_read_back = rb_frame[slot_idx].fence_value; + if (write_flush_read_back != frame->fence_value) { + RAS_DEV_ERR(ras_core->dev, + "Failed to submit ring cmd! 
cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n", + rb_frame[slot_idx].cmd_buf_addr_hi, + rb_frame[slot_idx].cmd_buf_addr_lo, + rb_frame[slot_idx].fence_addr_hi, + rb_frame[slot_idx].fence_addr_lo, + write_flush_read_back, frame->fence_value); + ret = -EACCES; + goto err; + } + + slot_idx++; + + if (slot_idx >= max_frame_slot) + slot_idx = 0; + + __set_ring_frame_slot(ras_core, slot_idx); + +err: + ras_psp_put_gpu_mem(ras_core, ring_mem); + return ret; +} + +static int send_psp_cmd(struct ras_core_context *ras_core, + enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data, + uint32_t cmd_size, struct psp_cmd_resp *resp) +{ + struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx; + struct gpu_mem_block *psp_cmd_buf = NULL; + struct gpu_mem_block *psp_fence_buf = NULL; + struct psp_gfx_cmd_resp *gfx_cmd; + struct psp_gfx_rb_frame rb_frame; + int ret = 0; + int timeout = 1000; + + if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) { + RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id); + return -EINVAL; + } + + __acquire_psp_cmd_lock(ras_core); + + psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD); + if (!psp_cmd_buf) { + ret = -ENOMEM; + goto exit; + } + + psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE); + if (!psp_fence_buf) { + ret = -ENOMEM; + goto exit; + } + + gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr; + memset(gfx_cmd, 0, sizeof(*gfx_cmd)); + gfx_cmd->cmd_id = gfx_cmd_id; + memcpy(&gfx_cmd->cmd, cmd_data, cmd_size); + + psp_ctx->in_fence_value++; + + memset(&rb_frame, 0, sizeof(rb_frame)); + rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr); + rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr); + rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr); + rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr); + rb_frame.fence_value = psp_ctx->in_fence_value; + + ret = 
write_frame_to_ras_psp_ring(ras_core, &rb_frame); + if (ret) { + psp_ctx->in_fence_value--; + goto exit; + } + + while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) != + psp_ctx->in_fence_value) { + if (--timeout == 0) + break; + /* + * Shouldn't wait for timeout when err_event_athub occurs, + * because gpu reset thread triggered and lock resource should + * be released for psp resume sequence. + */ + if (ras_core_ras_interrupt_detected(ras_core)) + break; + + msleep(2); + } + + resp->status = gfx_cmd->resp.status; + resp->session_id = gfx_cmd->resp.session_id; + +exit: + ras_psp_put_gpu_mem(ras_core, psp_cmd_buf); + ras_psp_put_gpu_mem(ras_core, psp_fence_buf); + + __release_psp_cmd_lock(ras_core); + + return ret; +} + +static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core, + struct ras_ta_cmd *ras_cmd) +{ + + if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) { + RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n"); + ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE; + } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) + RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n"); + + switch (ras_cmd->ras_status) { + case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP: + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: cmd failed due to unsupported ip\n"); + break; + case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ: + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: cmd failed due to unsupported error injection\n"); + break; + case RAS_TA_STATUS__SUCCESS: + break; + case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED: + if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR) + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: Inject error to critical region is not allowed\n"); + break; + default: + RAS_DEV_WARN(ras_core->dev, + "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); + break; + } +} + +static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size, + void 
*out, uint32_t out_size) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct gpu_mem_block *cmd_mem; + struct ras_ta_cmd *ras_cmd; + struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0}; + struct psp_cmd_resp resp = {0}; + int ret = 0; + + if (!in || (in_size > sizeof(union ras_ta_cmd_input)) || + (cmd_id >= MAX_RAS_TA_CMD_ID)) { + RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id); + return -EINVAL; + } + + ras_psp_sync_system_ras_psp_status(ras_core); + + cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD); + if (!cmd_mem) + return -ENOMEM; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) { + ret = -EACCES; + goto out; + } + + ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr; + + mutex_lock(&ta_ctx->ta_mutex); + + memset(ras_cmd, 0, sizeof(*ras_cmd)); + ras_cmd->cmd_id = cmd_id; + memcpy(&ras_cmd->ras_in_message, in, in_size); + + invoke_cmd.ta_cmd_id = cmd_id; + invoke_cmd.session_id = ta_ctx->session_id; + + ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD, + &invoke_cmd, sizeof(invoke_cmd), &resp); + + /* If err_event_athub occurs error inject was successful, however + * return status from TA is no long reliable + */ + if (ras_core_ras_interrupt_detected(ras_core)) { + ret = 0; + goto unlock; + } + + if (ret || resp.status) { + RAS_DEV_ERR(ras_core->dev, + "RAS: Failed to send psp cmd! 
ret:%d, status:%u\n", + ret, resp.status); + ret = -ESTRPIPE; + goto unlock; + } + + if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) { + RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n"); + ret = -EINVAL; + goto unlock; + } + + if (!ras_cmd->ras_status && out && out_size) + memcpy(out, &ras_cmd->ras_out_message, out_size); + + __check_ras_ta_cmd_resp(ras_core, ras_cmd); + +unlock: + mutex_unlock(&ta_ctx->ta_mutex); + ras_core_up_gpu_reset_lock(ras_core); +out: + ras_psp_put_gpu_mem(ras_core, cmd_mem); + return ret; +} + +static int trigger_ras_ta_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask) +{ + uint32_t dev_mask = 0; + + switch (info->block_id) { + case RAS_TA_BLOCK__GFX: + if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type, + info->sub_block_index, &info->sub_block_index)) + return -EINVAL; + + dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask); + break; + case RAS_TA_BLOCK__SDMA: + dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask); + break; + case RAS_TA_BLOCK__VCN: + case RAS_TA_BLOCK__JPEG: + dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask); + break; + default: + dev_mask = instance_mask; + break; + } + + /* reuse sub_block_index for backward compatibility */ + dev_mask <<= RAS_TA_INST_SHIFT; + dev_mask &= RAS_TA_INST_MASK; + info->sub_block_index |= dev_mask; + + return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR, + info, sizeof(*info), NULL, 0); +} + +static int send_load_ta_fw_cmd(struct ras_core_context *ras_core, + struct ras_ta_ctx *ta_ctx) +{ + struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin; + struct gpu_mem_block *fw_mem; + struct gpu_mem_block *cmd_mem; + struct ras_ta_cmd *ta_cmd; + struct ras_ta_init_flags *ta_init_flags; + struct psp_gfx_cmd_load_ta psp_load_ta_cmd; + struct psp_cmd_resp resp = {0}; + struct ras_ta_image_header *fw_hdr = NULL; + int ret; + + fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW); 
+ if (!fw_mem) + return -ENOMEM; + + cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD); + if (!cmd_mem) { + ret = -ENOMEM; + goto err; + } + + ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param); + if (ret) + goto err; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) { + ret = -EACCES; + goto err; + } + + /* copy ras ta binary to shared gpu memory */ + memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size); + fw_mem->mem_size = fw_bin->bin_size; + + /* Initialize ras ta startup parameter */ + ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr; + ta_init_flags = &ta_cmd->ras_in_message.init_flags; + + ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en; + ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode; + ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask; + ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num; + ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode; + ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask; + + /* Setup load ras ta command */ + memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd)); + psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr); + psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr); + psp_load_ta_cmd.app_len = fw_mem->mem_size; + psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr); + psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr); + psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size; + + ret = send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA, + &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp); + if (!ret && !resp.status) { + /* Read TA version at FW offset 0x60 if TA version not found*/ + fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr; + RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n", + (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF, + (fw_hdr->image_version >> 8) & 0xFF, 
fw_hdr->image_version & 0xFF); + ta_ctx->ta_version = fw_hdr->image_version; + ta_ctx->session_id = resp.session_id; + ta_ctx->ras_ta_initialized = true; + } else { + RAS_DEV_ERR(ras_core->dev, + "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status); + } + + ras_core_up_gpu_reset_lock(ras_core); + +err: + ras_psp_put_gpu_mem(ras_core, fw_mem); + ras_psp_put_gpu_mem(ras_core, cmd_mem); + return ret; +} + +static int load_ras_ta_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin; + int ret; + + fw_bin->bin_addr = ras_ta_load->bin_addr; + fw_bin->bin_size = ras_ta_load->bin_size; + fw_bin->fw_version = ras_ta_load->fw_version; + fw_bin->feature_version = ras_ta_load->feature_version; + + ret = send_load_ta_fw_cmd(ras_core, ta_ctx); + if (!ret) { + ras_ta_load->out_session_id = ta_ctx->session_id; + ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version; + } + + return ret; +} + +static int unload_ras_ta_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0}; + struct psp_cmd_resp resp = {0}; + int ret; + + if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) + return -EACCES; + + cmd_unload_ta.session_id = ta_ctx->session_id; + ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA, + &cmd_unload_ta, sizeof(cmd_unload_ta), &resp); + if (ret || resp.status) { + RAS_DEV_ERR(ras_core->dev, + "Failed to unload RAS TA! 
ret:%d, status:%u\n", + ret, resp.status); + goto unlock; + } + + kfree(ta_ctx->fw_bin.bin_addr); + memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin)); + ta_ctx->ta_version = 0; + ta_ctx->ras_ta_initialized = false; + ta_ctx->session_id = 0; + +unlock: + ras_core_up_gpu_reset_lock(ras_core); + + return ret; +} + +int ras_psp_load_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + struct ras_psp_ta_unload ras_ta_unload = {0}; + int ret; + + if (ta_ctx->preload_ras_ta_enabled) + return 0; + + if (!ras_ta_load) + return -EINVAL; + + if (ta_ctx->ras_ta_initialized) { + ras_ta_unload.ras_session_id = ta_ctx->session_id; + ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload); + if (ret) + return ret; + } + + return load_ras_ta_firmware(ras_core, ras_ta_load); +} + +int ras_psp_unload_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (ta_ctx->preload_ras_ta_enabled) + return 0; + + if ((!ras_ta_unload) || + (ras_ta_unload->ras_session_id != ta_ctx->session_id)) + return -EINVAL; + + return unload_ras_ta_firmware(ras_core, ras_ta_unload); +} + +int ras_psp_trigger_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) { + RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!"); + return -ENOEXEC; + } + + if (!info) + return -EINVAL; + + return trigger_ras_ta_error(ras_core, info, instance_mask); +} + +int ras_psp_query_address(struct ras_core_context *ras_core, + struct ras_ta_query_address_input *addr_in, + struct ras_ta_query_address_output *addr_out) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + + if (!ta_ctx->preload_ras_ta_enabled && + !ta_ctx->ras_ta_initialized) { 
+ RAS_DEV_ERR(ras_core->dev, "RAS: ras firmware not initialized!"); + return -ENOEXEC; + } + + if (!addr_in || !addr_out) + return -EINVAL; + + return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS, + addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out)); +} + +int ras_psp_sw_init(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + memset(psp, 0, sizeof(*psp)); + + psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn; + if (!psp->sys_func) { + RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n"); + return -EINVAL; + } + + mutex_init(&psp->psp_ctx.internal_mutex); + mutex_init(&psp->ta_ctx.ta_mutex); + + return 0; +} + +int ras_psp_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + mutex_destroy(&psp->psp_ctx.internal_mutex); + mutex_destroy(&psp->ta_ctx.ta_mutex); + + memset(psp, 0, sizeof(*psp)); + + return 0; +} + +int ras_psp_hw_init(struct ras_core_context *ras_core) +{ + struct ras_psp *psp = &ras_core->ras_psp; + + psp->psp_ip_version = ras_core->config->psp_ip_version; + + psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version); + if (!psp->ip_func) + return -EINVAL; + + /* After GPU reset, the system RAS PSP status may change. + * therefore, it is necessary to synchronize the system status again. + */ + ras_psp_sync_system_ras_psp_status(ras_core); + + return 0; +} + +int ras_psp_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} + +bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id) +{ + struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx; + bool ret = false; + + if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) + return false; + + switch (cmd_id) { + case RAS_TA_CMD_ID__QUERY_ADDRESS: + /* Currently, querying the address from RAS TA is only supported + * when the RAS TA firmware is loaded during driver installation. 
+ */ + if (ta_ctx->preload_ras_ta_enabled) + ret = true; + break; + case RAS_TA_CMD_ID__TRIGGER_ERROR: + ret = true; + break; + default: + ret = false; + break; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h new file mode 100644 index 000000000000..71776fecfd66 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __RAS_PSP_H__ +#define __RAS_PSP_H__ +#include "ras.h" +#include "ras_ta_if.h" + +struct ras_core_context; +struct ras_ta_trigger_error_input; +struct ras_ta_query_address_input; +struct ras_ta_query_address_output; +enum ras_ta_cmd_id; + +struct ras_ta_image_header { + uint32_t reserved1[24]; + uint32_t image_version; /* [0x60] Off Chip Firmware Version */ + uint32_t reserved2[39]; +}; + +struct ras_psp_sys_status { + bool initialized; + uint32_t session_id; + void *psp_cmd_mutex; +}; + +struct ras_ta_init_param { + uint8_t poison_mode_en; + uint8_t dgpu_mode; + uint16_t xcc_mask; + uint8_t channel_dis_num; + uint8_t nps_mode; + uint32_t active_umc_mask; +}; + +struct gpu_mem_block { + uint32_t mem_type; + void *mem_bo; + uint64_t mem_mc_addr; + void *mem_cpu_addr; + uint32_t mem_size; + int ref_count; + void *private; +}; + +struct ras_psp_ip_func { + uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core); + int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr); +}; + +struct ras_psp_ring { + struct gpu_mem_block ras_ring_gpu_mem; +}; + +struct psp_cmd_resp { + uint32_t status; + uint32_t session_id; +}; + +struct ras_psp_ctx { + void *external_mutex; + struct mutex internal_mutex; + uint64_t in_fence_value; + struct gpu_mem_block psp_cmd_gpu_mem; + struct gpu_mem_block out_fence_gpu_mem; +}; + +struct ras_ta_fw_bin { + uint32_t fw_version; + uint32_t feature_version; + uint32_t bin_size; + uint8_t *bin_addr; +}; + +struct ras_ta_ctx { + bool preload_ras_ta_enabled; + bool ras_ta_initialized; + uint32_t session_id; + uint32_t resp_status; + uint32_t ta_version; + struct mutex ta_mutex; + struct ras_ta_fw_bin fw_bin; + struct ras_ta_init_param init_param; + struct gpu_mem_block fw_gpu_mem; + struct gpu_mem_block cmd_gpu_mem; +}; + +struct ras_psp { + uint32_t psp_ip_version; + struct ras_psp_ring psp_ring; + struct ras_psp_ctx psp_ctx; + struct ras_ta_ctx ta_ctx; + const struct ras_psp_ip_func *ip_func; + 
const struct ras_psp_sys_func *sys_func; +}; + +struct ras_psp_ta_load { + uint32_t fw_version; + uint32_t feature_version; + uint32_t bin_size; + uint8_t *bin_addr; + uint64_t out_session_id; + uint32_t out_loaded_ta_version; +}; + +struct ras_psp_ta_unload { + uint64_t ras_session_id; +}; + +int ras_psp_sw_init(struct ras_core_context *ras_core); +int ras_psp_sw_fini(struct ras_core_context *ras_core); +int ras_psp_hw_init(struct ras_core_context *ras_core); +int ras_psp_hw_fini(struct ras_core_context *ras_core); +int ras_psp_load_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_load *ras_ta_load); +int ras_psp_unload_firmware(struct ras_core_context *ras_core, + struct ras_psp_ta_unload *ras_ta_unload); +int ras_psp_trigger_error(struct ras_core_context *ras_core, + struct ras_ta_trigger_error_input *info, uint32_t instance_mask); +int ras_psp_query_address(struct ras_core_context *ras_core, + struct ras_ta_query_address_input *addr_in, + struct ras_ta_query_address_output *addr_out); +bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core, + enum ras_ta_cmd_id cmd_id); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c new file mode 100644 index 000000000000..626cf39b75ac --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "ras.h" +#include "ras_psp_v13_0.h" + +#define regMP0_SMN_C2PMSG_67 0x0083 +#define regMP0_SMN_C2PMSG_67_BASE_IDX 0 + +static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core) +{ + return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67); +} + +static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value) +{ + RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value); + + return 0; +} + +const struct ras_psp_ip_func ras_psp_v13_0 = { + .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get, + .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set, +}; diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h new file mode 100644 index 000000000000..b705ffe38a12 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __RAS_PSP_V13_0_H__ +#define __RAS_PSP_V13_0_H__ +#include "ras_psp.h" + +extern const struct ras_psp_ip_func ras_psp_v13_0; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h new file mode 100644 index 000000000000..0921e36d3274 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _RAS_TA_IF_H +#define _RAS_TA_IF_H +#include "ras.h" + +#define RAS_TA_HOST_IF_VER 0 + +/* Responses have bit 31 set */ +#define RSP_ID_MASK (1U << 31) +#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) + +/* invalid node instance value */ +#define RAS_TA_INV_NODE 0xffff + +/* RAS related enumerations */ +/**********************************************************/ +enum ras_ta_cmd_id { + RAS_TA_CMD_ID__ENABLE_FEATURES = 0, + RAS_TA_CMD_ID__DISABLE_FEATURES, + RAS_TA_CMD_ID__TRIGGER_ERROR, + RAS_TA_CMD_ID__QUERY_BLOCK_INFO, + RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO, + RAS_TA_CMD_ID__QUERY_ADDRESS, + MAX_RAS_TA_CMD_ID +}; + +enum ras_ta_status { + RAS_TA_STATUS__SUCCESS = 0x0000, + RAS_TA_STATUS__RESET_NEEDED = 0xA001, + RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002, + RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003, + RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004, + RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005, + RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006, + RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007, + RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008, + RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0XA009, + RAS_TA_STATUS__ERROR_GENERIC = 0xA00A, + RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B, + RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C, + RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D, + RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E, + RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F, + RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010, + RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011, + RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012, + RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013, + RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014, + RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015, + RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016, + RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017, + RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018, + RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019, + RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A +}; + +enum ras_ta_block { + 
RAS_TA_BLOCK__UMC = 0, + RAS_TA_BLOCK__SDMA, + RAS_TA_BLOCK__GFX, + RAS_TA_BLOCK__MMHUB, + RAS_TA_BLOCK__ATHUB, + RAS_TA_BLOCK__PCIE_BIF, + RAS_TA_BLOCK__HDP, + RAS_TA_BLOCK__XGMI_WAFL, + RAS_TA_BLOCK__DF, + RAS_TA_BLOCK__SMN, + RAS_TA_BLOCK__SEM, + RAS_TA_BLOCK__MP0, + RAS_TA_BLOCK__MP1, + RAS_TA_BLOCK__FUSE, + RAS_TA_BLOCK__MCA, + RAS_TA_BLOCK__VCN, + RAS_TA_BLOCK__JPEG, + RAS_TA_BLOCK__IH, + RAS_TA_BLOCK__MPIO, + RAS_TA_BLOCK__MMSCH, + RAS_TA_NUM_BLOCK_MAX +}; + +enum ras_ta_mca_block { + RAS_TA_MCA_BLOCK__MP0 = 0, + RAS_TA_MCA_BLOCK__MP1 = 1, + RAS_TA_MCA_BLOCK__MPIO = 2, + RAS_TA_MCA_BLOCK__IOHC = 3, + RAS_TA_MCA_NUM_BLOCK_MAX +}; + +enum ras_ta_error_type { + RAS_TA_ERROR__NONE = 0, + RAS_TA_ERROR__PARITY = 1, + RAS_TA_ERROR__SINGLE_CORRECTABLE = 2, + RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4, + RAS_TA_ERROR__POISON = 8, +}; + +enum ras_ta_address_type { + RAS_TA_MCA_TO_PA, + RAS_TA_PA_TO_MCA, +}; + +enum ras_ta_nps_mode { + RAS_TA_UNKNOWN_MODE = 0, + RAS_TA_NPS1_MODE = 1, + RAS_TA_NPS2_MODE = 2, + RAS_TA_NPS4_MODE = 4, + RAS_TA_NPS8_MODE = 8, +}; + +/* Input/output structures for RAS commands */ +/**********************************************************/ + +struct ras_ta_enable_features_input { + enum ras_ta_block block_id; + enum ras_ta_error_type error_type; +}; + +struct ras_ta_disable_features_input { + enum ras_ta_block block_id; + enum ras_ta_error_type error_type; +}; + +struct ras_ta_trigger_error_input { + /* ras-block. i.e. umc, gfx */ + enum ras_ta_block block_id; + + /* type of error. i.e. single_correctable */ + enum ras_ta_error_type inject_error_type; + + /* mem block. i.e. hbm, sram etc. */ + uint32_t sub_block_index; + + /* explicit address of error */ + uint64_t address; + + /* method if error injection. i.e persistent, coherent etc. 
*/ + uint64_t value; +}; + +struct ras_ta_init_flags { + uint8_t poison_mode_en; + uint8_t dgpu_mode; + uint16_t xcc_mask; + uint8_t channel_dis_num; + uint8_t nps_mode; + uint32_t active_umc_mask; +}; + +struct ras_ta_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; + uint32_t socket_id; +}; + +struct ras_ta_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct ras_ta_query_address_input { + enum ras_ta_address_type addr_type; + struct ras_ta_mca_addr ma; + struct ras_ta_phy_addr pa; +}; + +struct ras_ta_output_flags { + uint8_t ras_init_success_flag; + uint8_t err_inject_switch_disable_flag; + uint8_t reg_access_failure_flag; +}; + +struct ras_ta_query_address_output { + /* don't use the flags here */ + struct ras_ta_output_flags flags; + struct ras_ta_mca_addr ma; + struct ras_ta_phy_addr pa; +}; + +/* Common input structure for RAS callbacks */ +/**********************************************************/ +union ras_ta_cmd_input { + struct ras_ta_init_flags init_flags; + struct ras_ta_enable_features_input enable_features; + struct ras_ta_disable_features_input disable_features; + struct ras_ta_trigger_error_input trigger_error; + struct ras_ta_query_address_input address; + uint32_t reserve_pad[256]; +}; + +union ras_ta_cmd_output { + struct ras_ta_output_flags flags; + struct ras_ta_query_address_output address; + uint32_t reserve_pad[256]; +}; + +struct ras_ta_cmd { + uint32_t cmd_id; + uint32_t resp_id; + uint32_t ras_status; + uint32_t if_version; + union ras_ta_cmd_input ras_in_message; + union ras_ta_cmd_output ras_out_message; +}; + +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c new file mode 100644 index 000000000000..4067359bb299 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c @@ -0,0 +1,706 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "ras.h" +#include "ras_umc.h" +#include "ras_umc_v12_0.h" + +#define MAX_ECC_NUM_PER_RETIREMENT 16 + +/* bad page timestamp format + * yy[31:27] mm[26:23] day[22:17] hh[16:12] mm[11:6] ss[5:0] + */ +#define EEPROM_TIMESTAMP_MINUTE 6 +#define EEPROM_TIMESTAMP_HOUR 12 +#define EEPROM_TIMESTAMP_DAY 17 +#define EEPROM_TIMESTAMP_MONTH 23 +#define EEPROM_TIMESTAMP_YEAR 27 + +static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core) +{ + struct ras_time tm = {0}; + uint64_t utc_timestamp = 0; + uint64_t eeprom_timestamp = 0; + + utc_timestamp = ras_core_get_utc_second_timestamp(ras_core); + if (!utc_timestamp) + return utc_timestamp; + + ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm); + + /* the year range is 2000 ~ 2031, set the year if not in the range */ + if (tm.tm_year < 2000) + tm.tm_year = 2000; + if (tm.tm_year > 2031) + tm.tm_year = 2031; + + tm.tm_year -= 2000; + + eeprom_timestamp = tm.tm_sec + (tm.tm_min << EEPROM_TIMESTAMP_MINUTE) + + (tm.tm_hour << EEPROM_TIMESTAMP_HOUR) + + (tm.tm_mday << EEPROM_TIMESTAMP_DAY) + + (tm.tm_mon << EEPROM_TIMESTAMP_MONTH) + + (tm.tm_year << EEPROM_TIMESTAMP_YEAR); + eeprom_timestamp &= 0xffffffff; + + return eeprom_timestamp; +} + +static const struct ras_umc_ip_func *ras_umc_get_ip_func( + struct ras_core_context *ras_core, uint32_t ip_version) +{ + switch (ip_version) { + case IP_VERSION(12, 0, 0): + return &ras_umc_func_v12_0; + default: + RAS_DEV_ERR(ras_core->dev, + "UMC ip version(0x%x) is not supported!\n", ip_version); + break; + } + + return NULL; +} + +int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *in, struct umc_phy_addr *out, + uint32_t nps) +{ + struct ras_ta_query_address_input addr_in; + struct ras_ta_query_address_output addr_out; + int ret; + + if (!in) + return -EINVAL; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + addr_in.ma.err_addr = in->err_addr; + 
addr_in.ma.ch_inst = in->ch_inst; + addr_in.ma.umc_inst = in->umc_inst; + addr_in.ma.node_inst = in->node_inst; + addr_in.ma.socket_id = in->socket_id; + + addr_in.addr_type = RAS_TA_MCA_TO_PA; + + ret = ras_psp_query_address(ras_core, &addr_in, &addr_out); + if (ret) { + RAS_DEV_WARN(ras_core->dev, + "Failed to query RAS physical address for 0x%llx, ret:%d", + in->err_addr, ret); + return -EREMOTEIO; + } + + if (out) { + out->pa = addr_out.pa.pa; + out->bank = addr_out.pa.bank; + out->channel_idx = addr_out.pa.channel_idx; + } + + return 0; +} + +static int ras_umc_log_ecc(struct ras_core_context *ras_core, + unsigned long idx, void *data) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + int ret; + + mutex_lock(&ras_umc->tree_lock); + ret = radix_tree_insert(&ras_umc->root, idx, data); + if (!ret) + radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG); + mutex_unlock(&ras_umc->tree_lock); + + return ret; +} + +int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint64_t buf[8] = {0}; + void **slot; + void *data; + void *iter = buf; + + mutex_lock(&ras_umc->tree_lock); + radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) { + data = ras_radix_tree_delete_iter(&ras_umc->root, iter); + kfree(data); + } + mutex_unlock(&ras_umc->tree_lock); + + return 0; +} + +static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint64_t page_pfn[16]; + int count = 0, i; + + memset(page_pfn, 0, sizeof(page_pfn)); + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) { + count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core, + record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn)); + if (count <= 0) { + RAS_DEV_ERR(ras_core->dev, + "Fail to convert error address! 
count:%d\n", count); + return; + } + } + + /* Reserve memory */ + for (i = 0; i < count; i++) + ras_core_event_notify(ras_core, + RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]); +} + +/* When gpu reset is ongoing, ecc logging operations will be pended. + */ +int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_bank_ecc_node *ecc_node; + + ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL); + if (!ecc_node) + return -ENOMEM; + + memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc)); + + mutex_lock(&ras_umc->pending_ecc_lock); + list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list); + mutex_unlock(&ras_umc->pending_ecc_lock); + + return 0; +} + +/* After gpu reset is complete, re-log the pending error banks. + */ +int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_bank_ecc_node *ecc_node, *tmp; + + mutex_lock(&ras_umc->pending_ecc_lock); + list_for_each_entry_safe(ecc_node, + tmp, &ras_umc->pending_ecc_list, node){ + if (ecc_node && !ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) { + list_del(&ecc_node->node); + kfree(ecc_node); + } + } + mutex_unlock(&ras_umc->pending_ecc_lock); + + return 0; +} + +int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_umc_record umc_rec; + struct eeprom_umc_record *err_rec; + int ret; + + memset(&umc_rec, 0, sizeof(umc_rec)); + + mutex_lock(&ras_umc->bank_log_lock); + ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec); + if (ret) + goto out; + + err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL); + if (!err_rec) { + ret = -ENOMEM; + goto out; + } + + memcpy(err_rec, &umc_rec, sizeof(umc_rec)); + ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec); + if (ret) { + if (ret == -EEXIST) { + 
RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n"); + ret = 0; + } + + kfree(err_rec); + goto out; + } + + ras_umc_reserve_eeprom_record(ras_core, err_rec); + + ret = ras_core_event_notify(ras_core, + RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL); + +out: + mutex_unlock(&ras_umc->bank_log_lock); + return ret; +} + +static int ras_umc_get_new_records(struct ras_core_context *ras_core, + struct eeprom_umc_record *records, u32 num) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT]; + u32 entry_num = num < MAX_ECC_NUM_PER_RETIREMENT ? num : MAX_ECC_NUM_PER_RETIREMENT; + int count = 0; + int new_detected, i; + + mutex_lock(&ras_umc->tree_lock); + new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries, + 0, entry_num, UMC_ECC_NEW_DETECTED_TAG); + for (i = 0; i < new_detected; i++) { + if (!entries[i]) + continue; + + memcpy(&records[i], entries[i], sizeof(struct eeprom_umc_record)); + count++; + radix_tree_tag_clear(&ras_umc->root, + entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG); + } + mutex_unlock(&ras_umc->tree_lock); + + return count; +} + +static bool ras_umc_check_retired_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, bool from_eeprom) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data; + uint32_t nps = 0; + int i, ret; + + if (from_eeprom) { + nps = ras_umc->umc_err_data.umc_nps_mode; + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) { + ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps); + if (ret) + RAS_DEV_WARN(ras_core->dev, + "Failed to adjust eeprom record, ret:%d", ret); + } + return false; + } + + for (i = 0; i < data->count; i++) { + if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) && + (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn)) + return true; + } + + 
return false; +} + +/* alloc/realloc bps array */ +static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core, + struct eeprom_store_record *data, int pages) +{ + unsigned int old_space = data->count + data->space_left; + unsigned int new_space = old_space + pages; + unsigned int align_space = ALIGN(new_space, 512); + void *bps = kzalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + + if (!bps) + return -ENOMEM; + + if (data->bps) { + memcpy(bps, data->bps, + data->count * sizeof(*data->bps)); + kfree(data->bps); + } + + data->bps = bps; + data->space_left += align_space - old_space; + return 0; +} + +static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data; + + if (!data->space_left && + ras_umc_realloc_err_data_space(ras_core, data, 256)) { + return -ENOMEM; + } + + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + return 0; +} + +static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data; + uint64_t page_pfn[16]; + int count = 0, j; + + if (!data->space_left && + ras_umc_realloc_err_data_space(ras_core, data, 256)) { + return -ENOMEM; + } + + memset(page_pfn, 0, sizeof(page_pfn)); + if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) + count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core, + bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn)); + + if (count > 0) { + for (j = 0; j < count; j++) { + bps->cur_nps_retired_row_pfn = page_pfn[j]; + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + } + } else { + memcpy(&data->bps[data->count], bps, sizeof(*data->bps)); + data->count++; + data->space_left--; + } + + return 0; +} 
+ +/* It deals with VRAM only. */ +static int ras_umc_add_bad_pages(struct ras_core_context *ras_core, + struct eeprom_umc_record *bps, + int pages, bool from_eeprom) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_umc_err_data *data = &ras_umc->umc_err_data; + int i, ret = 0; + + if (!bps || pages <= 0) + return 0; + + mutex_lock(&ras_umc->umc_lock); + for (i = 0; i < pages; i++) { + if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom)) + continue; + + ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]); + if (ret) + goto out; + + if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn) + continue; + + data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn; + + if (from_eeprom) + ras_umc_reserve_eeprom_record(ras_core, &bps[i]); + + ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]); + if (ret) + goto out; + } +out: + mutex_unlock(&ras_umc->umc_lock); + + return ret; +} + +/* + * read error record array in eeprom and reserve enough space for + * storing new bad pages + */ +int ras_umc_load_bad_pages(struct ras_core_context *ras_core) +{ + struct eeprom_umc_record *bps; + uint32_t ras_num_recs; + int ret; + + ras_num_recs = ras_eeprom_get_record_count(ras_core); + /* no bad page record, skip eeprom access */ + if (!ras_num_recs || + ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE) + return 0; + + bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL); + if (!bps) + return -ENOMEM; + + ret = ras_eeprom_read(ras_core, bps, ras_num_recs); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!"); + } else { + ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN; + ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true); + } + + kfree(bps); + return ret; +} + +/* + * write error record array to eeprom, the function should be + * protected by recovery_lock + */ +static int 
ras_umc_save_bad_pages(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data; + uint32_t eeprom_record_num; + int save_count; + int ret = 0; + + if (!data->bps) + return 0; + + eeprom_record_num = ras_eeprom_get_record_count(ras_core); + mutex_lock(&ras_umc->umc_lock); + save_count = data->count - eeprom_record_num; + /* only new entries are saved */ + if (save_count > 0) { + if (ras_eeprom_append(ras_core, + &data->bps[eeprom_record_num], + save_count)) { + RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!"); + ret = -EIO; + goto exit; + } + + RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count); + } + +exit: + mutex_unlock(&ras_umc->umc_lock); + return ret; +} + +int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data) +{ + struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT]; + int count, ret; + + memset(records, 0, sizeof(records)); + count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records)); + if (count <= 0) + return -ENODATA; + + ret = ras_umc_add_bad_pages(ras_core, records, count, false); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n"); + return -EINVAL; + } + + ret = ras_umc_save_bad_pages(ras_core); + if (ret) { + RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n"); + return -EINVAL; + } + + return 0; +} + +int ras_umc_sw_init(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + + memset(ras_umc, 0, sizeof(*ras_umc)); + + INIT_LIST_HEAD(&ras_umc->pending_ecc_list); + + INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL); + + mutex_init(&ras_umc->tree_lock); + mutex_init(&ras_umc->pending_ecc_lock); + mutex_init(&ras_umc->umc_lock); + mutex_init(&ras_umc->bank_log_lock); + + return 0; +} + +int ras_umc_sw_fini(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct ras_umc_err_data 
*umc_err_data = &ras_umc->umc_err_data; + struct ras_bank_ecc_node *ecc_node, *tmp; + + mutex_destroy(&ras_umc->umc_lock); + mutex_destroy(&ras_umc->bank_log_lock); + + if (umc_err_data->rom_data.bps) { + umc_err_data->rom_data.count = 0; + kfree(umc_err_data->rom_data.bps); + umc_err_data->rom_data.bps = NULL; + umc_err_data->rom_data.space_left = 0; + } + + if (umc_err_data->ram_data.bps) { + umc_err_data->ram_data.count = 0; + kfree(umc_err_data->ram_data.bps); + umc_err_data->ram_data.bps = NULL; + umc_err_data->ram_data.space_left = 0; + } + + ras_umc_clear_logged_ecc(ras_core); + + mutex_lock(&ras_umc->pending_ecc_lock); + list_for_each_entry_safe(ecc_node, + tmp, &ras_umc->pending_ecc_list, node){ + list_del(&ecc_node->node); + kfree(ecc_node); + } + mutex_unlock(&ras_umc->pending_ecc_lock); + + mutex_destroy(&ras_umc->tree_lock); + mutex_destroy(&ras_umc->pending_ecc_lock); + + return 0; +} + +int ras_umc_hw_init(struct ras_core_context *ras_core) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + uint32_t nps; + + nps = ras_core_get_curr_nps_mode(ras_core); + + if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) { + RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps); + return -ENODATA; + } + + ras_umc->umc_err_data.umc_nps_mode = nps; + + ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type; + if (!ras_umc->umc_vram_type) { + RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n", + ras_umc->umc_vram_type); + return -ENODATA; + } + + ras_umc->umc_ip_version = ras_core->config->umc_ip_version; + ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version); + if (!ras_umc->ip_func) + return -EINVAL; + + return 0; +} + +int ras_umc_hw_fini(struct ras_core_context *ras_core) +{ + return 0; +} + +int ras_umc_clean_badpage_data(struct ras_core_context *ras_core) +{ + struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data; + + mutex_lock(&ras_core->ras_umc.umc_lock); + + kfree(data->rom_data.bps); + 
kfree(data->ram_data.bps); + + memset(data, 0, sizeof(*data)); + mutex_unlock(&ras_core->ras_umc.umc_lock); + + return 0; +} + +int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core, + uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr, + enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record) +{ + struct eeprom_umc_record *err_rec = record; + + /* Set bad page pfn and nps mode */ + EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec, + RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps); + + err_rec->address = err_addr; + err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core); + err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = cur_nps_addr->channel_idx; + err_rec->mcumc_id = umc_inst; + err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa); + err_rec->cur_nps_bank = cur_nps_addr->bank; + err_rec->cur_nps = cur_nps; + return 0; +} + +int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core) +{ + struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data; + + return err_data->rom_data.count; +} + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data; + + return data->count; +} + +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record) +{ + struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data; + + if (index >= data->count) + return -EINVAL; + + memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record)); + return 0; +} + +bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data; + uint64_t page_pfn = RAS_ADDR_TO_PFN(addr); + int i, ret = false; + + mutex_lock(&ras_umc->umc_lock); + for (i = 0; i < data->count; i++) { + if 
(data->bps[i].cur_nps_retired_row_pfn == page_pfn) { + ret = true; + break; + } + } + mutex_unlock(&ras_umc->umc_lock); + + return ret; +} + +int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa) +{ + struct ras_umc *ras_umc = &ras_core->ras_umc; + int ret = 0; + + if (bank_to_pa) + ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa); + else + ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr); + + return ret; +} diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h new file mode 100644 index 000000000000..7d9e779d8c4c --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __RAS_UMC_H__ +#define __RAS_UMC_H__ +#include "ras.h" +#include "ras_eeprom.h" +#include "ras_cmd.h" + +#define UMC_VRAM_TYPE_UNKNOWN 0 +#define UMC_VRAM_TYPE_GDDR1 1 +#define UMC_VRAM_TYPE_DDR2 2 +#define UMC_VRAM_TYPE_GDDR3 3 +#define UMC_VRAM_TYPE_GDDR4 4 +#define UMC_VRAM_TYPE_GDDR5 5 +#define UMC_VRAM_TYPE_HBM 6 +#define UMC_VRAM_TYPE_DDR3 7 +#define UMC_VRAM_TYPE_DDR4 8 +#define UMC_VRAM_TYPE_GDDR6 9 +#define UMC_VRAM_TYPE_DDR5 10 +#define UMC_VRAM_TYPE_LPDDR4 11 +#define UMC_VRAM_TYPE_LPDDR5 12 +#define UMC_VRAM_TYPE_HBM3E 13 + +#define UMC_ECC_NEW_DETECTED_TAG 0x1 +#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFF) + +/* three column bits and one row bit in MCA address flip + * in bad page retirement + */ +#define UMC_PA_FLIP_BITS_NUM 4 + +enum umc_memory_partition_mode { + UMC_MEMORY_PARTITION_MODE_NONE = 0, + UMC_MEMORY_PARTITION_MODE_NPS1 = 1, + UMC_MEMORY_PARTITION_MODE_NPS2 = 2, + UMC_MEMORY_PARTITION_MODE_NPS3 = 3, + UMC_MEMORY_PARTITION_MODE_NPS4 = 4, + UMC_MEMORY_PARTITION_MODE_NPS6 = 6, + UMC_MEMORY_PARTITION_MODE_NPS8 = 8, + UMC_MEMORY_PARTITION_MODE_UNKNOWN +}; + +struct ras_core_context; +struct ras_bank_ecc; + +struct umc_flip_bits { + uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM]; + uint32_t flip_row_bit; + uint32_t r13_in_pa; + uint32_t bit_num; +}; + +struct umc_mca_addr { + uint64_t err_addr; + uint32_t ch_inst; + uint32_t umc_inst; + uint32_t node_inst; + uint32_t socket_id; +}; + +struct umc_phy_addr { + uint64_t pa; + uint32_t bank; + uint32_t channel_idx; +}; + +struct umc_bank_addr { + uint32_t stack_id; /* SID */ + uint32_t bank_group; + uint32_t bank; + uint32_t row; + uint32_t column; + uint32_t channel; + uint32_t subchannel; /* Also called Pseudochannel (PC) */ +}; + +struct ras_umc_ip_func { + int (*bank_to_eeprom_record)(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct eeprom_umc_record *record); + int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core, + struct 
eeprom_umc_record *record, uint32_t nps); + int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num); + int (*bank_to_soc_pa)(struct ras_core_context *ras_core, + struct umc_bank_addr bank_addr, uint64_t *soc_pa); + int (*soc_pa_to_bank)(struct ras_core_context *ras_core, + uint64_t soc_pa, struct umc_bank_addr *bank_addr); +}; + +struct eeprom_store_record { + /* point to data records array */ + struct eeprom_umc_record *bps; + /* the count of entries */ + int count; + /* the space can place new entries */ + int space_left; +}; + +struct ras_umc_err_data { + struct eeprom_store_record rom_data; + struct eeprom_store_record ram_data; + enum umc_memory_partition_mode umc_nps_mode; + uint64_t last_retired_pfn; +}; + +struct ras_umc { + u32 umc_ip_version; + u32 umc_vram_type; + const struct ras_umc_ip_func *ip_func; + struct radix_tree_root root; + struct mutex tree_lock; + struct mutex umc_lock; + struct mutex bank_log_lock; + struct mutex pending_ecc_lock; + struct ras_umc_err_data umc_err_data; + struct list_head pending_ecc_list; +}; + +int ras_umc_sw_init(struct ras_core_context *ras); +int ras_umc_sw_fini(struct ras_core_context *ras); +int ras_umc_hw_init(struct ras_core_context *ras); +int ras_umc_hw_fini(struct ras_core_context *ras); +int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *in, struct umc_phy_addr *out, + uint32_t nps); +int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data); +int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank); +int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank); +int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core); +int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core); +int ras_umc_load_bad_pages(struct ras_core_context *ras_core); +int ras_umc_get_saved_eeprom_count(struct 
ras_core_context *ras_core); +int ras_umc_clean_badpage_data(struct ras_core_context *ras_core); +int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core, + uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr, + enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record); + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core); +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record); +bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr); +int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core, + uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa); +#endif diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c new file mode 100644 index 000000000000..5d9a11c17a86 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "ras.h" +#include "ras_umc.h" +#include "ras_core_status.h" +#include "ras_umc_v12_0.h" + +#define NumDieInterleaved 4 + +static const uint32_t umc_v12_0_channel_idx_tbl[] + [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = { + {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12}, + {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}}, + {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32}, + {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}}, + {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64}, + {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}}, + {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108}, + {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}} +}; + +/* mapping of MCA error address to normalized address */ +static const uint32_t umc_v12_0_ma2na_mapping[] = { + 0, 5, 6, 8, 9, 14, 12, 13, + 10, 11, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 24, 7, 29, 30, +}; + +static bool umc_v12_0_bit_wise_xor(uint32_t val) +{ + bool result = 0; + int i; + + for (i = 0; i < 32; i++) + result = result ^ ((val >> i) & 0x1); + + return result; +} + +static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core, + enum umc_memory_partition_mode nps, + struct umc_flip_bits *flip_bits) +{ + uint32_t vram_type = ras_core->ras_umc.umc_vram_type; + + /* default setting */ + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT; + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT; + 
flip_bits->flip_row_bit = 13; + flip_bits->bit_num = 4; + flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT; + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) { + flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT; + flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT; + flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT; + flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT; + } + + switch (vram_type) { + case UMC_VRAM_TYPE_HBM: + /* other nps modes are taken as nps1 */ + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + + break; + case UMC_VRAM_TYPE_HBM3E: + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT; + flip_bits->flip_row_bit = 12; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT; + + break; + default: + RAS_DEV_WARN(ras_core->dev, + "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n"); + break; + } +} + +static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core, + uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok) +{ + struct umc_flip_bits flip_bits = {0}; + uint64_t row_pa; + int i; + + __get_nps_pa_flip_bits(ras_core, nps, &flip_bits); + + row_pa = pa; + /* clear loop bits in soc physical address */ + for (i = 0; i < flip_bits.bit_num; i++) + row_pa &= ~BIT_ULL(flip_bits.flip_bits_in_pa[i]); + + if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa)) + row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]); + + return row_pa; +} + +static int 
lookup_bad_pages_in_a_row(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num, + uint64_t seq_no, bool dump) +{ + uint32_t col, col_lower, row, row_lower, idx, row_high; + uint64_t soc_pa, row_pa, column, err_addr; + uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn); + struct umc_flip_bits flip_bits = {0}; + uint32_t retire_unit; + uint32_t i; + + __get_nps_pa_flip_bits(ras_core, nps, &flip_bits); + + row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true); + + err_addr = record->address; + /* get column bit 0 and 1 in mca address */ + col_lower = (err_addr >> 1) & 0x3ULL; + /* MA_R13_BIT will be handled later */ + row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL; + row_lower &= ~BIT_ULL(flip_bits.flip_row_bit); + + if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) { + row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL; + /* it's 2.25GB in each channel, from MCA address to PA + * [R14 R13] is converted if the two bits value are 0x3, + * get them from PA instead of MCA address. 
+ */ + row_lower |= (row_high << 13); + } + + idx = 0; + row = 0; + retire_unit = 0x1 << flip_bits.bit_num; + /* loop for all possibilities of retire bits */ + for (column = 0; column < retire_unit; column++) { + soc_pa = row_pa; + for (i = 0; i < flip_bits.bit_num; i++) + soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]); + + col = ((column & 0x7) << 2) | col_lower; + + /* add row bit 13 */ + if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM) + row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower; + + if (dump) + RAS_DEV_INFO(ras_core->dev, + "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", + seq_no, soc_pa, row, col, + record->cur_nps_bank, record->mem_channel); + + + if (pfns && (idx < num)) + pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa); + } + + return idx; +} + +static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out, + uint32_t nps) +{ + uint32_t i, na_shift; + uint64_t soc_pa, na, na_nps; + uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row; + uint32_t bank0, bank1, bank2, bank3, bank; + uint32_t ch_inst = addr_in->ch_inst; + uint32_t umc_inst = addr_in->umc_inst; + uint32_t node_inst = addr_in->node_inst; + uint32_t socket_id = addr_in->socket_id; + uint32_t channel_index; + uint64_t err_addr = addr_in->err_addr; + + if (node_inst != UMC_INV_AID_NODE) { + if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM || + umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM || + node_inst >= UMC_V12_0_AID_NUM_MAX || + socket_id >= UMC_V12_0_SOCKET_NUM_MAX) + return -EINVAL; + } else { + if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX || + ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM) + return -EINVAL; + } + + bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; + bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL; + bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL; + bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL; + col = 
(err_addr >> 1) & 0x1fULL; + row = (err_addr >> 10) & 0x3fffULL; + + /* apply bank hash algorithm */ + bank0 = + bank_hash0 ^ (UMC_V12_0_XOR_EN0 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0)))); + bank1 = + bank_hash1 ^ (UMC_V12_0_XOR_EN1 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1)))); + bank2 = + bank_hash2 ^ (UMC_V12_0_XOR_EN2 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2)))); + bank3 = + bank_hash3 ^ (UMC_V12_0_XOR_EN3 & + (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^ + (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3)))); + + bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3); + err_addr &= ~0x3c0ULL; + err_addr |= (bank << UMC_V12_0_MCA_B0_BIT); + + na_nps = 0x0; + /* convert mca error address to normalized address */ + for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++) + na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i]; + + if (nps == UMC_MEMORY_PARTITION_MODE_NPS1) + na_shift = 8; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) + na_shift = 9; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) + na_shift = 10; + else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) + na_shift = 11; + else + return -EINVAL; + + na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff); + + if (node_inst != UMC_INV_AID_NODE) + channel_index = + umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst]; + else { + channel_index = ch_inst; + node_inst = channel_index / + (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM); + } + + /* translate umc channel address to soc pa, 3 parts are included */ + soc_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(na); + + /* calc channel hash based on absolute address */ + soc_pa += socket_id * SOCKET_LFB_SIZE; + /* the umc channel bits are not original values, they are hashed */ + 
UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); + /* restore pa */ + soc_pa -= socket_id * SOCKET_LFB_SIZE; + + /* get some channel bits from na_nps directly and + * add nps section offset + */ + if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) { + soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT); + soc_pa |= ((na_nps & 0x100) << 5); + soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1); + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) { + soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT); + soc_pa |= ((na_nps & 0x300) << 4); + soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2); + } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) { + soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); + soc_pa |= ((na_nps & 0x700) << 4); + soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) + + (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3); + } + + addr_out->pa = soc_pa; + addr_out->bank = bank; + addr_out->channel_idx = channel_index; + + return 0; +} + +static int convert_ma_to_pa(struct ras_core_context *ras_core, + struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out, + uint32_t nps) +{ + int ret; + + if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS)) + ret = ras_umc_psp_convert_ma_to_pa(ras_core, + addr_in, addr_out, nps); + else + ret = umc_v12_convert_ma_to_pa(ras_core, + addr_in, addr_out, nps); + + return ret; +} + +static int convert_bank_to_nps_addr(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps) +{ + struct umc_mca_addr addr_in; + struct umc_phy_addr addr_out; + int ret; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr); + addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid); + addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid); + addr_in.node_inst = ACA_IPID_2_DIE_ID(bank->ipid); + addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid); + + ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps); + if (!ret) { + pa_addr->pa = + 
convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false); + pa_addr->channel_idx = addr_out.channel_idx; + pa_addr->bank = addr_out.bank; + } + + return ret; +} + +static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core, + struct ras_bank_ecc *bank, struct eeprom_umc_record *record) +{ + struct umc_phy_addr nps_addr; + int ret; + + memset(&nps_addr, 0, sizeof(nps_addr)); + + ret = convert_bank_to_nps_addr(ras_core, bank, + &nps_addr, bank->nps); + if (ret) + return ret; + + ras_umc_fill_eeprom_record(ras_core, + ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid), + &nps_addr, bank->nps, record); + + lookup_bad_pages_in_a_row(ras_core, record, + bank->nps, NULL, 0, bank->seq_no, true); + + return 0; +} + +static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps) +{ + struct device_system_info dev_info = {0}; + struct umc_mca_addr addr_in; + struct umc_phy_addr addr_out; + int ret; + + memset(&addr_in, 0, sizeof(addr_in)); + memset(&addr_out, 0, sizeof(addr_out)); + + ras_core_get_device_system_info(ras_core, &dev_info); + + addr_in.err_addr = record->address; + addr_in.ch_inst = record->mem_channel; + addr_in.umc_inst = record->mcumc_id; + addr_in.node_inst = UMC_INV_AID_NODE; + addr_in.socket_id = dev_info.socket_id; + + ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps); + if (ret) + return ret; + + *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false); + + return 0; +} + +static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps) +{ + uint64_t pa = 0; + int ret = 0; + + if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) { + record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record); + } else { + ret = convert_eeprom_record_to_nps_addr(ras_core, + record, &pa, nps); + if (!ret) + record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa); + } + + 
record->cur_nps = nps; + + return ret; +} + +static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core, + struct eeprom_umc_record *record, uint32_t nps, + uint64_t *pfns, uint32_t num) +{ + return lookup_bad_pages_in_a_row(ras_core, + record, nps, pfns, num, 0, false); +} + +static int umc_12_0_soc_pa_to_bank(struct ras_core_context *ras_core, + uint64_t soc_pa, + struct umc_bank_addr *bank_addr) +{ + + int channel_hashed = 0; + int channel_real = 0; + int channel_reversed = 0; + int i = 0; + + bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa); + bank_addr->bank_group = 0; /* This is a combination of SID & Bank. Needed?? */ + bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa); + bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa); + bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa); + + /* Channel bits 4-6 are hashed. Bruteforce reverse the hash */ + channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7; + + for (i = 0; i < 8; i++) { + channel_reversed = 0; + channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa); + channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1); + channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2); + if (channel_reversed == channel_hashed) + channel_real = ((i << 4)) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf); + } + + bank_addr->channel = channel_real; + bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa); + + return 0; +} + +static int umc_12_0_bank_to_soc_pa(struct ras_core_context *ras_core, + struct umc_bank_addr bank_addr, + uint64_t *soc_pa) +{ + uint64_t na = 0; + uint64_t tmp_pa = 0; + *soc_pa = 0; + + tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id); + tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank); + tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row); + tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column); + tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel); + tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel); + + /* Get the NA */ + na = 
((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT); + na |= tmp_pa & 0xff; + + /* translate umc channel address to soc pa, 3 parts are included */ + tmp_pa = ADDR_OF_32KB_BLOCK(na) | + ADDR_OF_256B_BLOCK(bank_addr.channel) | + OFFSET_IN_256B_BLOCK(na); + + /* the umc channel bits are not original values, they are hashed */ + UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa); + + *soc_pa = tmp_pa; + + return 0; +} + +const struct ras_umc_ip_func ras_umc_func_v12_0 = { + .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record, + .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record, + .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages, + .bank_to_soc_pa = umc_12_0_bank_to_soc_pa, + .soc_pa_to_bank = umc_12_0_soc_pa_to_bank, +}; + diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h new file mode 100644 index 000000000000..8a35ad856165 --- /dev/null +++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h @@ -0,0 +1,314 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __RAS_UMC_V12_0_H__ +#define __RAS_UMC_V12_0_H__ +#include "ras.h" + +/* MCA_UMC_UMC0_MCUMC_ADDRT0 */ +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L + +/* MCMP1_IPIDT0 */ +#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0 +#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20 +#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c +#define MCMP1_IPIDT0__McaType__SHIFT 0x30 + +#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL +#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L +#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L +#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L + +/* number of umc channel instance with memory map register access */ +#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8 +/* number of umc instance with memory map register access */ +#define UMC_V12_0_UMC_INSTANCE_NUM 4 + +/* one piece of normalized address is mapped to 8 pieces of physical address */ +#define UMC_V12_0_NA_MAP_PA_NUM 8 + +/* bank bits in MCA error address */ +#define UMC_V12_0_MCA_B0_BIT 6 +#define UMC_V12_0_MCA_B1_BIT 7 +#define UMC_V12_0_MCA_B2_BIT 8 +#define UMC_V12_0_MCA_B3_BIT 9 + +/* row bits in MCA address */ +#define UMC_V12_0_MCA_R0_BIT 10 + +/* Stack ID bits in SOC physical address */ +#define UMC_V12_0_PA_SID1_BIT 37 +#define UMC_V12_0_PA_SID0_BIT 36 + +/* bank bits in SOC physical address */ +#define UMC_V12_0_PA_B3_BIT 18 +#define UMC_V12_0_PA_B2_BIT 17 +#define UMC_V12_0_PA_B1_BIT 20 +#define UMC_V12_0_PA_B0_BIT 19 + +/* row bits in SOC physical address */ 
+#define UMC_V12_0_PA_R13_BIT 35 +#define UMC_V12_0_PA_R12_BIT 34 +#define UMC_V12_0_PA_R11_BIT 33 +#define UMC_V12_0_PA_R10_BIT 32 +#define UMC_V12_0_PA_R9_BIT 31 +#define UMC_V12_0_PA_R8_BIT 30 +#define UMC_V12_0_PA_R7_BIT 29 +#define UMC_V12_0_PA_R6_BIT 28 +#define UMC_V12_0_PA_R5_BIT 27 +#define UMC_V12_0_PA_R4_BIT 26 +#define UMC_V12_0_PA_R3_BIT 25 +#define UMC_V12_0_PA_R2_BIT 24 +#define UMC_V12_0_PA_R1_BIT 23 +#define UMC_V12_0_PA_R0_BIT 22 + +/* column bits in SOC physical address */ +#define UMC_V12_0_PA_C4_BIT 21 +#define UMC_V12_0_PA_C3_BIT 16 +#define UMC_V12_0_PA_C2_BIT 15 +#define UMC_V12_0_PA_C1_BIT 6 +#define UMC_V12_0_PA_C0_BIT 5 + +/* channel index bits in SOC physical address */ +#define UMC_V12_0_PA_CH6_BIT 14 +#define UMC_V12_0_PA_CH5_BIT 13 +#define UMC_V12_0_PA_CH4_BIT 12 +#define UMC_V12_0_PA_CH3_BIT 11 +#define UMC_V12_0_PA_CH2_BIT 10 +#define UMC_V12_0_PA_CH1_BIT 9 +#define UMC_V12_0_PA_CH0_BIT 8 + +/* Pseudochannel index bits in SOC physical address */ +#define UMC_V12_0_PA_PC0_BIT 7 + +#define UMC_V12_0_NA_C2_BIT 8 + +#define UMC_V12_0_SOC_PA_TO_SID(pa) \ + ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL)) + +#define UMC_V12_0_SOC_PA_TO_BANK(pa) \ + ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL)) + +#define UMC_V12_0_SOC_PA_TO_ROW(pa) \ + ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \ + (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) | \ + (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \ + (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \ + (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 
8ULL) | \ + (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \ + (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \ + (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \ + (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \ + (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL)) + +#define UMC_V12_0_SOC_PA_TO_COL(pa) \ + ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL)) + +#define UMC_V12_0_SOC_PA_TO_CH(pa) \ + ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \ + (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \ + (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \ + (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \ + (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \ + (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \ + (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL)) + +#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL) + +#define UMC_V12_0_SOC_SID_TO_PA(sid) \ + ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \ + (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT)) + +#define UMC_V12_0_SOC_BANK_TO_PA(bank) \ + ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \ + (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \ + (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \ + (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT)) + +#define UMC_V12_0_SOC_ROW_TO_PA(row) \ + ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \ + (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \ + (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \ + (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \ + (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \ + (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \ + (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \ + (((row >> 
7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \ + (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \ + (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \ + (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \ + (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \ + (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \ + (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT)) + +#define UMC_V12_0_SOC_COL_TO_PA(col) \ + ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \ + (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \ + (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \ + (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \ + (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT)) + +#define UMC_V12_0_SOC_CH_TO_PA(ch) \ + ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \ + (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \ + (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \ + (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \ + (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \ + (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \ + (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT)) + +#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT) + +/* bank hash settings */ +#define UMC_V12_0_XOR_EN0 1 +#define UMC_V12_0_XOR_EN1 1 +#define UMC_V12_0_XOR_EN2 1 +#define UMC_V12_0_XOR_EN3 1 +#define UMC_V12_0_COL_XOR0 0x0 +#define UMC_V12_0_COL_XOR1 0x0 +#define UMC_V12_0_COL_XOR2 0x800 +#define UMC_V12_0_COL_XOR3 0x1000 +#define UMC_V12_0_ROW_XOR0 0x11111 +#define UMC_V12_0_ROW_XOR1 0x22222 +#define UMC_V12_0_ROW_XOR2 0x4444 +#define UMC_V12_0_ROW_XOR3 0x8888 + +/* channel hash settings */ +#define UMC_V12_0_HASH_4K 0 +#define UMC_V12_0_HASH_64K 1 +#define UMC_V12_0_HASH_2M 1 +#define UMC_V12_0_HASH_1G 1 +#define UMC_V12_0_HASH_1T 1 + +/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA), + * hash bit is only effective when related setting is enabled + */ +#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, 
pa) ((((channel_idx) >> 5) & 0x1) ^ \ + (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \ + (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \ + (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ + (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ + (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ + (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \ + (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T)) +#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \ + (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \ + (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ + } while (0) + + +/* + * (addr / 256) * 4096, the higher 26 bits in ErrorAddr + * is the index of 4KB block + */ +#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4) +/* + * (addr / 256) * 8192, the higher 26 bits in ErrorAddr + * is the index of 8KB block + */ +#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +/* + * (addr / 256) * 32768, the higher 26 bits in ErrorAddr + * is the index of 8KB block + */ +#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7) +/* channel index is the index of 256B block */ +#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) +/* offset in 256B block */ +#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) + + +#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \ + ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \ + 
(0x1ULL << UMC_V12_0_PA_C4_BIT) | \ + (0x1ULL << UMC_V12_0_PA_R13_BIT))) + +#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3) +#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \ + (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF)) +#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7) + +#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03) +#define ACA_IPID_2_UMC_CH(ipid) \ + (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo))) + +#define ACA_IPID_2_UMC_INST(ipid) \ + (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo))) + +#define ACA_IPID_2_SOCKET_ID(ipid) \ + (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \ + (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03)) + +#define ACA_ADDR_2_ERR_ADDR(addr) \ + REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr) + +/* R13 bit shift should be considered, double the number */ +#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) + + +/* C2, C3, C4, R13, four MCA bits are looped in page retirement */ +#define UMC_V12_0_RETIRE_LOOP_BITS 4 + +/* invalid node instance value */ +#define UMC_INV_AID_NODE 0xffff + +#define UMC_V12_0_AID_NUM_MAX 4 +#define UMC_V12_0_SOCKET_NUM_MAX 8 + +#define UMC_V12_0_TOTAL_CHANNEL_NUM \ + (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM) + +/* one device has 192GB HBM */ +#define SOCKET_LFB_SIZE 0x3000000000ULL + +extern const struct ras_umc_ip_func ras_umc_func_v12_0; + +int ras_umc_get_badpage_count(struct ras_core_context *ras_core); +int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record); +#endif + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 2ad33559a33a..5a66948ffd24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ 
b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc, static int komeda_crtc_prepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state); @@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc) if (new_mode != KOMEDA_MODE_DUAL_DISP) { err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st)); if (err) - DRM_ERROR("failed to set aclk.\n"); + drm_err(drm, "failed to set aclk.\n"); err = clk_prepare_enable(mdev->aclk); if (err) - DRM_ERROR("failed to enable aclk.\n"); + drm_err(drm, "failed to enable aclk.\n"); } err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000); if (err) - DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id); + drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id); err = clk_prepare_enable(master->pxlclk); if (err) - DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id); + drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id); unlock: mutex_unlock(&mdev->lock); @@ -164,6 +165,7 @@ unlock: static int komeda_crtc_unprepare(struct komeda_crtc *kcrtc) { + struct drm_device *drm = kcrtc->base.dev; struct komeda_dev *mdev = kcrtc->base.dev->dev_private; struct komeda_pipeline *master = kcrtc->master; u32 new_mode; @@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc) err = mdev->funcs->change_opmode(mdev, new_mode); if (err) { - DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,", - mdev->dpmode, new_mode); + 
drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,", + mdev->dpmode, new_mode); goto unlock; } @@ -200,6 +202,7 @@ unlock: void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, struct komeda_events *evts) { + struct drm_device *drm = kcrtc->base.dev; struct drm_crtc *crtc = &kcrtc->base; u32 events = evts->pipes[kcrtc->master->id]; @@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, if (wb_conn) drm_writeback_signal_completion(&wb_conn->base, 0); else - DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n", + drm_warn(drm, "CRTC[%d]: EOW happen but no wb_connector.\n", drm_crtc_index(&kcrtc->base)); } /* will handle it together with the write back support */ @@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc, crtc->state->event = NULL; drm_crtc_send_vblank_event(crtc, event); } else { - DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n", + drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n", drm_crtc_index(&kcrtc->base)); } spin_unlock_irqrestore(&crtc->dev->event_lock, flags); @@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, /* wait the flip take affect.*/ if (wait_for_completion_timeout(flip_done, HZ) == 0) { - DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id); + drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id); if (!input_flip_done) { unsigned long flags; @@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = { int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) { + struct drm_device *drm = &kms->base; struct komeda_crtc *crtc; struct komeda_pipeline *master; char str[16]; @@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, else sprintf(str, "None"); - DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n", + drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n", kms->n_crtcs, master->id, str); kms->n_crtcs++; @@ -613,6 +617,7 @@ static int 
komeda_attach_bridge(struct device *dev, struct komeda_pipeline *pipe, struct drm_encoder *encoder) { + struct drm_device *drm = encoder->dev; struct drm_bridge *bridge; int err; @@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev, err = drm_bridge_attach(encoder, bridge, NULL, 0); if (err) - dev_err(dev, "bridge_attach() failed for pipe: %s\n", + drm_err(drm, "bridge_attach() failed for pipe: %s\n", of_node_full_name(pipe->of_node)); return err; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index 901f938aefe0..3ca461eb0a24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "komeda_framebuffer.h" #include "komeda_dev.h" diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 806da0aaedf7..4b4a08cb396d 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index c3179d74f3f5..81d45f2dd6a7 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index bc5f5e9798c3..b765f6c9eea4 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 600af5ad81b1..47733c85d271 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/arm/malidp_planes.c 
b/drivers/gpu/drm/arm/malidp_planes.c index 87f2e5ee8790..f1a5014bcfa1 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp, struct drm_plane_state *state) { struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(state->state, state->crtc); + drm_atomic_get_new_crtc_state(state->state, state->crtc); struct malidp_crtc_state *mc; u32 src_w, src_h; int ret; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 0900e4466ffb..033b19b31f63 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -13,6 +13,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c index a763349dd89f..2445365c823f 100644 --- a/drivers/gpu/drm/armada/armada_debugfs.c +++ b/drivers/gpu/drm/armada/armada_debugfs.c @@ -12,6 +12,7 @@ #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index aa4289127086..77098928f821 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "armada_drm.h" #include "armada_fb.h" diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index cb53cc91bafb..be703d35f6b7 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" @@ -28,8 +29,6 @@ static void armada_fbdev_fb_destroy(struct fb_info *info) fbh->fb->funcs->destroy(fbh->fb); drm_client_release(&fbh->client); - drm_fb_helper_unprepare(fbh); - kfree(fbh); } static const struct fb_ops armada_fb_ops = { diff --git 
a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 1a1680d71486..35fcfa0d85ff 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -10,6 +10,7 @@ #include #include +#include #include "armada_drm.h" #include "armada_gem.h" diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 3b9bd8ecda13..21fd3b4ba10f 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index cc47c032dbc1..a0326b4f568e 100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "armada_crtc.h" #include "armada_drm.h" @@ -94,12 +95,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane, return 0; } - if (state) - crtc_state = drm_atomic_get_existing_crtc_state(state, - crtc); - else - crtc_state = crtc->state; - + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, 0, INT_MAX, true, false); diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 2547613155da..cdbcba3b43ad 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -6,7 +6,9 @@ ast-y := \ ast_2000.o \ ast_2100.o \ + ast_2200.o \ ast_2300.o \ + ast_2400.o \ ast_2500.o \ ast_2600.o \ ast_cursor.o \ @@ -14,7 +16,6 @@ ast-y := \ ast_dp501.o \ ast_dp.o \ ast_drv.o \ - ast_main.o \ ast_mm.o \ ast_mode.o \ ast_post.o \ diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c index 41c2aa1e425a..fa3bc23ce098 100644 --- a/drivers/gpu/drm/ast/ast_2000.c +++ b/drivers/gpu/drm/ast/ast_2000.c @@ -27,6 +27,9 @@ */ #include +#include + +#include #include 
"ast_drv.h" #include "ast_post.h" @@ -147,3 +150,108 @@ int ast_2000_post(struct ast_device *ast) return 0; } + +/* + * Mode setting + */ + +const struct ast_vbios_dclk_info ast_2000_dclk_table[] = { + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x77, 0x58, 0x80}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */ +}; + +/* + * Device initialization + */ + +void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post) +{ + enum ast_tx_chip tx_chip = AST_TX_NONE; + u8 vgacra3; + + /* + * VGACRA3 Enhanced Color Mode Register, check if DVO is already + * enabled, in that case, assume we have a SIL164 TMDS transmitter + * + * Don't make that assumption if we the chip wasn't enabled and + * is at power-on reset, otherwise we'll incorrectly "detect" a + * SIL164 when there is none. 
+ */ + if (!need_post) { + vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); + if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) + tx_chip = AST_TX_SIL164; + } + + __ast_device_set_tx_chip(ast, tx_chip); +} + +static const struct ast_device_quirks ast_2000_device_quirks = { + .crtc_mem_req_threshold_low = 31, + .crtc_mem_req_threshold_high = 47, +}; + +struct drm_device *ast_2000_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c index 829e3b8b0d19..05aeb0624d41 100644 --- a/drivers/gpu/drm/ast/ast_2100.c +++ b/drivers/gpu/drm/ast/ast_2100.c @@ -27,6 +27,9 @@ */ #include +#include + +#include #include "ast_drv.h" #include "ast_post.h" @@ -386,3 +389,92 @@ int ast_2100_post(struct ast_device *ast) return 0; } + +/* + * Widescreen detection + */ + +/* Try to detect WSXGA+ on Gen2+ */ +bool __ast_2100_detect_wsxga_p(struct ast_device *ast) +{ + u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); + + if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) + return true; + if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) + return true; + + return false; +} + +/* Try to detect WUXGA on Gen2+ */ +bool __ast_2100_detect_wuxga(struct ast_device *ast) +{ + u8 
vgacrd1; + + if (ast->support_fullhd) { + vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); + if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) + return true; + } + + return false; +} + +static void ast_2100_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast)) { + ast->support_wsxga_p = true; + if (ast->chip == AST2100) + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2100_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + +struct drm_device *ast_2100_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2100_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c new file mode 100644 index 000000000000..b64345d11ffa --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2200.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ */ +/* + * Authors: Dave Airlie + */ + +#include + +#include + +#include "ast_drv.h" + +static void ast_2200_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast)) { + ast->support_wsxga_p = true; + if (ast->chip == AST2200) + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2200_device_quirks = { + .crtc_mem_req_threshold_low = 47, + .crtc_mem_req_threshold_high = 63, +}; + +struct drm_device *ast_2200_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2200_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2000_detect_tx_chip(ast, need_post); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2200_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} + diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c index dc2a32244689..5f50d9f91ffd 100644 --- a/drivers/gpu/drm/ast/ast_2300.c +++ b/drivers/gpu/drm/ast/ast_2300.c @@ -27,6 +27,12 @@ */ #include +#include +#include + +#include +#include +#include #include "ast_drv.h" #include "ast_post.h" @@ -1326,3 +1332,132 @@ int ast_2300_post(struct ast_device *ast) return 0; } + +/* + * Device initialization + */ + +void ast_2300_detect_tx_chip(struct ast_device *ast) +{ + enum ast_tx_chip tx_chip = AST_TX_NONE; + struct drm_device *dev = &ast->base; + u8 vgacrd1; + + /* + * On AST GEN4+, look at the 
configuration set by the SoC in + * the SOC scratch register #1 bits 11:8 (interestingly marked + * as "reserved" in the spec) + */ + vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, + AST_IO_VGACRD1_TX_TYPE_MASK); + switch (vgacrd1) { + /* + * GEN4 to GEN6 + */ + case AST_IO_VGACRD1_TX_SIL164_VBIOS: + tx_chip = AST_TX_SIL164; + break; + case AST_IO_VGACRD1_TX_DP501_VBIOS: + ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL); + if (ast->dp501_fw_addr) { + /* backup firmware */ + if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) { + drmm_kfree(dev, ast->dp501_fw_addr); + ast->dp501_fw_addr = NULL; + } + } + fallthrough; + case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: + tx_chip = AST_TX_DP501; + break; + /* + * GEN7+ + */ + case AST_IO_VGACRD1_TX_ASTDP: + tx_chip = AST_TX_ASTDP; + break; + /* + * Several of the listed TX chips are not explicitly supported + * by the ast driver. If these exist in real-world devices, they + * are most likely reported as VGA or SIL164 outputs. We warn here + * to get bug reports for these devices. If none come in for some + * time, we can begin to fail device probing on these values. 
+ */ + case AST_IO_VGACRD1_TX_ITE66121_VBIOS: + drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + case AST_IO_VGACRD1_TX_CH7003_VBIOS: + drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + case AST_IO_VGACRD1_TX_ANX9807_VBIOS: + drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast)); + break; + } + + __ast_device_set_tx_chip(ast, tx_chip); +} + +static void ast_2300_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2300_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + +struct drm_device *ast_2300_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2300_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git 
a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c new file mode 100644 index 000000000000..2e6befd24f91 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2400.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include +#include + +#include "ast_drv.h" + +static void ast_2400_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2400_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, +}; + +struct drm_device *ast_2400_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks); + + ast->dclk_table = ast_2000_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2400_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c index 1e541498ea67..2a52af0ded56 100644 --- a/drivers/gpu/drm/ast/ast_2500.c +++ b/drivers/gpu/drm/ast/ast_2500.c @@ -27,7 +27,9 @@ */ #include +#include +#include #include #include "ast_drv.h" @@ -567,3 +569,107 @@ int ast_2500_post(struct ast_device *ast) return 0; } + +/* + * Mode 
setting + */ + +const struct ast_vbios_dclk_info ast_2500_dclk_table[] = { + {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */ + {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ + {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ + {0x76, 0x63, 0x01}, /* 03: VCLK36 */ + {0xee, 0x67, 0x01}, /* 04: VCLK40 */ + {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ + {0xc6, 0x64, 0x01}, /* 06: VCLK50 */ + {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ + {0x80, 0x64, 0x00}, /* 08: VCLK65 */ + {0x7b, 0x63, 0x00}, /* 09: VCLK75 */ + {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */ + {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */ + {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */ + {0x85, 0x24, 0x00}, /* 0d: VCLK135 */ + {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */ + {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */ + {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ + {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ + {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ + {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ + {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ + {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ + {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ + {0x58, 0x01, 0x42}, /* 17: VCLK119 */ + {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ + {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ + {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */ +}; + +/* + * Device initialization + */ + +static void ast_2500_detect_widescreen(struct ast_device *ast) +{ + if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) { + ast->support_wsxga_p = true; + ast->support_fullhd = true; + } + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2500_device_quirks = { + .crtc_mem_req_threshold_low = 96, + .crtc_mem_req_threshold_high = 120, + .crtc_hsync_precatch_needed = true, +}; + +struct drm_device *ast_2500_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, 
drv, struct ast_device, base); + if (IS_ERR(ast)) + return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; + + ast_2300_detect_tx_chip(ast); + + if (need_post) { + ret = ast_post_gpu(ast); + if (ret) + return ERR_PTR(ret); + } + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (ast->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ast_2500_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c index 8d75a47444f5..dee78fd5b022 100644 --- a/drivers/gpu/drm/ast/ast_2600.c +++ b/drivers/gpu/drm/ast/ast_2600.c @@ -26,6 +26,10 @@ * Authors: Dave Airlie */ +#include + +#include + #include "ast_drv.h" #include "ast_post.h" @@ -42,3 +46,71 @@ int ast_2600_post(struct ast_device *ast) return 0; } + +/* + * Device initialization + */ + +static void ast_2600_detect_widescreen(struct ast_device *ast) +{ + ast->support_wsxga_p = true; + ast->support_fullhd = true; + if (__ast_2100_detect_wuxga(ast)) + ast->support_wuxga = true; +} + +static const struct ast_device_quirks ast_2600_device_quirks = { + .crtc_mem_req_threshold_low = 160, + .crtc_mem_req_threshold_high = 224, + .crtc_hsync_precatch_needed = true, + .crtc_hsync_add4_needed = true, +}; + +struct drm_device *ast_2600_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post) +{ + struct drm_device *dev; + struct ast_device *ast; + int ret; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); + if (IS_ERR(ast)) + 
return ERR_CAST(ast); + dev = &ast->base; + + ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks); + + ast->dclk_table = ast_2500_dclk_table; + + ast_2300_detect_tx_chip(ast); + + switch (ast->tx_chip) { + case AST_TX_ASTDP: + ret = ast_post_gpu(ast); + break; + default: + ret = 0; + if (need_post) + ret = ast_post_gpu(ast); + break; + } + if (ret) + return ERR_PTR(ret); + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + ast_2600_detect_widescreen(ast); + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + return dev; +} diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 473faa92d08c..b9a9b050b546 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include "ast_drv.h" @@ -46,6 +47,34 @@ static int ast_modeset = -1; MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); module_param_named(modeset, ast_modeset, int, 0400); +void ast_device_init(struct ast_device *ast, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + const struct ast_device_quirks *quirks) +{ + ast->quirks = quirks; + ast->chip = chip; + ast->config_mode = config_mode; + ast->regs = regs; + ast->ioregs = ioregs; +} + +void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip) +{ + static const char * const info_str[] = { + "analog VGA", + "Sil164 TMDS transmitter", + "DP501 DisplayPort transmitter", + "ASPEED DisplayPort transmitter", + }; + + drm_info(&ast->base, "Using %s\n", info_str[tx_chip]); + + ast->tx_chip = tx_chip; +} + /* * DRM driver */ @@ -266,7 +295,7 @@ static int ast_detect_chip(struct pci_dev *pdev, *chip_out = chip; *config_mode_out = config_mode; - return 0; + return __AST_CHIP_GEN(chip); } static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -277,6 +306,7 @@ static int ast_pci_probe(struct 
pci_dev *pdev, const struct pci_device_id *ent) void __iomem *ioregs; enum ast_config_mode config_mode; enum ast_chip chip; + unsigned int chip_gen; struct drm_device *drm; bool need_post = false; @@ -349,10 +379,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return ret; ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode); - if (ret) + if (ret < 0) return ret; + chip_gen = ret; - drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post); + switch (chip_gen) { + case 1: + drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 2: + drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 3: + drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 4: + drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 5: + drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 6: + drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + case 7: + drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode, + regs, ioregs, need_post); + break; + default: + dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen); + return -ENODEV; + } if (IS_ERR(drm)) return PTR_ERR(drm); pci_set_drvdata(pdev, drm); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index c15aef014f69..7be36a358e74 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -164,9 +164,31 @@ to_ast_connector(struct drm_connector *connector) * Device */ +struct ast_device_quirks { + /* + * CRTC memory request threshold + */ + unsigned char crtc_mem_req_threshold_low; + unsigned char crtc_mem_req_threshold_high; + + /* + * Adjust hsync values to load next scanline 
early. Signalled + * by AST2500PreCatchCRT in VBIOS mode flags. + */ + bool crtc_hsync_precatch_needed; + + /* + * Workaround for modes with HSync Time that is not a multiple + * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels). + */ + bool crtc_hsync_add4_needed; +}; + struct ast_device { struct drm_device base; + const struct ast_device_quirks *quirks; + void __iomem *regs; void __iomem *ioregs; void __iomem *dp501_fw_buf; @@ -174,6 +196,8 @@ struct ast_device { enum ast_config_mode config_mode; enum ast_chip chip; + const struct ast_vbios_dclk_info *dclk_table; + void __iomem *vram; unsigned long vram_base; unsigned long vram_size; @@ -217,14 +241,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev) return container_of(dev, struct ast_device, base); } -struct drm_device *ast_device_create(struct pci_dev *pdev, - const struct drm_driver *drv, - enum ast_chip chip, - enum ast_config_mode config_mode, - void __iomem *regs, - void __iomem *ioregs, - bool need_post); - static inline unsigned long __ast_gen(struct ast_device *ast) { return __AST_CHIP_GEN(ast->chip); @@ -415,21 +431,89 @@ struct ast_crtc_state { int ast_mm_init(struct ast_device *ast); +/* ast_drv.c */ +void ast_device_init(struct ast_device *ast, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + const struct ast_device_quirks *quirks); +void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip); + /* ast_2000.c */ int ast_2000_post(struct ast_device *ast); +extern const struct ast_vbios_dclk_info ast_2000_dclk_table[]; +void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post); +struct drm_device *ast_2000_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2100.c */ int ast_2100_post(struct ast_device *ast); +bool __ast_2100_detect_wsxga_p(struct 
ast_device *ast); +bool __ast_2100_detect_wuxga(struct ast_device *ast); +struct drm_device *ast_2100_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); + +/* ast_2200.c */ +struct drm_device *ast_2200_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2300.c */ int ast_2300_post(struct ast_device *ast); +void ast_2300_detect_tx_chip(struct ast_device *ast); +struct drm_device *ast_2300_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); + +/* ast_2400.c */ +struct drm_device *ast_2400_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2500.c */ void ast_2500_patch_ahb(void __iomem *regs); int ast_2500_post(struct ast_device *ast); +extern const struct ast_vbios_dclk_info ast_2500_dclk_table[]; +struct drm_device *ast_2500_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast_2600.c */ int ast_2600_post(struct ast_device *ast); +struct drm_device *ast_2600_device_create(struct pci_dev *pdev, + const struct drm_driver *drv, + enum ast_chip chip, + enum ast_config_mode config_mode, + void __iomem *regs, + void __iomem *ioregs, + bool need_post); /* ast post */ int ast_post_gpu(struct ast_device *ast); diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c deleted file mode 100644 index 3eea6a6cdacd..000000000000 --- 
a/drivers/gpu/drm/ast/ast_main.c +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2012 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - */ -/* - * Authors: Dave Airlie - */ - -#include -#include - -#include -#include -#include -#include - -#include "ast_drv.h" - -/* Try to detect WSXGA+ on Gen2+ */ -static bool __ast_2100_detect_wsxga_p(struct ast_device *ast) -{ - u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0); - - if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC)) - return true; - if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN) - return true; - - return false; -} - -/* Try to detect WUXGA on Gen2+ */ -static bool __ast_2100_detect_wuxga(struct ast_device *ast) -{ - u8 vgacrd1; - - if (ast->support_fullhd) { - vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1); - if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA)) - return true; - } - - return false; -} - -static void ast_detect_widescreen(struct ast_device *ast) -{ - ast->support_wsxga_p = false; - ast->support_fullhd = false; - ast->support_wuxga = false; - - if (AST_GEN(ast) >= 7) { - ast->support_wsxga_p = true; - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 6) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST2510) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 5) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST1400) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 4) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - else if (ast->chip == AST1300) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) - ast->support_fullhd = true; - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 3) { - if (__ast_2100_detect_wsxga_p(ast)) - 
ast->support_wsxga_p = true; - if (ast->support_wsxga_p) { - if (ast->chip == AST2200) - ast->support_fullhd = true; - } - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } else if (AST_GEN(ast) >= 2) { - if (__ast_2100_detect_wsxga_p(ast)) - ast->support_wsxga_p = true; - if (ast->support_wsxga_p) { - if (ast->chip == AST2100) - ast->support_fullhd = true; - } - if (__ast_2100_detect_wuxga(ast)) - ast->support_wuxga = true; - } -} - -static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) -{ - static const char * const info_str[] = { - "analog VGA", - "Sil164 TMDS transmitter", - "DP501 DisplayPort transmitter", - "ASPEED DisplayPort transmitter", - }; - - struct drm_device *dev = &ast->base; - u8 vgacra3, vgacrd1; - - /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip = AST_TX_NONE; - - if (AST_GEN(ast) <= 3) { - /* - * VGACRA3 Enhanced Color Mode Register, check if DVO is already - * enabled, in that case, assume we have a SIL164 TMDS transmitter - * - * Don't make that assumption if we the chip wasn't enabled and - * is at power-on reset, otherwise we'll incorrectly "detect" a - * SIL164 when there is none. 
- */ - if (!need_post) { - vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff); - if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED) - ast->tx_chip = AST_TX_SIL164; - } - } else { - /* - * On AST GEN4+, look at the configuration set by the SoC in - * the SOC scratch register #1 bits 11:8 (interestingly marked - * as "reserved" in the spec) - */ - vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1, - AST_IO_VGACRD1_TX_TYPE_MASK); - switch (vgacrd1) { - /* - * GEN4 to GEN6 - */ - case AST_IO_VGACRD1_TX_SIL164_VBIOS: - ast->tx_chip = AST_TX_SIL164; - break; - case AST_IO_VGACRD1_TX_DP501_VBIOS: - ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); - if (ast->dp501_fw_addr) { - /* backup firmware */ - if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) { - drmm_kfree(dev, ast->dp501_fw_addr); - ast->dp501_fw_addr = NULL; - } - } - fallthrough; - case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW: - ast->tx_chip = AST_TX_DP501; - break; - /* - * GEN7+ - */ - case AST_IO_VGACRD1_TX_ASTDP: - ast->tx_chip = AST_TX_ASTDP; - break; - /* - * Several of the listed TX chips are not explicitly supported - * by the ast driver. If these exist in real-world devices, they - * are most likely reported as VGA or SIL164 outputs. We warn here - * to get bug reports for these devices. If none come in for some - * time, we can begin to fail device probing on these values. 
- */ - case AST_IO_VGACRD1_TX_ITE66121_VBIOS: - drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - case AST_IO_VGACRD1_TX_CH7003_VBIOS: - drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - case AST_IO_VGACRD1_TX_ANX9807_VBIOS: - drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", - vgacrd1, AST_GEN(ast)); - break; - } - } - - drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); -} - -struct drm_device *ast_device_create(struct pci_dev *pdev, - const struct drm_driver *drv, - enum ast_chip chip, - enum ast_config_mode config_mode, - void __iomem *regs, - void __iomem *ioregs, - bool need_post) -{ - struct drm_device *dev; - struct ast_device *ast; - int ret; - - ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base); - if (IS_ERR(ast)) - return ERR_CAST(ast); - dev = &ast->base; - - ast->chip = chip; - ast->config_mode = config_mode; - ast->regs = regs; - ast->ioregs = ioregs; - - ast_detect_tx_chip(ast, need_post); - switch (ast->tx_chip) { - case AST_TX_ASTDP: - ret = ast_post_gpu(ast); - break; - default: - ret = 0; - if (need_post) - ret = ast_post_gpu(ast); - break; - } - if (ret) - return ERR_PTR(ret); - - ret = ast_mm_init(ast); - if (ret) - return ERR_PTR(ret); - - /* map reserved buffer */ - ast->dp501_fw_buf = NULL; - if (ast->vram_size < pci_resource_len(pdev, 0)) { - ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0); - if (!ast->dp501_fw_buf) - drm_info(dev, "failed to map reserved buffer!\n"); - } - - ast_detect_widescreen(ast); - - ret = ast_mode_config_init(ast); - if (ret) - return ERR_PTR(ret); - - return dev; -} diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index b4e8edc7c767..de7b6294ce40 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include "ast_drv.h" @@ -241,16 +242,15 @@ static void 
ast_set_std_reg(struct ast_device *ast, ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]); } -static void ast_set_crtc_reg(struct ast_device *ast, - struct drm_display_mode *mode, +static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0; - u16 temp, precache = 0; + u16 temp; + unsigned char crtc_hsync_precatch = 0; - if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) && - (vmode->flags & AST2500PreCatchCRT)) - precache = 40; + if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT)) + crtc_hsync_precatch = 40; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00); @@ -276,12 +276,12 @@ static void ast_set_crtc_reg(struct ast_device *ast, jregAD |= 0x01; /* HBE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f)); - temp = ((mode->crtc_hsync_start-precache) >> 3) - 1; + temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1; if (temp & 0x100) jregAC |= 0x40; /* HRS D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp); - temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f; + temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f; if (temp & 0x20) jregAD |= 0x04; /* HRE D[5] */ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05)); @@ -289,8 +289,7 @@ static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD); - // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); - if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080)) + if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00); @@ -348,7 +347,7 @@ 
static void ast_set_crtc_reg(struct ast_device *ast, ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80)); - if (precache) + if (crtc_hsync_precatch) ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80); else ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00); @@ -370,12 +369,7 @@ static void ast_set_dclk_reg(struct ast_device *ast, struct drm_display_mode *mode, const struct ast_vbios_enhtable *vmode) { - const struct ast_vbios_dclk_info *clk_info; - - if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) - clk_info = &dclk_table_ast2500[vmode->dclk_index]; - else - clk_info = &dclk_table[vmode->dclk_index]; + const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index]; ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1); ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2); @@ -415,20 +409,11 @@ static void ast_set_color_reg(struct ast_device *ast, static void ast_set_crtthd_reg(struct ast_device *ast) { - /* Set Threshold */ - if (IS_AST_GEN7(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0); - } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60); - } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f); - } else { - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f); - ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f); - } + u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low; + u8 vgacra7 = ast->quirks->crtc_mem_req_threshold_high; + + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7); + ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6); } static void ast_set_sync_reg(struct ast_device *ast, @@ -836,22 +821,24 @@ 
ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct ast_device *ast = to_ast_device(crtc->dev); + u8 vgacr17 = 0x00; + u8 vgacrb6 = 0xff; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, 0x00); - ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0x00); + vgacr17 |= AST_IO_VGACR17_SYNC_ENABLE; + vgacrb6 &= ~(AST_IO_VGACRB6_VSYNC_OFF | AST_IO_VGACRB6_HSYNC_OFF); + + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17); + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6); } static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); struct ast_device *ast = to_ast_device(crtc->dev); - u8 vgacrb6; + u8 vgacr17 = 0xff; - ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, AST_IO_VGASR1_SD); - - vgacrb6 = AST_IO_VGACRB6_VSYNC_OFF | - AST_IO_VGACRB6_HSYNC_OFF; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6); + vgacr17 &= ~AST_IO_VGACR17_SYNC_ENABLE; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17); /* * HW cursors require the underlying primary plane and CRTC to diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index e15adaf3a80e..30578e3b07e4 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -29,6 +29,7 @@ #define AST_IO_VGAGRI (0x4E) #define AST_IO_VGACRI (0x54) +#define AST_IO_VGACR17_SYNC_ENABLE BIT(7) /* called "Hardware reset" in docs */ #define AST_IO_VGACR80_PASSWORD (0xa8) #define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0) #define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index f1c9f7e1f1fc..7da5b5c60f41 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -33,66 +33,6 @@ #define HiCModeIndex 3 
#define TrueCModeIndex 4 -static const struct ast_vbios_dclk_info dclk_table[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ - {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x77, 0x58, 0x80}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */ -}; - -static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { - {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */ - {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */ - {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */ - {0x76, 0x63, 0x01}, /* 03: VCLK36 */ - {0xEE, 0x67, 0x01}, /* 04: VCLK40 */ - {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */ - {0xC6, 0x64, 0x01}, /* 06: VCLK50 */ - {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */ - {0x80, 0x64, 0x00}, /* 08: VCLK65 */ - {0x7B, 0x63, 0x00}, /* 09: VCLK75 */ - {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */ - {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */ - {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */ - {0x85, 0x24, 0x00}, /* 0D: VCLK135 */ - {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ - {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ - {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ - {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */ - {0x28, 
0x49, 0x80}, /* 12: VCLK106.5 */ - {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ - {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ - {0x47, 0x6c, 0x80}, /* 15: VCLK71 */ - {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */ - {0x58, 0x01, 0x42}, /* 17: VCLK119 */ - {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */ - {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */ - {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */ -}; - static const struct ast_vbios_stdtable vbios_stdtable[] = { /* MD_2_3_400 */ { diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0f7ffb3ced20..e0efc7309b1b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_CM), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_SD, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_DISP), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_SYNC), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, 
ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_HLCDC_PIXEL_CLK), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); clk_disable_unprepare(crtc->dc->hlcdc->sys_clk); pinctrl_pm_select_sleep_state(dev->dev); @@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c, if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_PIXEL_CLK, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_SYNC, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_HLCDC_DISP, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n"); if (crtc->dc->desc->is_xlcdc) { regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, status & ATMEL_XLCDC_CM, 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n"); regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD); if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status, !(status & ATMEL_XLCDC_SD), 10, 1000)) - dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n"); + drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n"); } pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c 
b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index fa8ad94e431a..dd70894c8f38 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -724,19 +725,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev) ret = atmel_hlcdc_create_outputs(dev); if (ret) { - dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret); + drm_err(dev, "failed to create HLCDC outputs: %d\n", ret); return ret; } ret = atmel_hlcdc_create_planes(dev); if (ret) { - dev_err(dev->dev, "failed to create planes: %d\n", ret); + drm_err(dev, "failed to create planes: %d\n", ret); return ret; } ret = atmel_hlcdc_crtc_create(dev); if (ret) { - dev_err(dev->dev, "failed to create crtc\n"); + drm_err(dev, "failed to create crtc\n"); return ret; } @@ -778,7 +779,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = clk_prepare_enable(dc->hlcdc->periph_clk); if (ret) { - dev_err(dev->dev, "failed to enable periph_clk\n"); + drm_err(dev, "failed to enable periph_clk\n"); return ret; } @@ -786,13 +787,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + drm_err(dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + drm_err(dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } @@ -802,7 +803,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); if (ret < 0) { - dev_err(dev->dev, "failed to install IRQ handler\n"); + drm_err(dev, "failed to install IRQ handler\n"); goto err_periph_clk_disable; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h 
index e1a0bb24b511..53d47f01db0b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h @@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops { void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state, u32 sr, int i); - void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane); + void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc); void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane, struct atmel_hlcdc_plane_state *state); void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 50fee6a93964..0b8a86afb096 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "atmel_hlcdc_dc.h" @@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep); of_node_put(ep); if (output->bus_fmt < 0) { - dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint); + drm_err(dev, "endpoint %d: invalid bus width\n", endpoint); return -EINVAL; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 4a7ba0918eca..92132be9823f 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "atmel_hlcdc_dc.h" @@ -365,13 +366,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane, xfactor); /* - * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration - * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half + * With YCbCr 4:2:0 window resampling, configuration register + * LCDC_HEOCFG25.VXSCFACT 
and LCDC_HEOCFG27.HXSCFACT values are half * the value of yfactor and xfactor. + * + * On the other hand, with YCbCr 4:2:2 window resampling, only the + * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value + * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no + * division by 2). */ - if (state->base.fb->format->format == DRM_FORMAT_YUV420) { + switch (state->base.fb->format->format) { + /* YCbCr 4:2:2 */ + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_NV61: + xfactor /= 2; + break; + + /* YCbCr 4:2:0 */ + case DRM_FORMAT_YUV420: + case DRM_FORMAT_NV21: yfactor /= 2; xfactor /= 2; + break; + default: + break; } atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2, @@ -714,7 +736,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if (!hstate->base.crtc || WARN_ON(!fb)) return 0; - crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc); mode = &crtc_state->adjusted_mode; ret = drm_atomic_helper_check_plane_state(s, crtc_state, @@ -816,7 +838,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, return 0; } -static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR, @@ -832,7 +855,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR); } -static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) +static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane, + struct atmel_hlcdc_dc *dc) { /* Disable interrupts */ atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR, @@ -842,6 +866,15 @@ static void 
atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane) atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_ENR, 0); + /* + * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN, + * (where xxx indicates each layer) requires writing one to the + * Update Attribute field for each layer in LCDC_ATTRE register for SAM9X7. + */ + regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE | + ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE | + ATMEL_XLCDC_HEO_UPDATE); + /* Clear all pending interrupts */ atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR); } @@ -852,7 +885,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p, struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p); struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private; - dc->desc->ops->lcdc_atomic_disable(plane); + dc->desc->ops->lcdc_atomic_disable(plane, dc); } static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane, @@ -1034,7 +1067,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) | ATMEL_HLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1051,7 +1084,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane, if (isr & (ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) | ATMEL_XLCDC_LAYER_OVR_IRQ(2))) - dev_dbg(plane->base.dev->dev, "overrun on plane %s\n", + drm_dbg(plane->base.dev, "overrun on plane %s\n", desc->name); } @@ -1140,7 +1173,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p) if (state) { if (atmel_hlcdc_plane_alloc_dscrs(p, state)) { kfree(state); - dev_err(p->dev->dev, + drm_err(p->dev, "Failed to allocate initial plane state\n"); return; } diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 9a480c6abb85..b9028a5e5a06 100644 --- 
a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE depends on OF depends on COMMON_CLK select DRM_DW_HDMI + imply DRM_IMX8MP_HDMI_PAI imply DRM_IMX8MP_HDMI_PVI imply PHY_FSL_SAMSUNG_HDMI_PHY help Choose this to enable support for the internal HDMI encoder found on the i.MX8MP SoC. +config DRM_IMX8MP_HDMI_PAI + tristate "Freescale i.MX8MP HDMI PAI bridge support" + depends on OF + select DRM_DW_HDMI + select REGMAP + select REGMAP_MMIO + help + Choose this to enable support for the internal HDMI TX Parallel + Audio Interface found on the Freescale i.MX8MP SoC. + config DRM_IMX8MP_HDMI_PVI tristate "Freescale i.MX8MP HDMI PVI bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile index dd5d48584806..8d01fda25451 100644 --- a/drivers/gpu/drm/bridge/imx/Makefile +++ b/drivers/gpu/drm/bridge/imx/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o +obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c new file mode 100644 index 000000000000..8d13a35b206a --- /dev/null +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2025 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define HTX_PAI_CTRL 0x00 +#define ENABLE BIT(0) + +#define HTX_PAI_CTRL_EXT 0x04 +#define WTMK_HIGH_MASK GENMASK(31, 24) +#define WTMK_LOW_MASK GENMASK(23, 16) +#define NUM_CH_MASK GENMASK(10, 8) +#define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n)) 
+#define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n)) +#define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1) + +#define HTX_PAI_FIELD_CTRL 0x08 +#define PRE_SEL GENMASK(28, 24) +#define D_SEL GENMASK(23, 20) +#define V_SEL GENMASK(19, 15) +#define U_SEL GENMASK(14, 10) +#define C_SEL GENMASK(9, 5) +#define P_SEL GENMASK(4, 0) + +struct imx8mp_hdmi_pai { + struct regmap *regmap; +}; + +static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, + int width, int rate, int non_pcm, + int iec958) +{ + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; + int val; + + /* PAI set control extended */ + val = WTMK_HIGH(3) | WTMK_LOW(3); + val |= NUM_CH(channel); + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val); + + /* IEC60958 format */ + if (iec958) { + val = FIELD_PREP_CONST(P_SEL, + __bf_shf(IEC958_SUBFRAME_PARITY)); + val |= FIELD_PREP_CONST(C_SEL, + __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS)); + val |= FIELD_PREP_CONST(U_SEL, + __bf_shf(IEC958_SUBFRAME_USER_DATA)); + val |= FIELD_PREP_CONST(V_SEL, + __bf_shf(IEC958_SUBFRAME_VALIDITY)); + val |= FIELD_PREP_CONST(D_SEL, + __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK)); + val |= FIELD_PREP_CONST(PRE_SEL, + __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK)); + } else { + /* + * The allowed PCM widths are 24bit and 32bit, as they are supported + * by aud2htx module. + * for 24bit, D_SEL = 0, select all the bits. + * for 32bit, D_SEL = 8, select 24bit in MSB. 
+ */ + val = FIELD_PREP(D_SEL, width - 24); + } + + regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val); + + /* PAI start running */ + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE); +} + +static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi) +{ + const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); + struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; + + /* Stop PAI */ + regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); +} + +static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = HTX_PAI_FIELD_CTRL, +}; + +static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_hdmi_plat_data *plat_data = data; + struct imx8mp_hdmi_pai *hdmi_pai; + struct resource *res; + void __iomem *base; + + hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); + if (!hdmi_pai) + return -ENOMEM; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(base)) + return PTR_ERR(base); + + hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base, + &imx8mp_hdmi_pai_regmap_config); + if (IS_ERR(hdmi_pai->regmap)) { + dev_err(dev, "regmap init failed\n"); + return PTR_ERR(hdmi_pai->regmap); + } + + plat_data->enable_audio = imx8mp_hdmi_pai_enable; + plat_data->disable_audio = imx8mp_hdmi_pai_disable; + plat_data->priv_audio = hdmi_pai; + + return 0; +} + +static const struct component_ops imx8mp_hdmi_pai_ops = { + .bind = imx8mp_hdmi_pai_bind, +}; + +static int imx8mp_hdmi_pai_probe(struct platform_device *pdev) +{ + return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops); +} + +static void imx8mp_hdmi_pai_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &imx8mp_hdmi_pai_ops); +} + +static const struct of_device_id imx8mp_hdmi_pai_of_table[] = { + { .compatible = "fsl,imx8mp-hdmi-pai" }, + { /* Sentinel */ } +}; 
+MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table); + +static struct platform_driver imx8mp_hdmi_pai_platform_driver = { + .probe = imx8mp_hdmi_pai_probe, + .remove = imx8mp_hdmi_pai_remove, + .driver = { + .name = "imx8mp-hdmi-pai", + .of_match_table = imx8mp_hdmi_pai_of_table, + }, +}; +module_platform_driver(imx8mp_hdmi_pai_platform_driver); + +MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c index 1e7a789ec289..32fd3554e267 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c @@ -5,11 +5,13 @@ */ #include +#include #include #include #include #include #include +#include struct imx8mp_hdmi { struct dw_hdmi_plat_data plat_data; @@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = { .update_hpd = dw_hdmi_phy_update_hpd, }; +static int imx8mp_dw_hdmi_bind(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); + int ret; + + ret = component_bind_all(dev, &hdmi->plat_data); + if (ret) + return dev_err_probe(dev, ret, "component_bind_all failed!\n"); + + hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data); + if (IS_ERR(hdmi->dw_hdmi)) { + component_unbind_all(dev, &hdmi->plat_data); + return PTR_ERR(hdmi->dw_hdmi); + } + + return 0; +} + +static void imx8mp_dw_hdmi_unbind(struct device *dev) +{ + struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev); + + dw_hdmi_remove(hdmi->dw_hdmi); + + component_unbind_all(dev, &hdmi->plat_data); +} + +static const struct component_master_ops imx8mp_dw_hdmi_ops = { + .bind = imx8mp_dw_hdmi_bind, + .unbind = imx8mp_dw_hdmi_unbind, +}; + static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct dw_hdmi_plat_data *plat_data; + struct component_match *match = NULL; + struct device_node *remote; struct imx8mp_hdmi *hdmi; 
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); @@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev) plat_data->priv_data = hdmi; plat_data->phy_force_vendor = true; - hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); - if (IS_ERR(hdmi->dw_hdmi)) - return PTR_ERR(hdmi->dw_hdmi); - platform_set_drvdata(pdev, hdmi); + /* port@2 is for hdmi_pai device */ + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); + if (!remote) { + hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data); + if (IS_ERR(hdmi->dw_hdmi)) + return PTR_ERR(hdmi->dw_hdmi); + } else { + drm_of_component_match_add(dev, &match, component_compare_of, remote); + + of_node_put(remote); + + return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match); + } + return 0; } static void imx8mp_dw_hdmi_remove(struct platform_device *pdev) { struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev); + struct device_node *remote; - dw_hdmi_remove(hdmi->dw_hdmi); + remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0); + if (remote) { + of_node_put(remote); + + component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops); + } else { + dw_hdmi_remove(hdmi->dw_hdmi); + } } static int imx8mp_dw_hdmi_pm_suspend(struct device *dev) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 5d272916e200..122502968927 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); } -static int imx8qxp_ldb_runtime_suspend(struct device *dev) -{ - return 0; -} - static int imx8qxp_ldb_runtime_resume(struct device *dev) { struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev); @@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev) } static const struct dev_pm_ops imx8qxp_ldb_pm_ops = { - RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL) + 
RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL) }; static const struct of_device_id imx8qxp_ldb_dt_ids[] = { diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index 399fa7eebd49..03fc8fd10f20 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -121,8 +121,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx) } /* Test for known Chip ID. */ - if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE || - chipid[2] != REG_CHIPID2_VALUE) { + if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) { dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n", chipid[0], chipid[1], chipid[2]); return -EINVAL; diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index 2c5e532410de..a46df7583bcf 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -61,6 +61,14 @@ config DRM_DW_HDMI_QP select DRM_KMS_HELPER select REGMAP_MMIO +config DRM_DW_HDMI_QP_CEC + bool "Synopsis Designware QP CEC interface" + depends on DRM_DW_HDMI_QP + select DRM_DISPLAY_HDMI_CEC_HELPER + help + Support the CEC interface which is part of the Synopsys + Designware HDMI QP block. 
+ config DRM_DW_MIPI_DSI tristate select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c index 9bbfe8da3de0..82aaf74e1bc0 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-dp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c @@ -2049,6 +2049,8 @@ struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, bridge->type = DRM_MODE_CONNECTOR_DisplayPort; bridge->ycbcr_420_allowed = true; + devm_drm_bridge_add(dev, bridge); + dp->aux.dev = dev; dp->aux.drm_dev = encoder->dev; dp->aux.name = dev_name(dev); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c index ab18f9a3bf23..df7a37eb47f4 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c @@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data, params->iec.status[0] & IEC958_AES0_NONAUDIO); dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width); + if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE) + dw_hdmi_set_sample_iec958(dw->data.hdmi, 1); + else + dw_hdmi_set_sample_iec958(dw->data.hdmi, 0); + return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 39332c57f2c5..4ba7b339eff6 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -26,6 +27,8 @@ #include #include +#include + #include #include "dw-hdmi-qp.h" @@ -131,17 +134,34 @@ struct dw_hdmi_qp_i2c { bool is_segment; }; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +struct dw_hdmi_qp_cec { + struct drm_connector *connector; + int irq; + u32 addresses; + struct cec_msg rx_msg; + u8 tx_status; + bool tx_done; + bool rx_done; +}; +#endif + struct dw_hdmi_qp { struct drm_bridge bridge; struct device *dev; struct dw_hdmi_qp_i2c *i2c; +#ifdef 
CONFIG_DRM_DW_HDMI_QP_CEC + struct dw_hdmi_qp_cec *cec; +#endif + struct { const struct dw_hdmi_qp_phy_ops *ops; void *data; } phy; + unsigned long ref_clk_rate; struct regmap *regm; unsigned long tmds_char_rate; @@ -965,6 +985,179 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge, } } +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC +static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + irqreturn_t ret = IRQ_HANDLED; + u32 stat; + + stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS); + if (stat == 0) + return IRQ_NONE; + + dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR); + + if (stat & CEC_STAT_LINE_ERR) { + cec->tx_status = CEC_TX_STATUS_ERROR; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_DONE) { + cec->tx_status = CEC_TX_STATUS_OK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } else if (stat & CEC_STAT_NACK) { + cec->tx_status = CEC_TX_STATUS_NACK; + cec->tx_done = true; + ret = IRQ_WAKE_THREAD; + } + + if (stat & CEC_STAT_EOM) { + unsigned int len, i, val; + + val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS); + len = (val & 0xf) + 1; + + if (len > sizeof(cec->rx_msg.msg)) + len = sizeof(cec->rx_msg.msg); + + for (i = 0; i < 4; i++) { + val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4); + cec->rx_msg.msg[i * 4] = val & 0xff; + cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff; + cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff; + cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff; + } + + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + cec->rx_msg.len = len; + cec->rx_done = true; + + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id) +{ + struct dw_hdmi_qp *hdmi = dev_id; + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (cec->tx_done) { + cec->tx_done = false; + drm_connector_hdmi_cec_transmit_attempt_done(cec->connector, + cec->tx_status); + } + + if (cec->rx_done) { + 
cec->rx_done = false; + drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg); + } + + return IRQ_HANDLED; +} + +static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + cec->connector = connector; + + dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + + return devm_request_threaded_irq(hdmi->dev, cec->irq, + dw_hdmi_qp_cec_hardirq, + dw_hdmi_qp_cec_thread, IRQF_SHARED, + dev_name(hdmi->dev), hdmi); +} + +static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + struct dw_hdmi_qp_cec *cec = hdmi->cec; + + if (logical_addr == CEC_LOG_ADDR_INVALID) + cec->addresses = 0; + else + cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST; + + dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR); + + return 0; +} + +static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int irqs; + u32 swdisable; + + if (!enable) { + dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N); + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable | CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + } else { + swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE); + swdisable = swdisable & ~CEC_SWDISABLE; + dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE); + + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL); + + dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID); + + irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM | + CEC_STAT_DONE; + dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR); + dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N); + } + + return 0; +} + +static int 
dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts, + u32 signal_free_time, struct cec_msg *msg) +{ + struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); + unsigned int i; + u32 val; + + for (i = 0; i < msg->len; i++) { + if (!(i % 4)) + val = msg->msg[i]; + if ((i % 4) == 1) + val |= msg->msg[i] << 8; + if ((i % 4) == 2) + val |= msg->msg[i] << 16; + if ((i % 4) == 3) + val |= msg->msg[i] << 24; + + if (i == (msg->len - 1) || (i % 4) == 3) + dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4); + } + + dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT); + dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL); + + return 0; +} +#else +#define dw_hdmi_qp_cec_init NULL +#define dw_hdmi_qp_cec_enable NULL +#define dw_hdmi_qp_cec_log_addr NULL +#define dw_hdmi_qp_cec_transmit NULL +#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */ + static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -979,6 +1172,10 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = { .hdmi_audio_startup = dw_hdmi_qp_audio_enable, .hdmi_audio_shutdown = dw_hdmi_qp_audio_disable, .hdmi_audio_prepare = dw_hdmi_qp_audio_prepare, + .hdmi_cec_init = dw_hdmi_qp_cec_init, + .hdmi_cec_enable = dw_hdmi_qp_cec_enable, + .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr, + .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit, }; static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id) @@ -1014,13 +1211,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi) { dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N); dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N); - dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0); + dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0); /* Software reset */ dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0); - dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0); - dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, 
I2CM_INTERFACE_CONTROL0); /* Clear DONE and ERROR interrupts */ @@ -1066,6 +1261,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->phy.ops = plat_data->phy_ops; hdmi->phy.data = plat_data->phy_data; + if (plat_data->ref_clk_rate) { + hdmi->ref_clk_rate = plat_data->ref_clk_rate; + } else { + hdmi->ref_clk_rate = 428571429; + dev_warn(dev, "Set ref_clk_rate to vendor default\n"); + } + dw_hdmi_qp_init_hw(hdmi); ret = devm_request_threaded_irq(dev, plat_data->main_irq, @@ -1093,6 +1295,22 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, hdmi->bridge.hdmi_audio_dev = dev; hdmi->bridge.hdmi_audio_dai_port = 1; +#ifdef CONFIG_DRM_DW_HDMI_QP_CEC + if (plat_data->cec_irq) { + hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; + hdmi->bridge.hdmi_cec_dev = dev; + hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev); + + hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL); + if (!hdmi->cec) + return ERR_PTR(-ENOMEM); + + hdmi->cec->irq = plat_data->cec_irq; + } else { + dev_warn(dev, "Disabled CEC support due to missing IRQ\n"); + } +#endif + ret = devm_drm_bridge_add(dev, &hdmi->bridge); if (ret) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h index 72987e6c4689..91a15f82e32a 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h @@ -488,9 +488,23 @@ #define AUDPKT_VBIT_OVR0 0xf24 /* CEC Registers */ #define CEC_TX_CONTROL 0x1000 +#define CEC_CTRL_CLEAR BIT(0) +#define CEC_CTRL_START BIT(0) #define CEC_STATUS 0x1004 +#define CEC_STAT_DONE BIT(0) +#define CEC_STAT_NACK BIT(1) +#define CEC_STAT_ARBLOST BIT(2) +#define CEC_STAT_LINE_ERR BIT(3) +#define CEC_STAT_RETRANS_FAIL BIT(4) +#define CEC_STAT_DISCARD BIT(5) +#define CEC_STAT_TX_BUSY BIT(8) +#define CEC_STAT_RX_BUSY BIT(9) +#define CEC_STAT_DRIVE_ERR BIT(10) +#define CEC_STAT_EOM BIT(11) +#define CEC_STAT_NOTIFY_ERR BIT(12) #define 
CEC_CONFIG 0x1008 #define CEC_ADDR 0x100c +#define CEC_ADDR_BROADCAST BIT(15) #define CEC_TX_COUNT 0x1020 #define CEC_TX_DATA3_0 0x1024 #define CEC_TX_DATA7_4 0x1028 diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 206b099a35e9..3b77e73ac0ea 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -177,6 +177,7 @@ struct dw_hdmi { spinlock_t audio_lock; struct mutex audio_mutex; + unsigned int sample_iec958; unsigned int sample_non_pcm; unsigned int sample_width; unsigned int sample_rate; @@ -198,6 +199,12 @@ struct dw_hdmi { enum drm_connector_status last_connector_result; }; +const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi) +{ + return hdmi->plat_data; +} +EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data); + #define HDMI_IH_PHY_STAT0_RX_SENSE \ (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \ HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3) @@ -712,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm) } EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm); +void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958) +{ + mutex_lock(&hdmi->audio_mutex); + hdmi->sample_iec958 = iec958; + mutex_unlock(&hdmi->audio_mutex); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958); + void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate) { mutex_lock(&hdmi->audio_mutex); @@ -843,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi) hdmi->channels, hdmi->sample_width, hdmi->sample_rate, - hdmi->sample_non_pcm); + hdmi->sample_non_pcm, + hdmi->sample_iec958); } static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ae0d08e5e960..276d05d25ad8 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -106,10 +106,21 @@ #define 
SN_PWM_EN_INV_REG 0xA5 #define SN_PWM_INV_MASK BIT(0) #define SN_PWM_EN_MASK BIT(1) + +#define SN_IRQ_EN_REG 0xE0 +#define IRQ_EN BIT(0) + +#define SN_IRQ_EVENTS_EN_REG 0xE6 +#define HPD_INSERTION_EN BIT(1) +#define HPD_REMOVAL_EN BIT(2) + #define SN_AUX_CMD_STATUS_REG 0xF4 #define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3) #define AUX_IRQ_STATUS_AUX_SHORT BIT(5) #define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6) +#define SN_IRQ_STATUS_REG 0xF5 +#define HPD_REMOVAL_STATUS BIT(2) +#define HPD_INSERTION_STATUS BIT(1) #define MIN_DSI_CLK_FREQ_MHZ 40 @@ -152,7 +163,9 @@ * @ln_assign: Value to program to the LN_ASSIGN register. * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG. * @comms_enabled: If true then communication over the aux channel is enabled. + * @hpd_enabled: If true then HPD events are enabled. * @comms_mutex: Protects modification of comms_enabled. + * @hpd_mutex: Protects modification of hpd_enabled. * * @gchip: If we expose our GPIOs, this is used. * @gchip_output: A cache of whether we've set GPIOs to output. 
This @@ -190,7 +203,9 @@ struct ti_sn65dsi86 { u8 ln_assign; u8 ln_polrs; bool comms_enabled; + bool hpd_enabled; struct mutex comms_mutex; + struct mutex hpd_mutex; #if defined(CONFIG_OF_GPIO) struct gpio_chip gchip; @@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = { .max_register = 0xFF, }; +static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg, + u8 *val) +{ + int ret; + unsigned int reg_val; + + ret = regmap_read(pdata->regmap, reg, ®_val); + if (ret) { + dev_err(pdata->dev, "fail to read raw reg %#x: %d\n", + reg, ret); + return ret; + } + *val = (u8)reg_val; + + return 0; +} + static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata, unsigned int reg, u16 *val) { @@ -379,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata) static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) { struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev); + const struct i2c_client *client = to_i2c_client(pdata->dev); int ret; ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies); @@ -413,6 +446,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) if (pdata->refclk) ti_sn65dsi86_enable_comms(pdata, NULL); + if (client->irq) { + ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN, + IRQ_EN); + if (ret) + dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret); + } + return ret; } @@ -1211,6 +1251,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry * static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + const struct i2c_client *client = to_i2c_client(pdata->dev); + int ret; /* * Device needs to be powered on before reading the HPD state @@ -1219,11 +1261,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) */ pm_runtime_get_sync(pdata->dev); + + mutex_lock(&pdata->hpd_mutex); + pdata->hpd_enabled = true; + 
mutex_unlock(&pdata->hpd_mutex); + + if (client->irq) { + ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, + HPD_REMOVAL_EN | HPD_INSERTION_EN); + if (ret) + dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret); + } } static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + const struct i2c_client *client = to_i2c_client(pdata->dev); + int ret; + + if (client->irq) { + ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG, + HPD_REMOVAL_EN | HPD_INSERTION_EN); + if (ret) + dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret); + } + + mutex_lock(&pdata->hpd_mutex); + pdata->hpd_enabled = false; + mutex_unlock(&pdata->hpd_mutex); pm_runtime_put_autosuspend(pdata->dev); } @@ -1309,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata) return 0; } +static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private) +{ + struct ti_sn65dsi86 *pdata = private; + struct drm_device *dev = pdata->bridge.dev; + u8 status; + int ret; + bool hpd_event; + + ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status); + if (ret) { + dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret); + return IRQ_NONE; + } + + hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS); + + dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status); + if (!status) + return IRQ_NONE; + + ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status); + if (ret) { + dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret); + return IRQ_NONE; + } + + /* Only send the HPD event if we are bound with a device. 
*/ + mutex_lock(&pdata->hpd_mutex); + if (pdata->hpd_enabled && hpd_event) + drm_kms_helper_hotplug_event(dev); + mutex_unlock(&pdata->hpd_mutex); + + return IRQ_HANDLED; +} + static int ti_sn_bridge_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id) { @@ -1931,6 +2032,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) dev_set_drvdata(dev, pdata); pdata->dev = dev; + mutex_init(&pdata->hpd_mutex); mutex_init(&pdata->comms_mutex); pdata->regmap = devm_regmap_init_i2c(client, @@ -1971,6 +2073,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf))) return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n"); + if (client->irq) { + ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL, + ti_sn_bridge_interrupt, + IRQF_ONESHOT, + dev_name(pdata->dev), pdata); + + if (ret) + return dev_err_probe(dev, ret, "failed to request interrupt\n"); + } + /* * Break ourselves up into a collection of aux devices. 
The only real * motiviation here is to solve the chicken-and-egg problem of probe diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c index f894ba52bdb5..47e5f27eee58 100644 --- a/drivers/gpu/drm/clients/drm_fbdev_client.c +++ b/drivers/gpu/drm/clients/drm_fbdev_client.c @@ -13,16 +13,28 @@ * struct drm_client_funcs */ +static void drm_fbdev_client_free(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); +} + static void drm_fbdev_client_unregister(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); if (fb_helper->info) { + /* + * Fully probed framebuffer device + */ drm_fb_helper_unregister_info(fb_helper); } else { + /* + * Partially initialized client, no framebuffer device yet + */ drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } } @@ -62,32 +74,27 @@ err_drm_err: return ret; } -static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_suspend(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, true); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, true); + drm_fb_helper_set_suspend_unlocked(fb_helper, true); return 0; } -static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_fbdev_client_resume(struct drm_client_dev *client) { struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - if (holds_console_lock) - drm_fb_helper_set_suspend(fb_helper, false); - else - drm_fb_helper_set_suspend_unlocked(fb_helper, false); + drm_fb_helper_set_suspend_unlocked(fb_helper, false); return 0; } static const struct drm_client_funcs drm_fbdev_client_funcs = { .owner = 
THIS_MODULE, + .free = drm_fbdev_client_free, .unregister = drm_fbdev_client_unregister, .restore = drm_fbdev_client_restore, .hotplug = drm_fbdev_client_hotplug, diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c index d239f1e3c456..19e55aa0ed74 100644 --- a/drivers/gpu/drm/clients/drm_log.c +++ b/drivers/gpu/drm/clients/drm_log.c @@ -100,7 +100,7 @@ static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line) return; iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]); drm_client_buffer_vunmap_local(scanout->buffer); - drm_client_framebuffer_flush(scanout->buffer, &r); + drm_client_buffer_flush(scanout->buffer, &r); } static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s, @@ -133,7 +133,7 @@ static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s, if (scanout->line >= scanout->rows) scanout->line = 0; drm_client_buffer_vunmap_local(scanout->buffer); - drm_client_framebuffer_flush(scanout->buffer, &r); + drm_client_buffer_flush(scanout->buffer, &r); } static void drm_log_draw_new_line(struct drm_log_scanout *scanout, @@ -204,7 +204,7 @@ static int drm_log_setup_modeset(struct drm_client_dev *client, if (format == DRM_FORMAT_INVALID) return -EINVAL; - scanout->buffer = drm_client_framebuffer_create(client, width, height, format); + scanout->buffer = drm_client_buffer_create_dumb(client, width, height, format); if (IS_ERR(scanout->buffer)) { drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n", width, height, &format); @@ -272,7 +272,7 @@ static void drm_log_init_client(struct drm_log *dlog) err_failed_commit: for (i = 0; i < n_modeset; i++) - drm_client_framebuffer_delete(dlog->scanout[i].buffer); + drm_client_buffer_delete(dlog->scanout[i].buffer); err_nomodeset: kfree(dlog->scanout); @@ -286,26 +286,33 @@ static void drm_log_free_scanout(struct drm_client_dev *client) if (dlog->n_scanout) { for (i = 0; i < dlog->n_scanout; i++) - 
drm_client_framebuffer_delete(dlog->scanout[i].buffer); + drm_client_buffer_delete(dlog->scanout[i].buffer); dlog->n_scanout = 0; kfree(dlog->scanout); dlog->scanout = NULL; } } -static void drm_log_client_unregister(struct drm_client_dev *client) +static void drm_log_client_free(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); struct drm_device *dev = client->dev; + kfree(dlog); + + drm_dbg(dev, "Unregistered with drm log\n"); +} + +static void drm_log_client_unregister(struct drm_client_dev *client) +{ + struct drm_log *dlog = client_to_drm_log(client); + unregister_console(&dlog->con); mutex_lock(&dlog->lock); drm_log_free_scanout(client); - drm_client_release(client); mutex_unlock(&dlog->lock); - kfree(dlog); - drm_dbg(dev, "Unregistered with drm log\n"); + drm_client_release(client); } static int drm_log_client_hotplug(struct drm_client_dev *client) @@ -319,7 +326,7 @@ static int drm_log_client_hotplug(struct drm_client_dev *client) return 0; } -static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_suspend(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -328,7 +335,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l return 0; } -static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock) +static int drm_log_client_resume(struct drm_client_dev *client) { struct drm_log *dlog = client_to_drm_log(client); @@ -339,6 +346,7 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo static const struct drm_client_funcs drm_log_client_funcs = { .owner = THIS_MODULE, + .free = drm_log_client_free, .unregister = drm_log_client_unregister, .hotplug = drm_log_client_hotplug, .suspend = drm_log_client_suspend, diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index baacd21e7341..a2d30cf9e06d 100644 --- 
a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -137,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector, { struct drm_bridge_connector *bridge_connector = to_drm_bridge_connector(connector); - struct drm_bridge *bridge; /* Notify all bridges in the pipeline of hotplug events. */ - drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) { if (bridge->funcs->hpd_notify) bridge->funcs->hpd_notify(bridge, status); } @@ -619,6 +618,20 @@ static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_f * Bridge Connector Initialisation */ +static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data) +{ + struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data; + + drm_bridge_put(bridge_connector->bridge_edid); + drm_bridge_put(bridge_connector->bridge_hpd); + drm_bridge_put(bridge_connector->bridge_detect); + drm_bridge_put(bridge_connector->bridge_modes); + drm_bridge_put(bridge_connector->bridge_hdmi); + drm_bridge_put(bridge_connector->bridge_hdmi_audio); + drm_bridge_put(bridge_connector->bridge_dp_audio); + drm_bridge_put(bridge_connector->bridge_hdmi_cec); +} + /** * drm_bridge_connector_init - Initialise a connector for a chain of bridges * @drm: the DRM device @@ -639,7 +652,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, struct drm_bridge_connector *bridge_connector; struct drm_connector *connector; struct i2c_adapter *ddc = NULL; - struct drm_bridge *bridge, *panel_bridge = NULL; + struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL; unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB); unsigned int max_bpc = 8; bool support_hdcp = false; @@ -650,6 +663,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!bridge_connector) return ERR_PTR(-ENOMEM); + 
ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector); + if (ret) + return ERR_PTR(ret); + bridge_connector->encoder = encoder; /* @@ -667,20 +684,28 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, * detection are available, we don't support hotplug detection at all. */ connector_type = DRM_MODE_CONNECTOR_Unknown; - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { if (!bridge->interlace_allowed) connector->interlace_allowed = false; if (!bridge->ycbcr_420_allowed) connector->ycbcr_420_allowed = false; - if (bridge->ops & DRM_BRIDGE_OP_EDID) - bridge_connector->bridge_edid = bridge; - if (bridge->ops & DRM_BRIDGE_OP_HPD) - bridge_connector->bridge_hpd = bridge; - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - bridge_connector->bridge_detect = bridge; - if (bridge->ops & DRM_BRIDGE_OP_MODES) - bridge_connector->bridge_modes = bridge; + if (bridge->ops & DRM_BRIDGE_OP_EDID) { + drm_bridge_put(bridge_connector->bridge_edid); + bridge_connector->bridge_edid = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_HPD) { + drm_bridge_put(bridge_connector->bridge_hpd); + bridge_connector->bridge_hpd = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_DETECT) { + drm_bridge_put(bridge_connector->bridge_detect); + bridge_connector->bridge_detect = drm_bridge_get(bridge); + } + if (bridge->ops & DRM_BRIDGE_OP_MODES) { + drm_bridge_put(bridge_connector->bridge_modes); + bridge_connector->bridge_modes = drm_bridge_get(bridge); + } if (bridge->ops & DRM_BRIDGE_OP_HDMI) { if (bridge_connector->bridge_hdmi) return ERR_PTR(-EBUSY); @@ -688,7 +713,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_clear_infoframe) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi = bridge; + bridge_connector->bridge_hdmi = drm_bridge_get(bridge); if (bridge->supported_formats) supported_formats = bridge->supported_formats; @@ 
-711,7 +736,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->hdmi_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_hdmi_audio = bridge; + bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) { @@ -729,21 +754,21 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, !bridge->funcs->dp_audio_shutdown) return ERR_PTR(-EINVAL); - bridge_connector->bridge_dp_audio = bridge; + bridge_connector->bridge_dp_audio = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { if (bridge_connector->bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge); } if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { if (bridge_connector->bridge_hdmi_cec) return ERR_PTR(-EBUSY); - bridge_connector->bridge_hdmi_cec = bridge; + bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge); if (!bridge->funcs->hdmi_cec_enable || !bridge->funcs->hdmi_cec_log_addr || @@ -762,8 +787,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge->ddc) ddc = bridge->ddc; - if (drm_bridge_is_panel(bridge)) - panel_bridge = bridge; + if (drm_bridge_is_panel(bridge)) { + drm_bridge_put(panel_bridge); + panel_bridge = drm_bridge_get(bridge); + } if (bridge->support_hdcp) support_hdcp = true; @@ -818,7 +845,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_notifier_register(connector, NULL, @@ -829,7 +856,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_cec && 
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) { - bridge = bridge_connector->bridge_hdmi_cec; + struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec; ret = drmm_connector_hdmi_cec_register(connector, &drm_bridge_connector_hdmi_cec_funcs, diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 1ebed7c41b0d..f9fdf19de74a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -123,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE], } EXPORT_SYMBOL(drm_dp_clock_recovery_ok); +bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]) +{ + u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED); + + return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS; +} +EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress); + u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], int lane) { @@ -4284,22 +4293,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf { int fxp, fxp_min, fxp_max, fxp_actual, f = 1; int ret; - u8 pn, pn_min, pn_max; + u8 pn, pn_min, pn_max, bit_count; if (!bl->aux_set) return 0; - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", aux->name, ret); return -ENODEV; } - pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", + aux->name, ret); + return -ENODEV; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + ret = drm_dp_dpcd_read_byte(aux, 
DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", + aux->name, ret); + return -ENODEV; + } + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + if (unlikely(pn_min > pn_max)) { + drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n", + aux->name, pn_min, pn_max); + return -EINVAL; + } + + /* + * Per VESA eDP Spec v1.4b, section 3.3.10.2: + * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, + * the sink must use the MIN value as the effective PWM bit count. + * Clamp the reported value to the [MIN, MAX] capability range to ensure + * correct brightness scaling on compliant eDP panels. + * Only enable this logic if the [MIN, MAX] range is valid in regard to Spec. + */ + pn = bit_count; + if (bit_count < pn_min) + pn = clamp(bit_count, pn_min, pn_max); + bl->max = (1 << pn) - 1; - if (!driver_pwm_freq_hz) + if (!driver_pwm_freq_hz) { + if (pn != bit_count) + goto bit_count_write_back; + return 0; + } /* * Set PWM Frequency divider to match desired frequency provided by the driver. @@ -4323,21 +4371,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf * - FxP is within 25% of desired value. * Note: 25% is arbitrary value and may need some tweak. 
*/ - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", - aux->name, ret); - return 0; - } - ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", - aux->name, ret); - return 0; - } - pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - /* Ensure frequency is within 25% of desired value */ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); @@ -4355,12 +4388,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf break; } +bit_count_write_back: ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); return 0; } + + if (!driver_pwm_freq_hz) + return 0; + bl->pwmgen_bit_count = pn; bl->max = (1 << pn) - 1; diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index cd15cf52f0c9..e05820b18832 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -200,6 +200,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); + state->checked = false; + for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i].ptr; @@ -207,9 +209,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; connector->funcs->atomic_destroy_state(connector, - state->connectors[i].state); + state->connectors[i].state_to_destroy); state->connectors[i].ptr = NULL; - state->connectors[i].state = NULL; + state->connectors[i].state_to_destroy = NULL; state->connectors[i].old_state = NULL; state->connectors[i].new_state = NULL; drm_connector_put(connector); @@ -222,10 +224,10 @@ void 
drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtcs[i].state); + state->crtcs[i].state_to_destroy); state->crtcs[i].ptr = NULL; - state->crtcs[i].state = NULL; + state->crtcs[i].state_to_destroy = NULL; state->crtcs[i].old_state = NULL; state->crtcs[i].new_state = NULL; @@ -242,9 +244,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) continue; plane->funcs->atomic_destroy_state(plane, - state->planes[i].state); + state->planes[i].state_to_destroy); state->planes[i].ptr = NULL; - state->planes[i].state = NULL; + state->planes[i].state_to_destroy = NULL; state->planes[i].old_state = NULL; state->planes[i].new_state = NULL; } @@ -253,9 +255,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) struct drm_private_obj *obj = state->private_objs[i].ptr; obj->funcs->atomic_destroy_state(obj, - state->private_objs[i].state); + state->private_objs[i].state_to_destroy); state->private_objs[i].ptr = NULL; - state->private_objs[i].state = NULL; + state->private_objs[i].state_to_destroy = NULL; state->private_objs[i].old_state = NULL; state->private_objs[i].new_state = NULL; } @@ -348,8 +350,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc_state *crtc_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); - crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (crtc_state) return crtc_state; @@ -361,7 +364,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtcs[index].state = crtc_state; + state->crtcs[index].state_to_destroy = crtc_state; state->crtcs[index].old_state = crtc->state; state->crtcs[index].new_state = crtc_state; state->crtcs[index].ptr = crtc; @@ -480,8 +483,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector, } if (state->crtc) - 
crtc_state = drm_atomic_get_existing_crtc_state(state->state, - state->crtc); + crtc_state = drm_atomic_get_new_crtc_state(state->state, + state->crtc); if (writeback_job->fb && !crtc_state->active) { drm_dbg_atomic(connector->dev, @@ -528,13 +531,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane_state *plane_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); /* the legacy pointers should never be set */ WARN_ON(plane->fb); WARN_ON(plane->old_fb); WARN_ON(plane->crtc); - plane_state = drm_atomic_get_existing_plane_state(state, plane); + plane_state = drm_atomic_get_new_plane_state(state, plane); if (plane_state) return plane_state; @@ -546,7 +550,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->planes[index].state = plane_state; + state->planes[index].state_to_destroy = plane_state; state->planes[index].ptr = plane; state->planes[index].old_state = plane->state; state->planes[index].new_state = plane_state; @@ -831,14 +835,17 @@ struct drm_private_state * drm_atomic_get_private_obj_state(struct drm_atomic_state *state, struct drm_private_obj *obj) { - int index, num_objs, i, ret; + int index, num_objs, ret; size_t size; struct __drm_private_objs_state *arr; struct drm_private_state *obj_state; - for (i = 0; i < state->num_private_objs; i++) - if (obj == state->private_objs[i].ptr) - return state->private_objs[i].state; + WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); + + obj_state = drm_atomic_get_new_private_obj_state(state, obj); + if (obj_state) + return obj_state; ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); if (ret) @@ -858,7 +865,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state, if (!obj_state) return ERR_PTR(-ENOMEM); - state->private_objs[index].state = obj_state; + state->private_objs[index].state_to_destroy = obj_state; state->private_objs[index].old_state = obj->state; 
state->private_objs[index].new_state = obj_state; state->private_objs[index].ptr = obj; @@ -1129,6 +1136,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector_state *connector_state; WARN_ON(!state->acquire_ctx); + drm_WARN_ON(state->dev, state->checked); ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); if (ret) @@ -1152,15 +1160,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->num_connector = alloc; } - if (state->connectors[index].state) - return state->connectors[index].state; + connector_state = drm_atomic_get_new_connector_state(state, connector); + if (connector_state) + return connector_state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_get(connector); - state->connectors[index].state = connector_state; + state->connectors[index].state_to_destroy = connector_state; state->connectors[index].old_state = connector->state; state->connectors[index].new_state = connector_state; state->connectors[index].ptr = connector; @@ -1308,7 +1317,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, struct drm_encoder *encoder) { struct drm_bridge_state *bridge_state; - struct drm_bridge *bridge; if (!encoder) return 0; @@ -1317,7 +1325,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state, "Adding all bridges for [encoder:%d:%s] to %p\n", encoder->base.id, encoder->name, state); - drm_for_each_bridge_in_chain(encoder, bridge) { + drm_for_each_bridge_in_chain_scoped(encoder, bridge) { /* Skip bridges that don't implement the atomic state hooks. 
*/ if (!bridge->funcs->atomic_duplicate_state) continue; @@ -1541,6 +1549,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state) requested_crtc, affected_crtc); } + state->checked = true; + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index d5ebe6ea0acb..5a473a274ff0 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3236,7 +3236,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_conn_state->state = state; new_conn_state->state = NULL; - state->connectors[i].state = old_conn_state; + state->connectors[i].state_to_destroy = old_conn_state; connector->state = new_conn_state; } @@ -3246,7 +3246,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_crtc_state->state = state; new_crtc_state->state = NULL; - state->crtcs[i].state = old_crtc_state; + state->crtcs[i].state_to_destroy = old_crtc_state; crtc->state = new_crtc_state; if (new_crtc_state->commit) { @@ -3266,7 +3266,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_plane_state->state = state; new_plane_state->state = NULL; - state->planes[i].state = old_plane_state; + state->planes[i].state_to_destroy = old_plane_state; plane->state = new_plane_state; } drm_panic_unlock(state->dev, flags); @@ -3277,7 +3277,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state, old_obj_state->state = state; new_obj_state->state = NULL; - state->private_objs[i].state = old_obj_state; + state->private_objs[i].state_to_destroy = old_obj_state; obj->state = new_obj_state; } diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index d031447eebc9..8f355df883d8 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -197,15 +197,22 @@ * driver. 
*/ +/* Protect bridge_list and bridge_lingering_list */ static DEFINE_MUTEX(bridge_lock); static LIST_HEAD(bridge_list); +static LIST_HEAD(bridge_lingering_list); static void __drm_bridge_free(struct kref *kref) { struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); + mutex_lock(&bridge_lock); + list_del(&bridge->list); + mutex_unlock(&bridge_lock); + if (bridge->funcs->destroy) bridge->funcs->destroy(bridge); + kfree(bridge->container); } @@ -273,6 +280,7 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, return ERR_PTR(-ENOMEM); bridge = container + offset; + INIT_LIST_HEAD(&bridge->list); bridge->container = container; bridge->funcs = funcs; kref_init(&bridge->refcount); @@ -286,10 +294,13 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, EXPORT_SYMBOL(__devm_drm_bridge_alloc); /** - * drm_bridge_add - add the given bridge to the global bridge list + * drm_bridge_add - register a bridge * * @bridge: bridge control structure * + * Add the given bridge to the global list of bridges, where they can be + * found by users via of_drm_find_bridge(). + * * The bridge to be added must have been allocated by * devm_drm_bridge_alloc(). */ @@ -300,6 +311,14 @@ void drm_bridge_add(struct drm_bridge *bridge) drm_bridge_get(bridge); + /* + * If the bridge was previously added and then removed, it is now + * in bridge_lingering_list. Remove it or bridge_lingering_list will be + * corrupted when adding this bridge to bridge_list below. 
+ */ + if (!list_empty(&bridge->list)) + list_del_init(&bridge->list); + mutex_init(&bridge->hpd_mutex); if (bridge->ops & DRM_BRIDGE_OP_HDMI) @@ -336,14 +355,19 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge) EXPORT_SYMBOL(devm_drm_bridge_add); /** - * drm_bridge_remove - remove the given bridge from the global bridge list + * drm_bridge_remove - unregister a bridge * * @bridge: bridge control structure + * + * Remove the given bridge from the global list of registered bridges, so + * it won't be found by users via of_drm_find_bridge(), and add it to the + * lingering bridge list, to keep track of it until its allocated memory is + * eventually freed. */ void drm_bridge_remove(struct drm_bridge *bridge) { mutex_lock(&bridge_lock); - list_del_init(&bridge->list); + list_move_tail(&bridge->list, &bridge_lingering_list); mutex_unlock(&bridge_lock); mutex_destroy(&bridge->hpd_mutex); @@ -398,6 +422,9 @@ static bool drm_bridge_is_atomic(struct drm_bridge *bridge) * If non-NULL the previous bridge must be already attached by a call to this * function. * + * The bridge to be attached must have been previously added by + * drm_bridge_add(). + * * Note that bridges attached to encoders are auto-detached during encoder * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally * *not* be balanced with a drm_bridge_detach() in driver code. 
@@ -414,6 +441,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, if (!encoder || !bridge) return -EINVAL; + if (!bridge->container) + DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n"); + + if (list_empty(&bridge->list)) + DRM_WARN("Missing drm_bridge_add() before attach\n"); + drm_bridge_get(bridge); if (previous && (!previous->dev || previous->encoder != encoder)) { @@ -1062,12 +1095,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; unsigned int i, num_out_bus_fmts = 0; - struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; - last_bridge = list_last_entry(&encoder->bridge_chain, - struct drm_bridge, chain_node); + struct drm_bridge *last_bridge __free(drm_bridge_put) = + drm_bridge_get(list_last_entry(&encoder->bridge_chain, + struct drm_bridge, chain_node)); last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, last_bridge); @@ -1121,7 +1154,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_bridge_state *bridge_state, *next_bridge_state; - struct drm_bridge *next_bridge; u32 output_flags = 0; bridge_state = drm_atomic_get_new_bridge_state(state, bridge); @@ -1130,7 +1162,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, if (!bridge_state) return; - next_bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge); /* * Let's try to apply the most common case here, that is, propagate @@ -1432,17 +1464,20 @@ EXPORT_SYMBOL(devm_drm_put_bridge); static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, struct drm_bridge *bridge, - unsigned int idx) + unsigned int idx, + bool lingering) { drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); - drm_printf(p, "\trefcount: %u\n", 
kref_read(&bridge->refcount)); + drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount), + lingering ? " [lingering]" : ""); drm_printf(p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); - if (bridge->of_node) + /* The OF node could be freed after drm_bridge_remove() */ + if (bridge->of_node && !lingering) drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); drm_printf(p, "\tops: [0x%x]", bridge->ops); @@ -1468,7 +1503,10 @@ static int allbridges_show(struct seq_file *m, void *data) mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); + + list_for_each_entry(bridge, &bridge_lingering_list, list) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true); mutex_unlock(&bridge_lock); @@ -1480,11 +1518,10 @@ static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); - struct drm_bridge *bridge; unsigned int idx = 0; - drm_for_each_bridge_in_chain(encoder, bridge) - drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + drm_for_each_bridge_in_chain_scoped(encoder, bridge) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false); return 0; } diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index a94061f373de..2f279b46bd2c 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -11,9 +11,19 @@ #include #include +#include + +enum drm_buddy_free_tree { + DRM_BUDDY_CLEAR_TREE = 0, + DRM_BUDDY_DIRTY_TREE, + DRM_BUDDY_MAX_FREE_TREES, +}; static struct kmem_cache *slab_blocks; +#define for_each_free_tree(tree) \ + for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) + static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, struct drm_buddy_block *parent, unsigned int order, @@ -31,6 +41,8 @@ static struct drm_buddy_block *drm_block_alloc(struct 
drm_buddy *mm, block->header |= order; block->parent = parent; + RB_CLEAR_NODE(&block->rb); + BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); return block; } @@ -41,23 +53,64 @@ static void drm_block_free(struct drm_buddy *mm, kmem_cache_free(slab_blocks, block); } -static void list_insert_sorted(struct drm_buddy *mm, - struct drm_buddy_block *block) +static enum drm_buddy_free_tree +get_block_tree(struct drm_buddy_block *block) { - struct drm_buddy_block *node; - struct list_head *head; + return drm_buddy_block_is_clear(block) ? + DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; +} - head = &mm->free_list[drm_buddy_block_order(block)]; - if (list_empty(head)) { - list_add(&block->link, head); - return; - } +static struct drm_buddy_block * +rbtree_get_free_block(const struct rb_node *node) +{ + return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL; +} - list_for_each_entry(node, head, link) - if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node)) - break; +static struct drm_buddy_block * +rbtree_last_free_block(struct rb_root *root) +{ + return rbtree_get_free_block(rb_last(root)); +} - __list_add(&block->link, node->link.prev, &node->link); +static bool rbtree_is_empty(struct rb_root *root) +{ + return RB_EMPTY_ROOT(root); +} + +static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, + const struct drm_buddy_block *node) +{ + return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); +} + +static bool rbtree_block_offset_less(struct rb_node *block, + const struct rb_node *node) +{ + return drm_buddy_block_offset_less(rbtree_get_free_block(block), + rbtree_get_free_block(node)); +} + +static void rbtree_insert(struct drm_buddy *mm, + struct drm_buddy_block *block, + enum drm_buddy_free_tree tree) +{ + rb_add(&block->rb, + &mm->free_trees[tree][drm_buddy_block_order(block)], + rbtree_block_offset_less); +} + +static void rbtree_remove(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + unsigned int order = 
drm_buddy_block_order(block); + enum drm_buddy_free_tree tree; + struct rb_root *root; + + tree = get_block_tree(block); + root = &mm->free_trees[tree][order]; + + rb_erase(&block->rb, root); + RB_CLEAR_NODE(&block->rb); } static void clear_reset(struct drm_buddy_block *block) @@ -70,29 +123,34 @@ static void mark_cleared(struct drm_buddy_block *block) block->header |= DRM_BUDDY_HEADER_CLEAR; } -static void mark_allocated(struct drm_buddy_block *block) +static void mark_allocated(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_ALLOCATED; - list_del(&block->link); + rbtree_remove(mm, block); } static void mark_free(struct drm_buddy *mm, struct drm_buddy_block *block) { + enum drm_buddy_free_tree tree; + block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_FREE; - list_insert_sorted(mm, block); + tree = get_block_tree(block); + rbtree_insert(mm, block, tree); } -static void mark_split(struct drm_buddy_block *block) +static void mark_split(struct drm_buddy *mm, + struct drm_buddy_block *block) { block->header &= ~DRM_BUDDY_HEADER_STATE; block->header |= DRM_BUDDY_SPLIT; - list_del(&block->link); + rbtree_remove(mm, block); } static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) @@ -148,7 +206,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm, mark_cleared(parent); } - list_del(&buddy->link); + rbtree_remove(mm, buddy); if (force_merge && drm_buddy_block_is_clear(buddy)) mm->clear_avail -= drm_buddy_block_size(mm, buddy); @@ -169,7 +227,7 @@ static int __force_merge(struct drm_buddy *mm, u64 end, unsigned int min_order) { - unsigned int order; + unsigned int tree, order; int i; if (!min_order) @@ -178,44 +236,48 @@ static int __force_merge(struct drm_buddy *mm, if (min_order > mm->max_order) return -EINVAL; - for (i = min_order - 1; i >= 0; i--) { - struct drm_buddy_block *block, *prev; + for_each_free_tree(tree) { + for (i = min_order - 1; i >= 0; i--) { + 
struct rb_node *iter = rb_last(&mm->free_trees[tree][i]); - list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) { - struct drm_buddy_block *buddy; - u64 block_start, block_end; + while (iter) { + struct drm_buddy_block *block, *buddy; + u64 block_start, block_end; - if (!block->parent) - continue; + block = rbtree_get_free_block(iter); + iter = rb_prev(iter); - block_start = drm_buddy_block_offset(block); - block_end = block_start + drm_buddy_block_size(mm, block) - 1; + if (!block || !block->parent) + continue; - if (!contains(start, end, block_start, block_end)) - continue; + block_start = drm_buddy_block_offset(block); + block_end = block_start + drm_buddy_block_size(mm, block) - 1; - buddy = __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) - continue; + if (!contains(start, end, block_start, block_end)) + continue; - WARN_ON(drm_buddy_block_is_clear(block) == - drm_buddy_block_is_clear(buddy)); + buddy = __get_buddy(block); + if (!drm_buddy_block_is_free(buddy)) + continue; - /* - * If the prev block is same as buddy, don't access the - * block in the next iteration as we would free the - * buddy block as part of the free function. - */ - if (prev == buddy) - prev = list_prev_entry(prev, link); + WARN_ON(drm_buddy_block_is_clear(block) == + drm_buddy_block_is_clear(buddy)); - list_del(&block->link); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -= drm_buddy_block_size(mm, block); + /* + * Advance to the next node when the current node is the buddy, + * as freeing the block will also remove its buddy from the tree. 
+ */ + if (iter == &buddy->rb) + iter = rb_prev(iter); - order = __drm_buddy_free(mm, block, true); - if (order >= min_order) - return 0; + rbtree_remove(mm, block); + if (drm_buddy_block_is_clear(block)) + mm->clear_avail -= drm_buddy_block_size(mm, block); + + order = __drm_buddy_free(mm, block, true); + if (order >= min_order) + return 0; + } } } @@ -236,8 +298,8 @@ static int __force_merge(struct drm_buddy *mm, */ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) { - unsigned int i; - u64 offset; + unsigned int i, j, root_count = 0; + u64 offset = 0; if (size < chunk_size) return -EINVAL; @@ -258,14 +320,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); - mm->free_list = kmalloc_array(mm->max_order + 1, - sizeof(struct list_head), - GFP_KERNEL); - if (!mm->free_list) + mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, + sizeof(*mm->free_trees), + GFP_KERNEL); + if (!mm->free_trees) return -ENOMEM; - for (i = 0; i <= mm->max_order; ++i) - INIT_LIST_HEAD(&mm->free_list[i]); + for_each_free_tree(i) { + mm->free_trees[i] = kmalloc_array(mm->max_order + 1, + sizeof(struct rb_root), + GFP_KERNEL); + if (!mm->free_trees[i]) + goto out_free_tree; + + for (j = 0; j <= mm->max_order; ++j) + mm->free_trees[i][j] = RB_ROOT; + } mm->n_roots = hweight64(size); @@ -273,10 +343,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) sizeof(struct drm_buddy_block *), GFP_KERNEL); if (!mm->roots) - goto out_free_list; - - offset = 0; - i = 0; + goto out_free_tree; /* * Split into power-of-two blocks, in case we are given a size that is @@ -296,24 +363,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) mark_free(mm, root); - BUG_ON(i > mm->max_order); + BUG_ON(root_count > mm->max_order); BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); - mm->roots[i] = root; + mm->roots[root_count] = root; offset += root_size; size -= root_size; - i++; + 
root_count++; } while (size); return 0; out_free_roots: - while (i--) - drm_block_free(mm, mm->roots[i]); + while (root_count--) + drm_block_free(mm, mm->roots[root_count]); kfree(mm->roots); -out_free_list: - kfree(mm->free_list); +out_free_tree: + while (i--) + kfree(mm->free_trees[i]); + kfree(mm->free_trees); return -ENOMEM; } EXPORT_SYMBOL(drm_buddy_init); @@ -323,7 +392,7 @@ EXPORT_SYMBOL(drm_buddy_init); * * @mm: DRM buddy manager to free * - * Cleanup memory manager resources and the freelist + * Cleanup memory manager resources and the freetree */ void drm_buddy_fini(struct drm_buddy *mm) { @@ -349,8 +418,9 @@ void drm_buddy_fini(struct drm_buddy *mm) WARN_ON(mm->avail != mm->size); + for_each_free_tree(i) + kfree(mm->free_trees[i]); kfree(mm->roots); - kfree(mm->free_list); } EXPORT_SYMBOL(drm_buddy_fini); @@ -374,8 +444,7 @@ static int split_block(struct drm_buddy *mm, return -ENOMEM; } - mark_free(mm, block->left); - mark_free(mm, block->right); + mark_split(mm, block); if (drm_buddy_block_is_clear(block)) { mark_cleared(block->left); @@ -383,7 +452,8 @@ static int split_block(struct drm_buddy *mm, clear_reset(block); } - mark_split(block); + mark_free(mm, block->left); + mark_free(mm, block->right); return 0; } @@ -412,10 +482,11 @@ EXPORT_SYMBOL(drm_get_buddy); * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block - * in the freelist. + * in the freetree. */ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) { + enum drm_buddy_free_tree src_tree, dst_tree; u64 root_size, size, start; unsigned int order; int i; @@ -430,19 +501,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) size -= root_size; } - for (i = 0; i <= mm->max_order; ++i) { - struct drm_buddy_block *block; + src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + dst_tree = is_clear ? 
DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; - list_for_each_entry_reverse(block, &mm->free_list[i], link) { - if (is_clear != drm_buddy_block_is_clear(block)) { - if (is_clear) { - mark_cleared(block); - mm->clear_avail += drm_buddy_block_size(mm, block); - } else { - clear_reset(block); - mm->clear_avail -= drm_buddy_block_size(mm, block); - } + for (i = 0; i <= mm->max_order; ++i) { + struct rb_root *root = &mm->free_trees[src_tree][i]; + struct drm_buddy_block *block, *tmp; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + rbtree_remove(mm, block); + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); } + + rbtree_insert(mm, block, dst_tree); } } } @@ -632,23 +708,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm, } static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, unsigned int order, - unsigned long flags) +get_maxblock(struct drm_buddy *mm, + unsigned int order, + enum drm_buddy_free_tree tree) { struct drm_buddy_block *max_block = NULL, *block = NULL; + struct rb_root *root; unsigned int i; for (i = order; i <= mm->max_order; ++i) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + root = &mm->free_trees[tree][i]; + block = rbtree_last_free_block(root); if (!block) continue; @@ -667,46 +737,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order, } static struct drm_buddy_block * -alloc_from_freelist(struct drm_buddy *mm, +alloc_from_freetree(struct drm_buddy *mm, unsigned int order, unsigned long flags) { struct drm_buddy_block *block = NULL; + struct rb_root *root; + enum drm_buddy_free_tree tree; unsigned int tmp; int err; + tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? 
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; + if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block = get_maxblock(mm, order, flags); + block = get_maxblock(mm, order, tree); if (block) /* Store the obtained block order */ tmp = drm_buddy_block_order(block); } else { for (tmp = order; tmp <= mm->max_order; ++tmp) { - struct drm_buddy_block *tmp_block; - - list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) { - if (block_incompatible(tmp_block, flags)) - continue; - - block = tmp_block; - break; - } - + /* Get RB tree root for this order and tree */ + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); if (block) break; } } if (!block) { - /* Fallback method */ + /* Try allocating from the other tree */ + tree = (tree == DRM_BUDDY_CLEAR_TREE) ? + DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; + for (tmp = order; tmp <= mm->max_order; ++tmp) { - if (!list_empty(&mm->free_list[tmp])) { - block = list_last_entry(&mm->free_list[tmp], - struct drm_buddy_block, - link); - if (block) - break; - } + root = &mm->free_trees[tree][tmp]; + block = rbtree_last_free_block(root); + if (block) + break; } if (!block) @@ -771,7 +839,7 @@ static int __alloc_range(struct drm_buddy *mm, if (contains(start, end, block_start, block_end)) { if (drm_buddy_block_is_free(block)) { - mark_allocated(block); + mark_allocated(mm, block); total_allocated += drm_buddy_block_size(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) @@ -849,10 +917,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, { u64 rhs_offset, lhs_offset, lhs_size, filled; struct drm_buddy_block *block; - struct list_head *list; + unsigned int tree, order; LIST_HEAD(blocks_lhs); unsigned long pages; - unsigned int order; u64 modify_size; int err; @@ -862,35 +929,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm, if (order == 0) return -ENOSPC; - list = &mm->free_list[order]; - if (list_empty(list)) - return -ENOSPC; + 
for_each_free_tree(tree) { + struct rb_root *root; + struct rb_node *iter; - list_for_each_entry_reverse(block, list, link) { - /* Allocate blocks traversing RHS */ - rhs_offset = drm_buddy_block_offset(block); - err = __drm_buddy_alloc_range(mm, rhs_offset, size, - &filled, blocks); - if (!err || err != -ENOSPC) - return err; + root = &mm->free_trees[tree][order]; + if (rbtree_is_empty(root)) + continue; - lhs_size = max((size - filled), min_block_size); - if (!IS_ALIGNED(lhs_size, min_block_size)) - lhs_size = round_up(lhs_size, min_block_size); + iter = rb_last(root); + while (iter) { + block = rbtree_get_free_block(iter); - /* Allocate blocks traversing LHS */ - lhs_offset = drm_buddy_block_offset(block) - lhs_size; - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, - NULL, &blocks_lhs); - if (!err) { - list_splice(&blocks_lhs, blocks); - return 0; - } else if (err != -ENOSPC) { + /* Allocate blocks traversing RHS */ + rhs_offset = drm_buddy_block_offset(block); + err = __drm_buddy_alloc_range(mm, rhs_offset, size, + &filled, blocks); + if (!err || err != -ENOSPC) + return err; + + lhs_size = max((size - filled), min_block_size); + if (!IS_ALIGNED(lhs_size, min_block_size)) + lhs_size = round_up(lhs_size, min_block_size); + + /* Allocate blocks traversing LHS */ + lhs_offset = drm_buddy_block_offset(block) - lhs_size; + err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, + NULL, &blocks_lhs); + if (!err) { + list_splice(&blocks_lhs, blocks); + return 0; + } else if (err != -ENOSPC) { + drm_buddy_free_list_internal(mm, blocks); + return err; + } + /* Free blocks for the next iteration */ drm_buddy_free_list_internal(mm, blocks); - return err; + + iter = rb_prev(iter); } - /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); } return -ENOSPC; @@ -976,7 +1053,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm, list_add(&block->tmp_link, &dfs); err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); if (err) { - 
mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -999,8 +1076,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, return __drm_buddy_alloc_range_bias(mm, start, end, order, flags); else - /* Allocate from freelist */ - return alloc_from_freelist(mm, order, flags); + /* Allocate from freetree */ + return alloc_from_freetree(mm, order, flags); } /** @@ -1017,8 +1094,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm, * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. * - * alloc_from_freelist() called when *no* range restrictions - * are enforced, which picks the block from the freelist. + * alloc_from_freetree() called when *no* range restrictions + * are enforced, which picks the block from the freetree. * * Returns: * 0 on success, error code on failure. @@ -1120,7 +1197,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm, } } while (1); - mark_allocated(block); + mark_allocated(mm, block); mm->avail -= drm_buddy_block_size(mm, block); if (drm_buddy_block_is_clear(block)) mm->clear_avail -= drm_buddy_block_size(mm, block); @@ -1201,12 +1278,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); for (order = mm->max_order; order >= 0; order--) { - struct drm_buddy_block *block; + struct drm_buddy_block *block, *tmp; + struct rb_root *root; u64 count = 0, free; + unsigned int tree; - list_for_each_entry(block, &mm->free_list[order], link) { - BUG_ON(!drm_buddy_block_is_free(block)); - count++; + for_each_free_tree(tree) { + root = &mm->free_trees[tree][order]; + + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { + BUG_ON(!drm_buddy_block_is_free(block)); + count++; + } } drm_printf(p, "order-%2d ", order); diff --git a/drivers/gpu/drm/drm_client.c 
b/drivers/gpu/drm/drm_client.c index 3fa38d4ac70b..504ec5bdfa2c 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -168,29 +169,59 @@ void drm_client_release(struct drm_client_dev *client) drm_client_modeset_free(client); drm_client_close(client); + + if (client->funcs && client->funcs->free) + client->funcs->free(client); + drm_dev_put(dev); } EXPORT_SYMBOL(drm_client_release); -static void drm_client_buffer_delete(struct drm_client_buffer *buffer) +/** + * drm_client_buffer_delete - Delete a client buffer + * @buffer: DRM client buffer + */ +void drm_client_buffer_delete(struct drm_client_buffer *buffer) { - if (buffer->gem) { - drm_gem_vunmap(buffer->gem, &buffer->map); - drm_gem_object_put(buffer->gem); - } + struct drm_gem_object *gem; + int ret; + + if (!buffer) + return; + + gem = buffer->fb->obj[0]; + drm_gem_vunmap(gem, &buffer->map); + + ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); + if (ret) + drm_err(buffer->client->dev, + "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); + + drm_gem_object_put(buffer->gem); kfree(buffer); } +EXPORT_SYMBOL(drm_client_buffer_delete); static struct drm_client_buffer * drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, - u32 format, u32 *handle) + u32 format, u32 handle, u32 pitch) { - const struct drm_format_info *info = drm_format_info(format); - struct drm_mode_create_dumb dumb_args = { }; + struct drm_mode_fb_cmd2 fb_req = { + .width = width, + .height = height, + .pixel_format = format, + .handles = { + handle, + }, + .pitches = { + pitch, + }, + }; struct drm_device *dev = client->dev; struct drm_client_buffer *buffer; struct drm_gem_object *obj; + struct drm_framebuffer *fb; int ret; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); @@ -199,28 +230,38 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, 
buffer->client = client; - dumb_args.width = width; - dumb_args.height = height; - dumb_args.bpp = drm_format_info_bpp(info, 0); - ret = drm_mode_create_dumb(dev, &dumb_args, client->file); - if (ret) - goto err_delete; - - obj = drm_gem_object_lookup(client->file, dumb_args.handle); + obj = drm_gem_object_lookup(client->file, handle); if (!obj) { ret = -ENOENT; goto err_delete; } - buffer->pitch = dumb_args.pitch; + ret = drm_mode_addfb2(dev, &fb_req, client->file); + if (ret) + goto err_drm_gem_object_put; + + fb = drm_framebuffer_lookup(dev, client->file, fb_req.fb_id); + if (drm_WARN_ON(dev, !fb)) { + ret = -ENOENT; + goto err_drm_mode_rmfb; + } + + /* drop the reference we picked up in framebuffer lookup */ + drm_framebuffer_put(fb); + + strscpy(fb->comm, client->name, TASK_COMM_LEN); + buffer->gem = obj; - *handle = dumb_args.handle; + buffer->fb = fb; return buffer; +err_drm_mode_rmfb: + drm_mode_rmfb(dev, fb_req.fb_id, client->file); +err_drm_gem_object_put: + drm_gem_object_put(obj); err_delete: - drm_client_buffer_delete(buffer); - + kfree(buffer); return ERR_PTR(ret); } @@ -247,7 +288,7 @@ err_delete: int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { - struct drm_gem_object *gem = buffer->gem; + struct drm_gem_object *gem = buffer->fb->obj[0]; struct iosys_map *map = &buffer->map; int ret; @@ -276,7 +317,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap_local); */ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) { - struct drm_gem_object *gem = buffer->gem; + struct drm_gem_object *gem = buffer->fb->obj[0]; struct iosys_map *map = &buffer->map; drm_gem_vunmap_locked(gem, map); @@ -307,9 +348,10 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local); int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct iosys_map *map_copy) { + struct drm_gem_object *gem = buffer->fb->obj[0]; int ret; - ret = drm_gem_vmap(buffer->gem, &buffer->map); + ret = drm_gem_vmap(gem, &buffer->map); if (ret) return 
ret; *map_copy = buffer->map; @@ -328,57 +370,14 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); */ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { - drm_gem_vunmap(buffer->gem, &buffer->map); + struct drm_gem_object *gem = buffer->fb->obj[0]; + + drm_gem_vunmap(gem, &buffer->map); } EXPORT_SYMBOL(drm_client_buffer_vunmap); -static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) -{ - int ret; - - if (!buffer->fb) - return; - - ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file); - if (ret) - drm_err(buffer->client->dev, - "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret); - - buffer->fb = NULL; -} - -static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, - u32 width, u32 height, u32 format, - u32 handle) -{ - struct drm_client_dev *client = buffer->client; - struct drm_mode_fb_cmd2 fb_req = { }; - int ret; - - fb_req.width = width; - fb_req.height = height; - fb_req.pixel_format = format; - fb_req.handles[0] = handle; - fb_req.pitches[0] = buffer->pitch; - - ret = drm_mode_addfb2(client->dev, &fb_req, client->file); - if (ret) - return ret; - - buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id); - if (WARN_ON(!buffer->fb)) - return -ENOENT; - - /* drop the reference we picked up in framebuffer lookup */ - drm_framebuffer_put(buffer->fb); - - strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN); - - return 0; -} - /** - * drm_client_framebuffer_create - Create a client framebuffer + * drm_client_buffer_create_dumb - Create a client buffer backed by a dumb buffer * @client: DRM client * @width: Framebuffer width * @height: Framebuffer height @@ -386,24 +385,33 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer, * * This function creates a &drm_client_buffer which consists of a * &drm_framebuffer backed by a dumb buffer. - * Call drm_client_framebuffer_delete() to free the buffer. + * Call drm_client_buffer_delete() to free the buffer. 
* * Returns: * Pointer to a client buffer or an error pointer on failure. */ struct drm_client_buffer * -drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format) +drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format) { + const struct drm_format_info *info = drm_format_info(format); + struct drm_device *dev = client->dev; + struct drm_mode_create_dumb dumb_args = { }; struct drm_client_buffer *buffer; - u32 handle; int ret; - buffer = drm_client_buffer_create(client, width, height, format, - &handle); - if (IS_ERR(buffer)) - return buffer; + dumb_args.width = width; + dumb_args.height = height; + dumb_args.bpp = drm_format_info_bpp(info, 0); + ret = drm_mode_create_dumb(dev, &dumb_args, client->file); + if (ret) + return ERR_PTR(ret); - ret = drm_client_buffer_addfb(buffer, width, height, format, handle); + buffer = drm_client_buffer_create(client, width, height, format, + dumb_args.handle, dumb_args.pitch); + if (IS_ERR(buffer)) { + ret = PTR_ERR(buffer); + goto err_drm_mode_destroy_dumb; + } /* * The handle is only needed for creating the framebuffer, destroy it @@ -411,34 +419,19 @@ drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 heig * object as DMA-buf. The framebuffer and our buffer structure are still * holding references to the GEM object to prevent its destruction. 
*/ - drm_mode_destroy_dumb(client->dev, handle, client->file); - - if (ret) { - drm_client_buffer_delete(buffer); - return ERR_PTR(ret); - } + drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file); return buffer; + +err_drm_mode_destroy_dumb: + drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file); + return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_client_framebuffer_create); +EXPORT_SYMBOL(drm_client_buffer_create_dumb); /** - * drm_client_framebuffer_delete - Delete a client framebuffer - * @buffer: DRM client buffer (can be NULL) - */ -void drm_client_framebuffer_delete(struct drm_client_buffer *buffer) -{ - if (!buffer) - return; - - drm_client_buffer_rmfb(buffer); - drm_client_buffer_delete(buffer); -} -EXPORT_SYMBOL(drm_client_framebuffer_delete); - -/** - * drm_client_framebuffer_flush - Manually flush client framebuffer - * @buffer: DRM client buffer (can be NULL) + * drm_client_buffer_flush - Manually flush client buffer + * @buffer: DRM client buffer * @rect: Damage rectangle (if NULL flushes all) * * This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes @@ -447,7 +440,7 @@ EXPORT_SYMBOL(drm_client_framebuffer_delete); * Returns: * Zero on success or negative error code on failure. 
*/ -int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) +int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect) { if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty) return 0; @@ -467,4 +460,4 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file, 0, 0, NULL, 0); } -EXPORT_SYMBOL(drm_client_framebuffer_flush); +EXPORT_SYMBOL(drm_client_buffer_flush); diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c index c83196ad8b59..d25dc5250983 100644 --- a/drivers/gpu/drm/drm_client_event.c +++ b/drivers/gpu/drm/drm_client_event.c @@ -39,12 +39,13 @@ void drm_client_dev_unregister(struct drm_device *dev) mutex_lock(&dev->clientlist_mutex); list_for_each_entry_safe(client, tmp, &dev->clientlist, list) { list_del(&client->list); - if (client->funcs && client->funcs->unregister) { + /* + * Unregistering consumes and frees the client. 
+ */ + if (client->funcs && client->funcs->unregister) client->funcs->unregister(client); - } else { + else drm_client_release(client); - kfree(client); - } } mutex_unlock(&dev->clientlist_mutex); } @@ -122,7 +123,7 @@ void drm_client_dev_restore(struct drm_device *dev) mutex_unlock(&dev->clientlist_mutex); } -static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_suspend(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -131,7 +132,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return 0; if (client->funcs && client->funcs->suspend) - ret = client->funcs->suspend(client, holds_console_lock); + ret = client->funcs->suspend(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = true; @@ -139,20 +140,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_ return ret; } -void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_suspend(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (!client->suspended) - drm_client_suspend(client, holds_console_lock); + drm_client_suspend(client); } mutex_unlock(&dev->clientlist_mutex); } EXPORT_SYMBOL(drm_client_dev_suspend); -static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock) +static int drm_client_resume(struct drm_client_dev *client) { struct drm_device *dev = client->dev; int ret = 0; @@ -161,7 +162,7 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l return 0; if (client->funcs && client->funcs->resume) - ret = client->funcs->resume(client, holds_console_lock); + ret = client->funcs->resume(client); drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret); client->suspended = false; @@ -172,14 +173,14 @@ static int drm_client_resume(struct 
drm_client_dev *client, bool holds_console_l return ret; } -void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +void drm_client_dev_resume(struct drm_device *dev) { struct drm_client_dev *client; mutex_lock(&dev->clientlist_mutex); list_for_each_entry(client, &dev->clientlist, list) { if (client->suspended) - drm_client_resume(client, holds_console_lock); + drm_client_resume(client); } mutex_unlock(&dev->clientlist_mutex); } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 9c2c3b0c8c47..fc4caf7da5fc 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode) } EXPORT_SYMBOL(drm_client_modeset_dpms); +/** + * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur + * @client: DRM client + * @crtc_index: The index of the CRTC to wait on + * + * Block the caller until the given CRTC has seen a VBLANK. Do nothing + * if the CRTC is disabled. If there's another DRM master present, fail + * with -EBUSY. + * + * Returns: + * 0 on success, or negative error code otherwise. + */ +int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index) +{ + struct drm_device *dev = client->dev; + struct drm_crtc *crtc; + int ret; + + /* + * Rate-limit update frequency to vblank. If there's a DRM master + * present, it could interfere while we're waiting for the vblank + * event. Don't wait in this case. + */ + if (!drm_master_internal_acquire(dev)) + return -EBUSY; + + crtc = client->modesets[crtc_index].crtc; + + /* + * Only wait for a vblank event if the CRTC is enabled, otherwise + * just don't do anything, not even report an error. 
+ */ + ret = drm_crtc_vblank_get(crtc); + if (!ret) { + drm_crtc_wait_one_vblank(crtc); + drm_crtc_vblank_put(crtc); + } + + drm_master_internal_release(dev); + + return 0; +} +EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank); + #ifdef CONFIG_DRM_KUNIT_TEST #include "tests/drm_client_modeset_test.c" #endif diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c index b4fd43783c50..58d0bb6d2676 100644 --- a/drivers/gpu/drm/drm_displayid.c +++ b/drivers/gpu/drm/drm_displayid.c @@ -9,6 +9,34 @@ #include "drm_crtc_internal.h" #include "drm_displayid_internal.h" +enum { + QUIRK_IGNORE_CHECKSUM, +}; + +struct displayid_quirk { + const struct drm_edid_ident ident; + u8 quirks; +}; + +static const struct displayid_quirk quirks[] = { + { + .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"), + .quirks = BIT(QUIRK_IGNORE_CHECKSUM), + }, +}; + +static u8 get_quirks(const struct drm_edid *drm_edid) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(quirks); i++) { + if (drm_edid_match(drm_edid, &quirks[i].ident)) + return quirks[i].quirks; + } + + return 0; +} + static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { @@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid, int length, int index) } static const struct displayid_header * -validate_displayid(const u8 *displayid, int length, int idx) +validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum) { int i, dispid_length; u8 csum = 0; @@ -41,33 +69,35 @@ validate_displayid(const u8 *displayid, int length, int idx) for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { - DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); - return ERR_PTR(-EINVAL); + DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum, + ignore_checksum ? 
" (ignoring)" : ""); + + if (!ignore_checksum) + return ERR_PTR(-EINVAL); } return base; } -static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, - int *length, int *idx, - int *ext_index) +static const u8 *find_next_displayid_extension(struct displayid_iter *iter) { const struct displayid_header *base; const u8 *displayid; + bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM); - displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index); + displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index); if (!displayid) return NULL; /* EDID extensions block checksum isn't for us */ - *length = EDID_LENGTH - 1; - *idx = 1; + iter->length = EDID_LENGTH - 1; + iter->idx = 1; - base = validate_displayid(displayid, *length, *idx); + base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum); if (IS_ERR(base)) return NULL; - *length = *idx + sizeof(*base) + base->bytes; + iter->length = iter->idx + sizeof(*base) + base->bytes; return displayid; } @@ -78,6 +108,7 @@ void displayid_iter_edid_begin(const struct drm_edid *drm_edid, memset(iter, 0, sizeof(*iter)); iter->drm_edid = drm_edid; + iter->quirks = get_quirks(drm_edid); } static const struct displayid_block * @@ -126,10 +157,7 @@ __displayid_iter_next(struct displayid_iter *iter) /* The first section we encounter is the base section */ bool base_section = !iter->section; - iter->section = drm_find_displayid_extension(iter->drm_edid, - &iter->length, - &iter->idx, - &iter->ext_index); + iter->section = find_next_displayid_extension(iter); if (!iter->section) { iter->drm_edid = NULL; return NULL; diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h index 957dd0619f5c..5b1b32f73516 100644 --- a/drivers/gpu/drm/drm_displayid_internal.h +++ b/drivers/gpu/drm/drm_displayid_internal.h @@ -167,6 +167,8 @@ struct displayid_iter { u8 version; u8 primary_use; + + u8 quirks; }; void 
displayid_iter_edid_begin(const struct drm_edid *drm_edid, diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c index 9dc0408fbbea..5b956229c82f 100644 --- a/drivers/gpu/drm/drm_draw.c +++ b/drivers/gpu/drm/drm_draw.c @@ -127,7 +127,7 @@ EXPORT_SYMBOL(drm_draw_fill16); void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, - u16 color) + u32 color) { unsigned int y, x; diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h index f121ee7339dc..20cb404e23ea 100644 --- a/drivers/gpu/drm/drm_draw_internal.h +++ b/drivers/gpu/drm/drm_draw_internal.h @@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch, void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, - u16 color); + u32 color); void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch, unsigned int height, unsigned int width, diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index 70032bba1c97..e2b62e5fb891 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -25,8 +25,11 @@ #include #include +#include +#include #include #include +#include #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -57,6 +60,134 @@ * a hardware-specific ioctl to allocate suitable buffer objects. 
*/ +static int drm_mode_align_dumb(struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align) +{ + u32 pitch = args->pitch; + u32 size; + + if (!pitch) + return -EINVAL; + + if (hw_pitch_align) + pitch = roundup(pitch, hw_pitch_align); + + if (!hw_size_align) + hw_size_align = PAGE_SIZE; + else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE)) + return -EINVAL; /* TODO: handle this if necessary */ + + if (check_mul_overflow(args->height, pitch, &size)) + return -EINVAL; + size = ALIGN(size, hw_size_align); + if (!size) + return -EINVAL; + + args->pitch = pitch; + args->size = size; + + return 0; +} + +/** + * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers + * @dev: DRM device + * @args: Parameters for the dumb buffer + * @hw_pitch_align: Hardware scanline alignment in bytes + * @hw_size_align: Hardware buffer-size alignment in bytes + * + * The helper drm_mode_size_dumb() calculates the size of the buffer + * allocation and the scanline size for a dumb buffer. Callers have to + * set the buffer's width, height and color mode in the argument @args. + * The helper validates the correctness of the input and tests for + * possible overflows. If successful, it returns the dumb buffer's + * required scanline pitch and size in &args. + * + * The parameter @hw_pitch_align allows the driver to specify an + * alignment for the scanline pitch, if the hardware requires any. The + * calculated pitch will be a multiple of the alignment. The parameter + * @hw_size_align allows to specify an alignment for buffer sizes. The + * provided alignment should represent requirements of the graphics + * hardware. drm_mode_size_dumb() handles GEM-related constraints + * automatically across all drivers and hardware. For example, the + * returned buffer size is always a multiple of PAGE_SIZE, which is + * required by mmap(). + * + * Returns: + * Zero on success, or a negative error code otherwise. 
+ */ +int drm_mode_size_dumb(struct drm_device *dev, + struct drm_mode_create_dumb *args, + unsigned long hw_pitch_align, + unsigned long hw_size_align) +{ + u64 pitch = 0; + u32 fourcc; + + /* + * The scanline pitch depends on the buffer width and the color + * format. The latter is specified as a color-mode constant for + * which we first have to find the corresponding color format. + * + * Different color formats can have the same color-mode constant. + * For example XRGB8888 and BGRX8888 both have a color mode of 32. + * It is possible to use different formats for dumb-buffer allocation + * and rendering as long as all involved formats share the same + * color-mode constant. + */ + fourcc = drm_driver_color_mode_format(dev, args->bpp); + if (fourcc != DRM_FORMAT_INVALID) { + const struct drm_format_info *info = drm_format_info(fourcc); + + if (!info) + return -EINVAL; + pitch = drm_format_info_min_pitch(info, 0, args->width); + } else if (args->bpp) { + /* + * Some userspace throws in arbitrary values for bpp and + * relies on the kernel to figure it out. In this case we + * fall back to the old method of using bpp directly. The + * over-commitment of memory from the rounding is acceptable + * for compatibility with legacy userspace. We have a number + * of deprecated legacy values that are explicitly supported. + */ + switch (args->bpp) { + default: + drm_warn_once(dev, + "Unknown color mode %u; guessing buffer size.\n", + args->bpp); + fallthrough; + /* + * These constants represent various YUV formats supported by + * drm_gem_afbc_get_bpp(). + */ + case 12: // DRM_FORMAT_YUV420_8BIT + case 15: // DRM_FORMAT_YUV420_10BIT + case 30: // DRM_FORMAT_VUY101010 + fallthrough; + /* + * Used by Mesa and Gstreamer to allocate NV formats and others + * as RGB buffers. Technically, XRGB16161616F formats are RGB, + * but the dumb buffers are not supposed to be used for anything + * beyond 32 bits per pixels. 
+ */ + case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010 + case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F + pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8); + break; + } + } + + if (!pitch || pitch > U32_MAX) + return -EINVAL; + + args->pitch = pitch; + + return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align); +} +EXPORT_SYMBOL(drm_mode_size_dumb); + int drm_mode_create_dumb(struct drm_device *dev, struct drm_mode_create_dumb *args, struct drm_file *file_priv) @@ -99,7 +230,30 @@ int drm_mode_create_dumb(struct drm_device *dev, int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - return drm_mode_create_dumb(dev, data, file_priv); + struct drm_mode_create_dumb *args = data; + int err; + + err = drm_mode_create_dumb(dev, args, file_priv); + if (err) { + args->handle = 0; + args->pitch = 0; + args->size = 0; + } + return err; +} + +static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args, + struct drm_file *file_priv) +{ + if (!dev->driver->dumb_create) + return -ENOSYS; + + if (dev->driver->dumb_map_offset) + return dev->driver->dumb_map_offset(file_priv, dev, args->handle, + &args->offset); + else + return drm_gem_dumb_map_offset(file_priv, dev, args->handle, + &args->offset); } /** @@ -120,17 +274,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; + int err; - if (!dev->driver->dumb_create) - return -ENOSYS; - - if (dev->driver->dumb_map_offset) - return dev->driver->dumb_map_offset(file_priv, dev, - args->handle, - &args->offset); - else - return drm_gem_dumb_map_offset(file_priv, dev, args->handle, - &args->offset); + err = drm_mode_mmap_dumb(dev, args, file_priv); + if (err) + args->offset = 0; + return err; } int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 
11a5b60cb9ce..53e9dc0543de 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -368,6 +368,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper) unsigned long flags; int ret; + mutex_lock(&helper->lock); + drm_client_modeset_wait_for_vblank(&helper->client, 0); + mutex_unlock(&helper->lock); + if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) return; @@ -1068,15 +1072,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - struct drm_crtc *crtc; int ret = 0; - mutex_lock(&fb_helper->lock); - if (!drm_master_internal_acquire(dev)) { - ret = -EBUSY; - goto unlock; - } + guard(mutex)(&fb_helper->lock); switch (cmd) { case FBIO_WAITFORVSYNC: @@ -1096,28 +1094,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, * make. If we're not smart enough here, one should * just consider switch the userspace to KMS. */ - crtc = fb_helper->client.modesets[0].crtc; - - /* - * Only wait for a vblank event if the CRTC is - * enabled, otherwise just don't do anythintg, - * not even report an error. 
- */ - ret = drm_crtc_vblank_get(crtc); - if (!ret) { - drm_crtc_wait_one_vblank(crtc); - drm_crtc_vblank_put(crtc); - } - - ret = 0; + ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0); break; default: ret = -ENOTTY; } - drm_master_internal_release(dev); -unlock: - mutex_unlock(&fb_helper->lock); return ret; } EXPORT_SYMBOL(drm_fb_helper_ioctl); diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index 8bd626ef16c7..12a8f5a5ada5 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -10,6 +10,7 @@ #include #include #include +#include /* * struct fb_ops @@ -55,10 +56,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_dma_fb_ops = { @@ -90,10 +89,8 @@ static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info) vfree(shadow); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = { @@ -285,7 +282,7 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -328,7 +325,7 @@ err_drm_client_buffer_vunmap: fb_helper->buffer = NULL; drm_client_buffer_vunmap(buffer); err_drm_client_buffer_delete: - 
drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c index 1e827bf8b815..ac2b22e05cd6 100644 --- a/drivers/gpu/drm/drm_fbdev_shmem.c +++ b/drivers/gpu/drm/drm_fbdev_shmem.c @@ -9,6 +9,7 @@ #include #include #include +#include /* * struct fb_ops @@ -63,10 +64,8 @@ static void drm_fbdev_shmem_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); drm_client_buffer_vunmap(fb_helper->buffer); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_shmem_fb_ops = { @@ -149,7 +148,7 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, sizes->surface_bpp); format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -206,7 +205,7 @@ err_drm_client_buffer_vunmap: fb_helper->buffer = NULL; drm_client_buffer_vunmap(buffer); err_drm_client_buffer_delete: - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c index 85feb55bba11..c7ad779ba590 100644 --- a/drivers/gpu/drm/drm_fbdev_ttm.c +++ b/drivers/gpu/drm/drm_fbdev_ttm.c @@ -50,11 +50,9 @@ static void drm_fbdev_ttm_fb_destroy(struct fb_info *info) fb_deferred_io_cleanup(info); drm_fb_helper_fini(fb_helper); vfree(shadow); - drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_buffer_delete(fb_helper->buffer); 
drm_client_release(&fb_helper->client); - drm_fb_helper_unprepare(fb_helper); - kfree(fb_helper); } static const struct fb_ops drm_fbdev_ttm_fb_ops = { @@ -189,7 +187,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, + buffer = drm_client_buffer_create_dumb(client, sizes->surface_width, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); @@ -202,7 +200,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, screen_buffer = vzalloc(screen_size); if (!screen_buffer) { ret = -ENOMEM; - goto err_drm_client_framebuffer_delete; + goto err_drm_client_buffer_delete; } info = drm_fb_helper_alloc_info(fb_helper); @@ -235,10 +233,10 @@ err_drm_fb_helper_release_info: drm_fb_helper_release_info(fb_helper); err_vfree: vfree(screen_buffer); -err_drm_client_framebuffer_delete: +err_drm_client_buffer_delete: fb_helper->fb = NULL; fb_helper->buffer = NULL; - drm_client_framebuffer_delete(buffer); + drm_client_buffer_delete(buffer); return ret; } EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 006836554cc2..6cddf05c493b 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -1165,97 +1165,6 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_ } EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444); -/** - * drm_fb_blit - Copy parts of a framebuffer to display memory - * @dst: Array of display-memory addresses to copy to - * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines - * within @dst; can be NULL if scanlines are stored next to each other. 
- * @dst_format: FOURCC code of the display's color format - * @src: The framebuffer memory to copy from - * @fb: The framebuffer to copy from - * @clip: Clip rectangle area to copy - * @state: Transform and conversion state - * - * This function copies parts of a framebuffer to display memory. If the - * formats of the display and the framebuffer mismatch, the blit function - * will attempt to convert between them during the process. The parameters @dst, - * @dst_pitch and @src refer to arrays. Each array must have at least as many - * entries as there are planes in @dst_format's format. Each entry stores the - * value for the format's respective color plane at the same index. - * - * This function does not apply clipping on @dst (i.e. the destination is at the - * top-left corner). - * - * Returns: - * 0 on success, or - * -EINVAL if the color-format conversion failed, or - * a negative error code otherwise. - */ -int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format, - const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, struct drm_format_conv_state *state) -{ - uint32_t fb_format = fb->format->format; - - if (fb_format == dst_format) { - drm_fb_memcpy(dst, dst_pitch, src, fb, clip); - return 0; - } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (fb_format == DRM_FORMAT_XRGB8888) { - if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB1555) { - drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB1555) { - drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state); - return 
0; - } else if (dst_format == DRM_FORMAT_RGBA5551) { - drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB888) { - drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGR888) { - drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB8888) { - drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XBGR8888) { - drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ABGR8888) { - drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_XRGB2101010) { - drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_ARGB2101010) { - drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state); - return 0; - } else if (dst_format == DRM_FORMAT_BGRX8888) { - drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state); - return 0; - } else if (dst_format == DRM_FORMAT_RGB332) { - drm_fb_xrgb8888_to_rgb332(dst, dst_pitch, src, fb, clip, state); - return 0; - } - } - - drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n", - &fb_format, &dst_format); - - return -EINVAL; -} -EXPORT_SYMBOL(drm_fb_blit); - static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels) { u8 *dbuf8 = dbuf; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index adbb73f00d68..18e753ade001 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -1048,7 +1048,7 @@ retry: plane_state->crtc->base.id, plane_state->crtc->name, fb->base.id); - crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc); + crtc_state = 
drm_atomic_get_new_crtc_state(state, plane_state->crtc); ret = drm_atomic_add_affected_connectors(state, plane_state->crtc); if (ret) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index f884d155a832..a1a9c828938b 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev) vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager), GFP_KERNEL); - if (!vma_offset_manager) { - DRM_ERROR("out of memory\n"); + if (!vma_offset_manager) return -ENOMEM; - } dev->vma_offset_manager = vma_offset_manager; drm_vma_offset_manager_init(vma_offset_manager, @@ -785,9 +783,10 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count, int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out) { - int ret; - u32 *handles; + struct drm_device *dev = filp->minor->dev; struct drm_gem_object **objs; + u32 *handles; + int ret; if (!count) return 0; @@ -807,7 +806,7 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, if (copy_from_user(handles, bo_handles, count * sizeof(u32))) { ret = -EFAULT; - DRM_DEBUG("Failed to copy in GEM handles\n"); + drm_dbg_core(dev, "Failed to copy in GEM handles\n"); goto out; } @@ -855,12 +854,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup); long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle, bool wait_all, unsigned long timeout) { - long ret; + struct drm_device *dev = filep->minor->dev; struct drm_gem_object *obj; + long ret; obj = drm_gem_object_lookup(filep, handle); if (!obj) { - DRM_DEBUG("Failed to look up GEM BO %d\n", handle); + drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index ebf305fb24f0..cbb029cc656a 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -334,8 +334,6 
@@ void drm_gem_reset_shadow_plane(struct drm_plane *plane) } shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL); - if (!shadow_plane_state) - return; __drm_gem_reset_shadow_plane(plane, shadow_plane_state); } EXPORT_SYMBOL(drm_gem_reset_shadow_plane); diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index 4f0320df858f..12d8307997a0 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -20,7 +20,9 @@ #include #include #include +#include #include +#include #include /** @@ -304,9 +306,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct drm_gem_dma_object *dma_obj; + int ret; - args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - args->size = args->pitch * args->height; + ret = drm_mode_size_dumb(drm, args, SZ_8, 0); + if (ret) + return ret; dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size, &args->handle); @@ -582,7 +586,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev, ret = dma_buf_vmap_unlocked(attach->dmabuf, &map); if (ret) { - DRM_ERROR("Failed to vmap PRIME buffer\n"); + drm_err(dev, "Failed to vmap PRIME buffer\n"); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 4bc89d33df59..9fd4eb02a20f 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "drm_internal.h" diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 5d1349c34afd..dc94a27710e5 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -48,6 +49,64 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .vm_ops = 
&drm_gem_shmem_vm_ops, }; +static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, + size_t size, bool private, struct vfsmount *gemfs) +{ + struct drm_gem_object *obj = &shmem->base; + int ret = 0; + + if (!obj->funcs) + obj->funcs = &drm_gem_shmem_funcs; + + if (private) { + drm_gem_private_object_init(dev, obj, size); + shmem->map_wc = false; /* dma-buf mappings use always writecombine */ + } else { + ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); + } + if (ret) { + drm_gem_private_object_fini(obj); + return ret; + } + + ret = drm_gem_create_mmap_offset(obj); + if (ret) + goto err_release; + + INIT_LIST_HEAD(&shmem->madv_list); + + if (!private) { + /* + * Our buffers are kept pinned, so allocating them + * from the MOVABLE zone is a really bad idea, and + * conflicts with CMA. See comments above new_inode() + * why this is required _and_ expected if you're + * going to pin these pages. + */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | + __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + } + + return 0; +err_release: + drm_gem_object_release(obj); + return ret; +} + +/** + * drm_gem_shmem_init - Initialize an allocated object. + * @dev: DRM device + * @shmem: The allocated shmem GEM object. + * @size: Size of the object, in bytes. + * + * Returns: + * 0 on success, or a negative error code on failure. 
+ */ +int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size) +{ + return __drm_gem_shmem_init(dev, shmem, size, false, NULL); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_init); + static struct drm_gem_shmem_object * __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, struct vfsmount *gemfs) @@ -70,46 +129,13 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private, obj = &shmem->base; } - if (!obj->funcs) - obj->funcs = &drm_gem_shmem_funcs; - - if (private) { - drm_gem_private_object_init(dev, obj, size); - shmem->map_wc = false; /* dma-buf mappings use always writecombine */ - } else { - ret = drm_gem_object_init_with_mnt(dev, obj, size, gemfs); - } + ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs); if (ret) { - drm_gem_private_object_fini(obj); - goto err_free; - } - - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto err_release; - - INIT_LIST_HEAD(&shmem->madv_list); - - if (!private) { - /* - * Our buffers are kept pinned, so allocating them - * from the MOVABLE zone is a really bad idea, and - * conflicts with CMA. See comments above new_inode() - * why this is required _and_ expected if you're - * going to pin these pages. - */ - mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER | - __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + kfree(obj); + return ERR_PTR(ret); } return shmem; - -err_release: - drm_gem_object_release(obj); -err_free: - kfree(obj); - - return ERR_PTR(ret); } /** * drm_gem_shmem_create - Allocate an object with the given size @@ -150,13 +176,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt); /** - * drm_gem_shmem_free - Free resources associated with a shmem GEM object - * @shmem: shmem GEM object to free + * drm_gem_shmem_release - Release resources associated with a shmem GEM object. 
+ * @shmem: shmem GEM object * - * This function cleans up the GEM object state and frees the memory used to - * store the object itself. + * This function cleans up the GEM object state, but does not free the memory used to store the + * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings. */ -void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; @@ -183,6 +209,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } drm_gem_object_release(obj); +} +EXPORT_SYMBOL_GPL(drm_gem_shmem_release); + +/** + * drm_gem_shmem_free - Free resources associated with a shmem GEM object + * @shmem: shmem GEM object to free + * + * This function cleans up the GEM object state and frees the memory used to + * store the object itself. + */ +void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) +{ + drm_gem_shmem_release(shmem); kfree(shmem); } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); @@ -518,18 +557,11 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked); int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + int ret; - if (!args->pitch || !args->size) { - args->pitch = min_pitch; - args->size = PAGE_ALIGN(args->pitch * args->height); - } else { - /* ensure sane minimum values */ - if (args->pitch < min_pitch) - args->pitch = min_pitch; - if (args->size < args->pitch * args->height) - args->size = PAGE_ALIGN(args->pitch * args->height); - } + ret = drm_mode_size_dumb(dev, args, SZ_8, 0); + if (ret) + return ret; return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle); } diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index 257cca4cb97a..08ff0fadd0b2 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c 
@@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index b04cde4a60e7..5e5b70518dbe 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -107,7 +108,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) { - /* We got here via ttm_bo_put(), which means that the + /* We got here via ttm_bo_fini(), which means that the * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ @@ -234,11 +235,11 @@ EXPORT_SYMBOL(drm_gem_vram_create); * drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object * @gbo: the GEM VRAM object * - * See ttm_bo_put() for more information. + * See ttm_bo_fini() for more information. */ void drm_gem_vram_put(struct drm_gem_vram_object *gbo) { - ttm_bo_put(&gbo->bo); + ttm_bo_fini(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_put); @@ -859,7 +860,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev, dev->anon_inode->i_mapping, dev->vma_offset_manager, - false, true); + TTM_ALLOCATION_POOL_USE_DMA32); if (ret) return ret; @@ -967,7 +968,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev, max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT; - fbsize = mode->hdisplay * mode->vdisplay * max_bpp; + fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp; fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); if (fbpages > max_fbpages) diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index cb906765897e..73e550c8ff8c 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1363,7 +1363,8 @@ map_pages: order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages); if 
(is_device_private_page(page) || is_device_coherent_page(page)) { - if (zdd != page->zone_device_data && i > 0) { + if (!ctx->allow_mixed && + zdd != page->zone_device_data && i > 0) { err = -EOPNOTSUPP; goto err_unmap; } @@ -1399,7 +1400,8 @@ map_pages: } else { dma_addr_t addr; - if (is_zone_device_page(page) || pagemap) { + if (is_zone_device_page(page) || + (pagemap && !ctx->allow_mixed)) { err = -EOPNOTSUPP; goto err_unmap; } diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index af63f4d00315..5f6f46909fc3 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -26,6 +26,7 @@ */ #include +#include #include #include diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index e33c78fc8fbd..00482227a9cd 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include