spi: xilinx: make IRQs optional

Merge series from Abdurrahman Hussain <abdurrahman@nexthop.ai>:

Make interrupts optional to allow the driver to fall back to its existing
polling mode on systems where interrupts are either missing or broken.
Mark Brown 2026-01-20 17:58:35 +00:00
commit e73eb6a73c
330 changed files with 3658 additions and 2052 deletions
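
As a rough sketch of the pattern the series description refers to (not the actual
driver change), a probe routine can treat the interrupt as optional and fall back
to polling when none is usable. struct xspi_priv, xspi_irq() and poll_mode below
are hypothetical names used purely for illustration; platform_get_irq_optional()
and devm_request_irq() are the real kernel APIs being leaned on.

/*
 * Sketch only: generic "optional IRQ, else keep polling" probe flow.
 * struct xspi_priv, xspi_irq() and poll_mode are made-up names, not
 * taken from the actual patch.
 */
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct xspi_priv {
	bool poll_mode;		/* no usable IRQ, use the polled path */
};

static irqreturn_t xspi_irq(int irq, void *data)
{
	/* A real driver would ack the controller and complete the transfer. */
	return IRQ_HANDLED;
}

static int xspi_probe(struct platform_device *pdev)
{
	struct xspi_priv *priv;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Unlike platform_get_irq(), this stays quiet when no IRQ is described. */
	irq = platform_get_irq_optional(pdev, 0);
	if (irq > 0) {
		ret = devm_request_irq(&pdev->dev, irq, xspi_irq, 0,
				       dev_name(&pdev->dev), priv);
		if (ret)
			return ret;
	} else {
		/* Interrupt missing or broken: fall back to polling mode. */
		priv->poll_mode = true;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
}

Per the description above, the actual driver keeps its existing polling transfer
loop as the fallback rather than introducing a new one.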


@ -207,6 +207,7 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
Daniel Thompson <danielt@kernel.org> <daniel.thompson@linaro.org>
Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
@ -794,6 +795,7 @@ Sven Eckelmann <sven@narfation.org> <sven.eckelmann@open-mesh.com>
Sven Eckelmann <sven@narfation.org> <sven.eckelmann@openmesh.com>
Sven Eckelmann <sven@narfation.org> <sven@open-mesh.com>
Sven Peter <sven@kernel.org> <sven@svenpeter.dev>
Szymon Wilczek <swilczek.lx@gmail.com> <szymonwilczek@gmx.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Tamizh Chelvam Raja <quic_tamizhr@quicinc.com> <tamizhr@codeaurora.org>
Taniya Das <quic_tdas@quicinc.com> <tdas@codeaurora.org>


@ -2917,6 +2917,41 @@ Kernel parameters
for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
are exclusive, so you cannot specify multiple forms.
kfence.burst= [MM,KFENCE] The number of additional successive
allocations to be attempted through KFENCE for each
sample interval.
Format: <unsigned integer>
Default: 0
kfence.check_on_panic=
[MM,KFENCE] Whether to check all KFENCE-managed objects'
canaries on panic.
Format: <bool>
Default: false
kfence.deferrable=
[MM,KFENCE] Whether to use a deferrable timer to trigger
allocations. This avoids forcing CPU wake-ups if the
system is idle, at the risk of a less predictable
sample interval.
Format: <bool>
Default: CONFIG_KFENCE_DEFERRABLE
kfence.sample_interval=
[MM,KFENCE] KFENCE's sample interval in milliseconds.
Format: <unsigned integer>
0 - Disable KFENCE.
>0 - Enable KFENCE with the given sample interval.
Default: CONFIG_KFENCE_SAMPLE_INTERVAL
kfence.skip_covered_thresh=
[MM,KFENCE] If pool utilization reaches this threshold
(pool usage%), KFENCE limits allocations from sources
that are already covered, keeping them from further
filling up the pool.
Format: <unsigned integer>
Default: 75
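
For illustration only (the values are arbitrary, not recommendations), the
kfence.* parameters documented above combine on the boot command line like:

kfence.sample_interval=100 kfence.burst=2 kfence.deferrable=1 kfence.check_on_panic=1

Here sample_interval=100 enables KFENCE with a 100 ms sampling period and
burst=2 lets each sample attempt two additional guarded allocations.
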
kgdbdbgp= [KGDB,HW,EARLY] kgdb over EHCI usb debug port.
Format: <Controller#>[,poll interval]
The controller # is the number of the ehci usb debug


@ -303,6 +303,14 @@ netdev_max_backlog
Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.
qdisc_max_burst
------------------
Maximum number of packets that can be temporarily stored before
reaching the qdisc.
Default: 1000
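
A usage sketch, assuming this knob is exposed under net.core like the neighbouring
netdev_* entries (the exact sysctl path is an assumption, not stated by the hunk):

# Path assumed; adjust if the knob lands elsewhere in the sysctl tree.
sysctl net.core.qdisc_max_burst
sysctl -w net.core.qdisc_max_burst=2000
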
netdev_rss_key
--------------


@ -16,7 +16,8 @@ properties:
- brcm,iproc-nic-i2c
reg:
maxItems: 1
minItems: 1
maxItems: 2
clock-frequency:
enum: [ 100000, 400000 ]
@ -41,8 +42,15 @@ allOf:
contains:
const: brcm,iproc-nic-i2c
then:
properties:
reg:
minItems: 2
required:
- brcm,ape-hsls-addr-mask
else:
properties:
reg:
maxItems: 1
unevaluatedProperties: false


@ -56,7 +56,7 @@ properties:
clocks:
minItems: 5
maxItems: 7
maxItems: 6
clock-names:
minItems: 5
@ -67,7 +67,6 @@ properties:
- enum: [rchng, refgen]
- const: pipe
- const: pipediv2
- const: phy_aux
power-domains:
maxItems: 1
@ -180,6 +179,7 @@ allOf:
contains:
enum:
- qcom,glymur-qmp-gen5x4-pcie-phy
- qcom,qcs8300-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x2-pcie-phy
- qcom,sa8775p-qmp-gen4x4-pcie-phy
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
@ -197,19 +197,6 @@ allOf:
clock-names:
minItems: 6
- if:
properties:
compatible:
contains:
enum:
- qcom,qcs8300-qmp-gen4x2-pcie-phy
then:
properties:
clocks:
minItems: 7
clock-names:
minItems: 7
- if:
properties:
compatible:


@ -49,6 +49,10 @@ properties:
items:
- const: mclk
interrupts:
maxItems: 1
description: Headphone detect interrupt
port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false


@ -47,6 +47,12 @@ properties:
reg:
maxItems: 1
clocks:
maxItems: 1
clock-names:
const: mclk
interrupts:
maxItems: 1
description: The CODEC's interrupt output.
@ -98,6 +104,7 @@ properties:
- 4 # Use GPIO2 for jack-detect
- 5 # Use GPIO3 for jack-detect
- 6 # Use GPIO4 for jack-detect
- 7 # Use HDA header for jack-detect
realtek,jack-detect-not-inverted:
description:
@ -121,6 +128,10 @@ properties:
- 2 # Scale current by 1.0
- 3 # Scale current by 1.5
port:
$ref: audio-graph-port.yaml#
unevaluatedProperties: false
required:
- compatible
- reg


@ -70,6 +70,9 @@ properties:
"#sound-dai-cells":
const: 0
port:
$ref: /schemas/graph.yaml#/properties/port
required:
- compatible
- reg


@ -38,7 +38,6 @@ properties:
required:
- compatible
- reg
- interrupts
unevaluatedProperties: false


@ -48,8 +48,8 @@ properties:
enum: [1, 2]
default: 2
description:
Number of lanes available per direction. Note that it is assume same
number of lanes is used both directions at once.
Number of lanes available per direction. Note that it is assumed that
the same number of lanes are used in both directions at once.
vdd-hba-supply:
description:


@ -406,7 +406,6 @@ allOf:
compatible:
contains:
enum:
- qcom,ipq5018-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8074-dwc3
- qcom,msm8953-dwc3
@ -428,6 +427,7 @@ allOf:
compatible:
contains:
enum:
- qcom,msm8994-dwc3
- qcom,msm8996-dwc3
- qcom,qcs404-dwc3
- qcom,sdm660-dwc3
@ -451,6 +451,7 @@ allOf:
compatible:
contains:
enum:
- qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
then:
properties:
@ -488,7 +489,6 @@ allOf:
enum:
- qcom,ipq4019-dwc3
- qcom,ipq8064-dwc3
- qcom,msm8994-dwc3
- qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
- qcom,qdu1000-dwc3


@ -420,7 +420,6 @@ allOf:
compatible:
contains:
enum:
- qcom,ipq5018-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8074-dwc3
- qcom,msm8953-dwc3
@ -443,6 +442,7 @@ allOf:
compatible:
contains:
enum:
- qcom,msm8994-dwc3
- qcom,msm8996-dwc3
- qcom,qcs404-dwc3
- qcom,sdm660-dwc3
@ -467,6 +467,7 @@ allOf:
compatible:
contains:
enum:
- qcom,ipq5018-dwc3
- qcom,ipq5332-dwc3
then:
properties:
@ -509,7 +510,6 @@ allOf:
- qcom,ipq4019-dwc3
- qcom,ipq8064-dwc3
- qcom,kaanapali-dwc3
- qcom,msm8994-dwc3
- qcom,qcs615-dwc3
- qcom,qcs8300-dwc3
- qcom,qdu1000-dwc3


@ -0,0 +1,175 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
#
# Copyright (c) 2025 Valve Corporation.
#
---
name: dev-energymodel
doc: |
Energy model netlink interface to notify its changes.
protocol: genetlink
uapi-header: linux/dev_energymodel.h
definitions:
-
type: flags
name: perf-state-flags
entries:
-
name: perf-state-inefficient
doc: >-
The performance state is inefficient: within this perf-domain there
is another performance state with a higher frequency but a lower or
equal power cost.
-
type: flags
name: perf-domain-flags
entries:
-
name: perf-domain-microwatts
doc: >-
The power values are in micro-Watts or some other scale.
-
name: perf-domain-skip-inefficiencies
doc: >-
Skip inefficient states when estimating energy consumption.
-
name: perf-domain-artificial
doc: >-
The power values are artificial and might be created by a platform
that lacks real power information.
attribute-sets:
-
name: perf-domain
doc: >-
Information on a single performance domain.
attributes:
-
name: pad
type: pad
-
name: perf-domain-id
type: u32
doc: >-
A unique ID number for each performance domain.
-
name: flags
type: u64
doc: >-
Bitmask of performance domain flags.
enum: perf-domain-flags
-
name: cpus
type: u64
multi-attr: true
doc: >-
CPUs that belong to this performance domain.
-
name: perf-table
doc: >-
Performance states table.
attributes:
-
name: perf-domain-id
type: u32
doc: >-
A unique ID number for each performance domain.
-
name: perf-state
type: nest
nested-attributes: perf-state
multi-attr: true
-
name: perf-state
doc: >-
Performance state of a performance domain.
attributes:
-
name: pad
type: pad
-
name: performance
type: u64
doc: >-
CPU performance (capacity) at a given frequency.
-
name: frequency
type: u64
doc: >-
The frequency in KHz, for consistency with CPUFreq.
-
name: power
type: u64
doc: >-
The power consumed at this level (by 1 CPU or by a registered
device). It can be a total power: static and dynamic.
-
name: cost
type: u64
doc: >-
The cost coefficient associated with this level, used during energy
calculation. Equal to: power * max_frequency / frequency.
-
name: flags
type: u64
doc: >-
Bitmask of performance state flags.
enum: perf-state-flags
operations:
list:
-
name: get-perf-domains
attribute-set: perf-domain
doc: Get the list of information for all performance domains.
do:
request:
attributes:
- perf-domain-id
reply:
attributes: &perf-domain-attrs
- pad
- perf-domain-id
- flags
- cpus
dump:
reply:
attributes: *perf-domain-attrs
-
name: get-perf-table
attribute-set: perf-table
doc: Get the energy model table of a performance domain.
do:
request:
attributes:
- perf-domain-id
reply:
attributes:
- perf-domain-id
- perf-state
-
name: perf-domain-created
doc: A performance domain is created.
notify: get-perf-table
mcgrp: event
-
name: perf-domain-updated
doc: A performance domain is updated.
notify: get-perf-table
mcgrp: event
-
name: perf-domain-deleted
doc: A performance domain is deleted.
attribute-set: perf-table
event:
attributes:
- perf-domain-id
mcgrp: event
mcast-groups:
list:
-
name: event
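
One way to exercise the new family is the in-tree YNL CLI. The invocation below
assumes tools/net/ynl/cli.py with its usual --spec/--dump/--do/--json options and
is a sketch, not output captured from this series:

./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dev-energymodel.yaml \
        --dump get-perf-domains
./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dev-energymodel.yaml \
        --do get-perf-table --json '{"perf-domain-id": 0}'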


@ -1,113 +0,0 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
name: em
doc: |
Energy model netlink interface to notify its changes.
protocol: genetlink
uapi-header: linux/energy_model.h
attribute-sets:
-
name: pds
attributes:
-
name: pd
type: nest
nested-attributes: pd
multi-attr: true
-
name: pd
attributes:
-
name: pad
type: pad
-
name: pd-id
type: u32
-
name: flags
type: u64
-
name: cpus
type: string
-
name: pd-table
attributes:
-
name: pd-id
type: u32
-
name: ps
type: nest
nested-attributes: ps
multi-attr: true
-
name: ps
attributes:
-
name: pad
type: pad
-
name: performance
type: u64
-
name: frequency
type: u64
-
name: power
type: u64
-
name: cost
type: u64
-
name: flags
type: u64
operations:
list:
-
name: get-pds
attribute-set: pds
doc: Get the list of information for all performance domains.
do:
reply:
attributes:
- pd
-
name: get-pd-table
attribute-set: pd-table
doc: Get the energy model table of a performance domain.
do:
request:
attributes:
- pd-id
reply:
attributes:
- pd-id
- ps
-
name: pd-created
doc: A performance domain is created.
notify: get-pd-table
mcgrp: event
-
name: pd-updated
doc: A performance domain is updated.
notify: get-pd-table
mcgrp: event
-
name: pd-deleted
doc: A performance domain is deleted.
attribute-set: pd-table
event:
attributes:
- pd-id
mcgrp: event
mcast-groups:
list:
-
name: event


@ -44,7 +44,7 @@ member and userspace must populate the type member with a value from
struct v4l2_isp_params_buffer *params =
(struct v4l2_isp_params_buffer *)buffer;
params->version = MALI_C55_PARAM_BUFFER_V1;
params->version = V4L2_ISP_PARAMS_VERSION_V1;
params->data_size = 0;
void *data = (void *)params->data;


@ -314,6 +314,7 @@ R: Mauro Carvalho Chehab <mchehab@kernel.org>
R: Shuai Xue <xueshuai@linux.alibaba.com>
L: linux-acpi@vger.kernel.org
F: drivers/acpi/apei/
F: drivers/firmware/efi/cper*
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: "Rafael J. Wysocki" <rafael@kernel.org>
@ -6437,6 +6438,7 @@ F: include/linux/blk-cgroup.h
CONTROL GROUP - CPUSET
M: Waiman Long <longman@redhat.com>
R: Chen Ridong <chenridong@huaweicloud.com>
L: cgroups@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
@ -9320,12 +9322,12 @@ M: Lukasz Luba <lukasz.luba@arm.com>
M: "Rafael J. Wysocki" <rafael@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
F: kernel/power/energy_model.c
F: include/linux/energy_model.h
F: Documentation/netlink/specs/dev-energymodel.yaml
F: Documentation/power/energy-model.rst
F: Documentation/netlink/specs/em.yaml
F: include/uapi/linux/energy_model.h
F: include/linux/energy_model.h
F: include/uapi/linux/dev_energymodel.h
F: kernel/power/em_netlink*.*
F: kernel/power/energy_model.c
EPAPR HYPERVISOR BYTE CHANNEL DEVICE DRIVER
M: Laurentiu Tudor <laurentiu.tudor@nxp.com>
@ -9533,6 +9535,7 @@ F: arch/arm/boot/compressed/efi-header.S
F: arch/x86/platform/efi/
F: drivers/firmware/efi/
F: include/linux/efi*.h
X: drivers/firmware/efi/cper*
EXTERNAL CONNECTOR SUBSYSTEM (EXTCON)
M: MyungJoo Ham <myungjoo.ham@samsung.com>
@ -14897,6 +14900,7 @@ LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
M: Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
M: Ranjan Kumar <ranjan.kumar@broadcom.com>
L: MPT-FusionLinux.pdl@broadcom.com
L: linux-scsi@vger.kernel.org
S: Supported
@ -18440,9 +18444,11 @@ M: Jakub Kicinski <kuba@kernel.org>
M: Sabrina Dubroca <sd@queasysnail.net>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/tls*
F: include/net/tls.h
F: include/uapi/linux/tls.h
F: net/tls/*
F: net/tls/
F: tools/testing/selftests/net/tls.c
NETWORKING [SOCKETS]
M: Eric Dumazet <edumazet@google.com>


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -131,6 +131,7 @@
reg-names = "main", "isr0";
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
@ -149,6 +150,7 @@
reg-names = "main", "isr0";
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <4>;
@ -164,6 +166,7 @@
compatible = "loongson,ls2k0500-eiointc";
reg = <0x0 0x1fe11600 0x0 0xea00>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-parent = <&cpuintc>;
interrupts = <3>;


@ -46,7 +46,7 @@
};
/* i2c of the dvi eeprom edid */
i2c-gpio-0 {
i2c-0 {
compatible = "i2c-gpio";
scl-gpios = <&gpio0 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio0 1 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@ -57,7 +57,7 @@
};
/* i2c of the eeprom edid */
i2c-gpio-1 {
i2c-1 {
compatible = "i2c-gpio";
scl-gpios = <&gpio0 33 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio0 32 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@ -114,6 +114,7 @@
<0x0 0x1fe01140 0x0 0x8>;
reg-names = "main", "isr0", "isr1";
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
@ -131,6 +132,7 @@
<0x0 0x1fe01148 0x0 0x8>;
reg-names = "main", "isr0", "isr1";
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <3>;
@ -437,54 +439,47 @@
gmac0: ethernet@3,0 {
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
<13 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc0 12 IRQ_TYPE_LEVEL_HIGH>,
<&liointc0 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
status = "disabled";
};
gmac1: ethernet@3,1 {
reg = <0x1900 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc0>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
<15 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc0 14 IRQ_TYPE_LEVEL_HIGH>,
<&liointc0 15 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
status = "disabled";
};
ehci0: usb@4,1 {
reg = <0x2100 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc1>;
interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc1 18 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
ohci0: usb@4,2 {
reg = <0x2200 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc1>;
interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc1 19 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
display@6,0 {
reg = <0x3000 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc0>;
interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc0 28 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
hda@7,0 {
reg = <0x3800 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc0>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc0 4 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
sata: sata@8,0 {
reg = <0x4000 0x0 0x0 0x0 0x0>;
interrupt-parent = <&liointc0>;
interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&liointc0 19 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};


@ -126,6 +126,7 @@
reg = <0x0 0x1fe01400 0x0 0x64>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
interrupt-parent = <&cpuintc>;
interrupts = <2>;
@ -140,6 +141,7 @@
compatible = "loongson,ls2k2000-eiointc";
reg = <0x0 0x1fe01600 0x0 0xea00>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-parent = <&cpuintc>;
interrupts = <3>;
@ -149,6 +151,7 @@
compatible = "loongson,pch-pic-1.0";
reg = <0x0 0x10000000 0x0 0x400>;
interrupt-controller;
#address-cells = <0>;
#interrupt-cells = <2>;
loongson,pic-base-vec = <0>;
interrupt-parent = <&eiointc>;
@ -291,65 +294,57 @@
gmac0: ethernet@3,0 {
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
<13 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&pic 12 IRQ_TYPE_LEVEL_HIGH>,
<&pic 13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
gmac1: ethernet@3,1 {
reg = <0x1900 0x0 0x0 0x0 0x0>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
<15 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&pic 14 IRQ_TYPE_LEVEL_HIGH>,
<&pic 15 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
gmac2: ethernet@3,2 {
reg = <0x1a00 0x0 0x0 0x0 0x0>;
interrupts = <17 IRQ_TYPE_LEVEL_HIGH>,
<18 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&pic 17 IRQ_TYPE_LEVEL_HIGH>,
<&pic 18 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&pic>;
status = "disabled";
};
xhci0: usb@4,0 {
reg = <0x2000 0x0 0x0 0x0 0x0>;
interrupts = <48 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&pic>;
interrupts-extended = <&pic 48 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
xhci1: usb@19,0 {
reg = <0xc800 0x0 0x0 0x0 0x0>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&pic>;
interrupts-extended = <&pic 22 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
display@6,1 {
reg = <0x3100 0x0 0x0 0x0 0x0>;
interrupts = <28 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&pic>;
interrupts-extended = <&pic 28 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
i2s@7,0 {
reg = <0x3800 0x0 0x0 0x0 0x0>;
interrupts = <78 IRQ_TYPE_LEVEL_HIGH>,
<79 IRQ_TYPE_LEVEL_HIGH>;
interrupts-extended = <&pic 78 IRQ_TYPE_LEVEL_HIGH>,
<&pic 79 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "tx", "rx";
interrupt-parent = <&pic>;
status = "disabled";
};
sata: sata@8,0 {
reg = <0x4000 0x0 0x0 0x0 0x0>;
interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&pic>;
interrupts-extended = <&pic 16 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};


@ -126,14 +126,6 @@ SYM_CODE_START(smpboot_entry)
LONG_LI t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
csrwr t0, LOONGARCH_CSR_PRMD
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
la.pcrel t0, cpuboot_data
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO


@ -626,6 +626,18 @@ static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 conf
return pev;
}
static inline bool loongarch_pmu_event_requires_counter(const struct perf_event *event)
{
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
case PERF_TYPE_RAW:
return true;
default:
return false;
}
}
static int validate_group(struct perf_event *event)
{
struct cpu_hw_events fake_cpuc;
@ -633,15 +645,18 @@ static int validate_group(struct perf_event *event)
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
if (loongarch_pmu_event_requires_counter(leader) &&
loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
return -EINVAL;
for_each_sibling_event(sibling, leader) {
if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
if (loongarch_pmu_event_requires_counter(sibling) &&
loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
return -EINVAL;
}
if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
if (loongarch_pmu_event_requires_counter(event) &&
loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
return -EINVAL;
return 0;


@ -679,6 +679,7 @@ static void kvm_eiointc_destroy(struct kvm_device *dev)
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device);
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &eiointc->device_vext);
kfree(eiointc);
kfree(dev);
}
static struct kvm_device_ops kvm_eiointc_dev_ops = {


@ -459,6 +459,7 @@ static void kvm_ipi_destroy(struct kvm_device *dev)
ipi = kvm->arch.ipi;
kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, &ipi->device);
kfree(ipi);
kfree(dev);
}
static struct kvm_device_ops kvm_ipi_dev_ops = {


@ -475,6 +475,7 @@ static void kvm_pch_pic_destroy(struct kvm_device *dev)
/* unregister pch pic device and free it's memory */
kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &s->device);
kfree(s);
kfree(dev);
}
static struct kvm_device_ops kvm_pch_pic_dev_ops = {


@ -425,6 +425,28 @@ void __init paging_init(void)
static struct kcore_list kcore_kseg0;
#endif
static inline void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
/*
* If CPU cannot support HIGHMEM discard the memory above highstart_pfn
*/
if (cpu_has_dc_aliases) {
memblock_remove(PFN_PHYS(highstart_pfn), -1);
return;
}
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
struct page *page = pfn_to_page(tmp);
if (!memblock_is_memory(PFN_PHYS(tmp)))
SetPageReserved(page);
}
#endif
}
void __init arch_mm_preinit(void)
{
/*
@ -435,6 +457,7 @@ void __init arch_mm_preinit(void)
maar_init();
setup_zero_pages(); /* Setup zeroed pages. */
highmem_init();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)


@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/processor.h>
#include <linux/smp.h>
#include <linux/sys_info.h>
#include <asm/interrupt.h>
#include <asm/paca.h>
@ -235,7 +236,11 @@ static void watchdog_smp_panic(int cpu)
pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
if (!sysctl_hardlockup_all_cpu_backtrace) {
if (sysctl_hardlockup_all_cpu_backtrace ||
(hardlockup_si_mask & SYS_INFO_ALL_BT)) {
trigger_allbutcpu_cpu_backtrace(cpu);
cpumask_clear(&wd_smp_cpus_ipi);
} else {
/*
* Try to trigger the stuck CPUs, unless we are going to
* get a backtrace on all of them anyway.
@ -244,11 +249,9 @@ static void watchdog_smp_panic(int cpu)
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
__cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
}
} else {
trigger_allbutcpu_cpu_backtrace(cpu);
cpumask_clear(&wd_smp_cpus_ipi);
}
sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
@ -415,9 +418,11 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
if (sysctl_hardlockup_all_cpu_backtrace)
if (sysctl_hardlockup_all_cpu_backtrace ||
(hardlockup_si_mask & SYS_INFO_ALL_BT))
trigger_allbutcpu_cpu_backtrace(cpu);
sys_info(hardlockup_si_mask & ~SYS_INFO_ALL_BT);
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");


@ -1133,10 +1133,6 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
store_args(nr_arg_slots, args_off, ctx);
/* skip to actual body of traced function */
if (flags & BPF_TRAMP_F_ORIG_STACK)
orig_call += RV_FENTRY_NINSNS * 4;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
@ -1171,6 +1167,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* skip to actual body of traced function */
orig_call += RV_FENTRY_NINSNS * 4;
restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx);
restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx);
ret = emit_call((const u64)orig_call, true, ctx);


@ -825,7 +825,8 @@ static __init bool get_mem_config(void)
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return __get_mem_config_intel(&hw_res->r_resctrl);
else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
return false;
@ -987,7 +988,8 @@ static __init void rdt_init_res_defs(void)
{
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
rdt_init_res_defs_intel();
else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
rdt_init_res_defs_amd();
}
@ -1019,8 +1021,19 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
c->x86_cache_occ_scale = ebx;
c->x86_cache_mbm_width_offset = eax & 0xff;
if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
if (!c->x86_cache_mbm_width_offset) {
switch (c->x86_vendor) {
case X86_VENDOR_AMD:
c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
break;
case X86_VENDOR_HYGON:
c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_HYGON;
break;
default:
/* Leave c->x86_cache_mbm_width_offset as 0 */
break;
}
}
}
}


@ -14,6 +14,9 @@
#define MBM_CNTR_WIDTH_OFFSET_AMD 20
/* Hygon MBM counter width as an offset from MBM_CNTR_WIDTH_BASE */
#define MBM_CNTR_WIDTH_OFFSET_HYGON 8
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)


@ -319,10 +319,29 @@ EXPORT_SYMBOL_FOR_KVM(fpu_enable_guest_xfd_features);
#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
struct fpstate *fpstate = guest_fpu->fpstate;
fpregs_lock();
guest_fpu->fpstate->xfd = xfd;
if (guest_fpu->fpstate->in_use)
xfd_update_state(guest_fpu->fpstate);
/*
* KVM's guest ABI is that setting XFD[i]=1 *can* immediately revert the
* save state to its initial configuration. Likewise, KVM_GET_XSAVE does
* the same as XSAVE and returns XSTATE_BV[i]=0 whenever XFD[i]=1.
*
* If the guest's FPU state is in hardware, just update XFD: the XSAVE
* in fpu_swap_kvm_fpstate will clear XSTATE_BV[i] whenever XFD[i]=1.
*
* If however the guest's FPU state is NOT resident in hardware, clear
* disabled components in XSTATE_BV now, or a subsequent XRSTOR will
* attempt to load disabled components and generate #NM _in the host_.
*/
if (xfd && test_thread_flag(TIF_NEED_FPU_LOAD))
fpstate->regs.xsave.header.xfeatures &= ~xfd;
fpstate->xfd = xfd;
if (fpstate->in_use)
xfd_update_state(fpstate);
fpregs_unlock();
}
EXPORT_SYMBOL_FOR_KVM(fpu_update_guest_xfd);
@ -430,6 +449,13 @@ int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
if (ustate->xsave.header.xfeatures & ~xcr0)
return -EINVAL;
/*
* Disabled features must be in their initial state, otherwise XRSTOR
* causes an exception.
*/
if (WARN_ON_ONCE(ustate->xsave.header.xfeatures & kstate->xfd))
return -EINVAL;
/*
* Nullify @vpkru to preserve its current value if PKRU's bit isn't set
* in the header. KVM's odd ABI is to leave PKRU untouched in this


@ -89,6 +89,7 @@ struct kvm_task_sleep_node {
struct swait_queue_head wq;
u32 token;
int cpu;
bool dummy;
};
static struct kvm_task_sleep_head {
@ -120,15 +121,26 @@ static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
raw_spin_lock(&b->lock);
e = _find_apf_task(b, token);
if (e) {
/* dummy entry exist -> wake up was delivered ahead of PF */
hlist_del(&e->link);
struct kvm_task_sleep_node *dummy = NULL;
/*
* The entry can either be a 'dummy' entry (which is put on the
* list when wake-up happens ahead of APF handling completion)
* or a token from another task which should not be touched.
*/
if (e->dummy) {
hlist_del(&e->link);
dummy = e;
}
raw_spin_unlock(&b->lock);
kfree(e);
kfree(dummy);
return false;
}
n->token = token;
n->cpu = smp_processor_id();
n->dummy = false;
init_swait_queue_head(&n->wq);
hlist_add_head(&n->link, &b->list);
raw_spin_unlock(&b->lock);
@ -231,6 +243,7 @@ again:
}
dummy->token = token;
dummy->cpu = smp_processor_id();
dummy->dummy = true;
init_swait_queue_head(&dummy->wq);
hlist_add_head(&dummy->link, &b->list);
dummy = NULL;


@ -5807,9 +5807,18 @@ static int kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
union fpregs_state *xstate = (union fpregs_state *)guest_xsave->region;
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return vcpu->kvm->arch.has_protected_state ? -EINVAL : 0;
/*
* For backwards compatibility, do not expect disabled features to be in
* their initial state. XSTATE_BV[i] must still be cleared whenever
* XFD[i]=1, or XRSTOR would cause a #NM.
*/
xstate->xsave.header.xfeatures &= ~vcpu->arch.guest_fpu.fpstate->xfd;
return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
guest_xsave->region,
kvm_caps.supported_xcr0,


@ -115,12 +115,12 @@ void __init kernel_randomize_memory(void)
/*
* Adapt physical memory region size based on available memory,
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
* device BAR space assuming the direct map space is large enough
* for creating a ZONE_DEVICE mapping in the direct map corresponding
* to the physical BAR address.
* except when CONFIG_ZONE_DEVICE is enabled. ZONE_DEVICE wants to map
* any physical address into the direct-map. KASLR wants to reliably
* steal some physical address bits. Those design choices are in direct
* conflict.
*/
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
if (!IS_ENABLED(CONFIG_ZONE_DEVICE) && (memory_tb < kaslr_regions[0].size_tb))
kaslr_regions[0].size_tb = memory_tb;
/*


@ -140,7 +140,7 @@ bool bio_integrity_prep(struct bio *bio)
return true;
set_flags = false;
gfp |= __GFP_ZERO;
} else if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
} else if (bi->metadata_size > bi->pi_tuple_size)
gfp |= __GFP_ZERO;
break;
default:


@ -28,6 +28,10 @@ static bool sleep_no_lps0 __read_mostly;
module_param(sleep_no_lps0, bool, 0644);
MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
static bool check_lps0_constraints __read_mostly;
module_param(check_lps0_constraints, bool, 0644);
MODULE_PARM_DESC(check_lps0_constraints, "Check LPS0 device constraints");
static const struct acpi_device_id lps0_device_ids[] = {
{"PNP0D80", },
{"", },
@ -515,7 +519,8 @@ static struct acpi_scan_handler lps0_handler = {
static int acpi_s2idle_begin_lps0(void)
{
if (pm_debug_messages_on && !lpi_constraints_table) {
if (lps0_device_handle && !sleep_no_lps0 && check_lps0_constraints &&
!lpi_constraints_table) {
if (acpi_s2idle_vendor_amd())
lpi_device_get_constraints_amd();
else
@ -539,7 +544,7 @@ static int acpi_s2idle_prepare_late_lps0(void)
if (!lps0_device_handle || sleep_no_lps0)
return 0;
if (pm_debug_messages_on)
if (check_lps0_constraints)
lpi_check_constraints();
/* Screen off */


@ -665,12 +665,22 @@ static void nullb_add_fault_config(struct nullb_device *dev)
configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
}
static void nullb_del_fault_config(struct nullb_device *dev)
{
config_item_put(&dev->init_hctx_fault_config.group.cg_item);
config_item_put(&dev->requeue_config.group.cg_item);
config_item_put(&dev->timeout_config.group.cg_item);
}
#else
static void nullb_add_fault_config(struct nullb_device *dev)
{
}
static void nullb_del_fault_config(struct nullb_device *dev)
{
}
#endif
static struct
@ -702,7 +712,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
null_del_dev(dev->nullb);
mutex_unlock(&lock);
}
nullb_del_fault_config(dev);
config_item_put(item);
}


@ -1662,7 +1662,6 @@ static void destroy_sysfs(struct rnbd_clt_dev *dev,
/* To avoid deadlock firstly remove itself */
sysfs_remove_file_self(&dev->kobj, sysfs_self);
kobject_del(&dev->kobj);
kobject_put(&dev->kobj);
}
}


@ -75,9 +75,16 @@ EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");
static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
{
struct cxl_cxims_data *cximsd = cxlrd->platform_data;
int hbiw = cxlrd->cxlsd.nr_targets;
struct cxl_cxims_data *cximsd;
return cxl_do_xormap_calc(cximsd, addr, cxlrd->cxlsd.nr_targets);
/* No xormaps for host bridge interleave ways of 1 or 3 */
if (hbiw == 1 || hbiw == 3)
return addr;
cximsd = cxlrd->platform_data;
return cxl_do_xormap_calc(cximsd, addr, hbiw);
}
struct cxl_cxims_context {


@ -403,7 +403,7 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
* is not set.
*/
if (cxled->part < 0)
for (int i = 0; cxlds->nr_partitions; i++)
for (int i = 0; i < cxlds->nr_partitions; i++)
if (resource_contains(&cxlds->part[i].res, res)) {
cxled->part = i;
break;
@ -530,7 +530,7 @@ resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
resource_size_t base = -1;
resource_size_t base = RESOURCE_SIZE_MAX;
lockdep_assert_held(&cxl_rwsem.dpa);
if (cxled->dpa_res)


@ -1590,7 +1590,7 @@ static int update_decoder_targets(struct device *dev, void *data)
cxlsd->target[i] = dport;
dev_dbg(dev, "dport%d found in target list, index %d\n",
dport->port_id, i);
return 1;
return 0;
}
}


@ -759,7 +759,7 @@ static ssize_t extended_linear_cache_size_show(struct device *dev,
ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
return rc;
return sysfs_emit(buf, "%#llx\n", p->cache_size);
return sysfs_emit(buf, "%pap\n", &p->cache_size);
}
static DEVICE_ATTR_RO(extended_linear_cache_size);
@ -3118,7 +3118,7 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled = NULL;
u64 dpa_offset, hpa_offset, hpa;
u64 base, dpa_offset, hpa_offset, hpa;
u16 eig = 0;
u8 eiw = 0;
int pos;
@ -3136,8 +3136,14 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
ways_to_eiw(p->interleave_ways, &eiw);
granularity_to_eig(p->interleave_granularity, &eig);
dpa_offset = dpa - cxl_dpa_resource_start(cxled);
base = cxl_dpa_resource_start(cxled);
if (base == RESOURCE_SIZE_MAX)
return ULLONG_MAX;
dpa_offset = dpa - base;
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
if (hpa_offset == ULLONG_MAX)
return ULLONG_MAX;
/* Apply the hpa_offset to the region base address */
hpa = hpa_offset + p->res->start + p->cache_size;
@ -3146,6 +3152,9 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
if (cxlrd->ops.hpa_to_spa)
hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa);
if (hpa == ULLONG_MAX)
return ULLONG_MAX;
if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
"Addr trans fail: hpa 0x%llx not in region\n", hpa);
@ -3170,7 +3179,8 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
struct cxl_region_params *p = &cxlr->params;
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
struct cxl_endpoint_decoder *cxled;
u64 hpa, hpa_offset, dpa_offset;
u64 hpa_offset = offset;
u64 dpa, dpa_offset;
u16 eig = 0;
u8 eiw = 0;
int pos;
@ -3187,10 +3197,13 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
* CXL HPA is assumed to equal SPA.
*/
if (cxlrd->ops.spa_to_hpa) {
hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
hpa_offset = hpa - p->res->start;
} else {
hpa_offset = offset;
hpa_offset = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
if (hpa_offset == ULLONG_MAX) {
dev_dbg(&cxlr->dev, "HPA not found for %pr offset %#llx\n",
p->res, offset);
return -ENXIO;
}
hpa_offset -= p->res->start;
}
pos = cxl_calculate_position(hpa_offset, eiw, eig);
@ -3207,8 +3220,13 @@ static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
cxled = p->targets[i];
if (cxled->pos != pos)
continue;
dpa = cxl_dpa_resource_start(cxled);
if (dpa != RESOURCE_SIZE_MAX)
dpa += dpa_offset;
result->cxlmd = cxled_to_memdev(cxled);
result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
result->dpa = dpa;
return 0;
}


@ -67,14 +67,16 @@ struct dev_dax_range {
/**
* struct dev_dax - instance data for a subdivision of a dax region, and
* data while the device is activated in the driver.
* @region - parent region
* @dax_dev - core dax functionality
* @region: parent region
* @dax_dev: core dax functionality
* @align: alignment of this instance
* @target_node: effective numa node if dev_dax memory range is onlined
* @dyn_id: is this a dynamic or statically created instance
* @id: ida allocated id when the dax_region is not static
* @ida: mapping id allocator
* @dev - device core
* @pgmap - pgmap for memmap setup / lifetime (driver owned)
* @dev: device core
* @pgmap: pgmap for memmap setup / lifetime (driver owned)
* @memmap_on_memory: allow kmem to put the memmap in the memory
* @nr_range: size of @ranges
* @ranges: range tuples of memory used
*/


@ -936,6 +936,7 @@ static void admac_remove(struct platform_device *pdev)
}
static const struct of_device_id admac_of_match[] = {
{ .compatible = "apple,t8103-admac", },
{ .compatible = "apple,admac", },
{ }
};


@ -1765,6 +1765,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
static void atc_free_chan_resources(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave;
BUG_ON(atc_chan_is_enabled(atchan));
@ -1774,8 +1775,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*
* Free atslave allocated in at_dma_xlate()
*/
kfree(chan->private);
chan->private = NULL;
atslave = chan->private;
if (atslave) {
put_device(atslave->dma_dev);
kfree(atslave);
chan->private = NULL;
}
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


@ -1699,7 +1699,7 @@ static int sba_probe(struct platform_device *pdev)
/* Prealloc channel resource */
ret = sba_prealloc_channel_resources(sba);
if (ret)
goto fail_free_mchan;
goto fail_put_mbox;
/* Check availability of debugfs */
if (!debugfs_initialized())
@ -1729,6 +1729,8 @@ skip_debugfs:
fail_free_resources:
debugfs_remove_recursive(sba->root);
sba_freeup_channel_resources(sba);
fail_put_mbox:
put_device(sba->mbox_dev);
fail_free_mchan:
mbox_free_channel(sba->mchan);
return ret;
@ -1744,6 +1746,8 @@ static void sba_remove(struct platform_device *pdev)
sba_freeup_channel_resources(sba);
put_device(sba->mbox_dev);
mbox_free_channel(sba->mchan);
}


@ -102,11 +102,11 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct llist_node *node;
unsigned long flags;
unsigned int chid, devid, cpuid;
int ret;
int ret = -EINVAL;
if (dma_spec->args_count != DMAMUX_NCELLS) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
devid = dma_spec->args[0];
@ -115,18 +115,18 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (devid > MAX_DMA_MAPPING_ID) {
dev_err(&pdev->dev, "invalid device id: %u\n", devid);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (cpuid > MAX_DMA_CPU_ID) {
dev_err(&pdev->dev, "invalid cpu id: %u\n", cpuid);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@ -136,8 +136,6 @@ static void *cv1800_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (map->peripheral == devid && map->cpu == cpuid)
goto found;
}
ret = -EINVAL;
goto failed;
} else {
node = llist_del_first(&dmamux->free_maps);
@ -171,12 +169,17 @@ found:
dev_dbg(&pdev->dev, "register channel %u for req %u (cpu %u)\n",
chid, devid, cpuid);
put_device(&pdev->dev);
return map;
failed:
spin_unlock_irqrestore(&dmamux->lock, flags);
of_node_put(dma_spec->np);
dev_err(&pdev->dev, "errno %d\n", ret);
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}


@ -90,7 +90,7 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
if (test_and_set_bit(map->req_idx, dmamux->used_chans)) {
ret = -EBUSY;
goto free_map;
goto put_dma_spec_np;
}
mask = BIT(map->req_idx);
@ -103,6 +103,8 @@ static void *rzn1_dmamux_route_allocate(struct of_phandle_args *dma_spec,
clear_bitmap:
clear_bit(map->req_idx, dmamux->used_chans);
put_dma_spec_np:
of_node_put(dma_spec->np);
free_map:
kfree(map);
put_device:


@ -873,6 +873,7 @@ err_errirq:
free_irq(fsl_chan->txirq, fsl_chan);
err_txirq:
dma_pool_destroy(fsl_chan->tcd_pool);
clk_disable_unprepare(fsl_chan->clk);
return ret;
}


@ -20,11 +20,16 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, size_t c
int rc = -ENODEV;
dev = bus_find_device_by_name(bus, NULL, buf);
if (dev && dev->driver) {
if (!dev)
return -ENODEV;
if (dev->driver) {
device_driver_detach(dev);
rc = count;
}
put_device(dev);
return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
@ -38,9 +43,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
struct idxd_dev *idxd_dev;
dev = bus_find_device_by_name(bus, NULL, buf);
if (!dev || dev->driver || drv != &dsa_drv.drv)
if (!dev)
return -ENODEV;
if (dev->driver || drv != &dsa_drv.drv)
goto err_put_dev;
idxd_dev = confdev_to_idxd_dev(dev);
if (is_idxd_dev(idxd_dev)) {
alt_drv = driver_find("idxd", bus);
@ -53,13 +61,20 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, size_t cou
alt_drv = driver_find("user", bus);
}
if (!alt_drv)
return -ENODEV;
goto err_put_dev;
rc = device_driver_attach(alt_drv, dev);
if (rc < 0)
return rc;
goto err_put_dev;
put_device(dev);
return count;
err_put_dev:
put_device(dev);
return rc;
}
static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);


@ -57,30 +57,31 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
unsigned long flags;
unsigned mux;
int ret = -EINVAL;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
mux = dma_spec->args[0];
if (mux >= dmamux->dma_master_requests) {
dev_err(&pdev->dev, "invalid mux number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
dev_err(&pdev->dev, "invalid dma mux value: %d\n",
dma_spec->args[1]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@ -89,7 +90,8 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
mux, mux, dmamux->muxes[mux].value);
of_node_put(dma_spec->np);
return ERR_PTR(-EBUSY);
ret = -EBUSY;
goto err_put_pdev;
}
dmamux->muxes[mux].busy = true;
@ -106,7 +108,14 @@ static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
dmamux->muxes[mux].value, mux);
put_device(&pdev->dev);
return &dmamux->muxes[mux];
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}
static int lpc18xx_dmamux_probe(struct platform_device *pdev)


@ -95,11 +95,12 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
struct lpc32xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
unsigned long flags;
struct lpc32xx_dmamux *mux = NULL;
int ret = -EINVAL;
int i;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
for (i = 0; i < ARRAY_SIZE(lpc32xx_muxes); i++) {
@ -111,20 +112,20 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
if (!mux) {
dev_err(&pdev->dev, "invalid mux request number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (dma_spec->args[2] > 1) {
dev_err(&pdev->dev, "invalid dma mux value: %d\n",
dma_spec->args[1]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
@ -133,7 +134,8 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_err(dev, "dma request signal %d busy, routed to %s\n",
mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
of_node_put(dma_spec->np);
return ERR_PTR(-EBUSY);
ret = -EBUSY;
goto err_put_pdev;
}
mux->busy = true;
@ -148,7 +150,14 @@ static void *lpc32xx_dmamux_reserve(struct of_phandle_args *dma_spec,
dev_dbg(dev, "dma request signal %d routed to %s\n",
mux->signal, mux->muxval ? mux->name_sel1 : mux->name_sel1);
put_device(&pdev->dev);
return mux;
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}
static int lpc32xx_dmamux_probe(struct platform_device *pdev)


@ -152,8 +152,8 @@ struct mmp_pdma_phy {
*
* Controller Configuration:
* @run_bits: Control bits in DCSR register for channel start/stop
* @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
* settings, or explicit mask like DMA_BIT_MASK(32/64)
* @dma_width: DMA addressing width in bits (32 or 64). Determines the
* DMA mask capability of the controller hardware.
*/
struct mmp_pdma_ops {
/* Hardware Register Operations */
@ -173,7 +173,7 @@ struct mmp_pdma_ops {
/* Controller Configuration */
u32 run_bits;
u64 dma_mask;
u32 dma_width;
};
struct mmp_pdma_device {
@ -928,6 +928,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
{
struct mmp_pdma_desc_sw *sw;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
unsigned long flags;
u64 curr;
u32 residue = 0;
bool passed = false;
@ -945,6 +946,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
else
curr = pdev->ops->read_src_addr(chan->phy);
spin_lock_irqsave(&chan->desc_lock, flags);
list_for_each_entry(sw, &chan->chain_running, node) {
u64 start, end;
u32 len;
@ -989,6 +992,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
continue;
if (sw->async_tx.cookie == cookie) {
spin_unlock_irqrestore(&chan->desc_lock, flags);
return residue;
} else {
residue = 0;
@ -996,6 +1000,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
}
}
spin_unlock_irqrestore(&chan->desc_lock, flags);
/* We should only get here in case of cyclic transactions */
return residue;
}
@ -1172,7 +1178,7 @@ static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
.get_desc_src_addr = get_desc_src_addr_32,
.get_desc_dst_addr = get_desc_dst_addr_32,
.run_bits = (DCSR_RUN),
.dma_mask = 0, /* let OF/platform set DMA mask */
.dma_width = 32,
};
static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
@ -1185,7 +1191,7 @@ static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
.get_desc_src_addr = get_desc_src_addr_64,
.get_desc_dst_addr = get_desc_dst_addr_64,
.run_bits = (DCSR_RUN | DCSR_LPAEEN),
.dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
.dma_width = 64,
};
static const struct of_device_id mmp_pdma_dt_ids[] = {
@ -1314,13 +1320,9 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
/* Set DMA mask based on ops->dma_mask, or OF/platform */
if (pdev->ops->dma_mask)
dma_set_mask(pdev->dev, pdev->ops->dma_mask);
else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
/* Set DMA mask based on controller hardware capabilities */
dma_set_mask_and_coherent(pdev->dev,
DMA_BIT_MASK(pdev->ops->dma_width));
ret = dma_async_device_register(&pdev->device);
if (ret) {


@ -1605,14 +1605,16 @@ static int
gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
{
struct gchan *gchan = to_gchan(chan);
void *new_config;
if (!config->peripheral_config)
return -EINVAL;
gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
if (!gchan->config)
new_config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
if (!new_config)
return -ENOMEM;
gchan->config = new_config;
memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
return 0;


@ -557,11 +557,16 @@ rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
struct rz_lmdesc *lmdesc = channel->lmdesc.base;
unsigned long flags;
unsigned int i;
LIST_HEAD(head);
rz_dmac_disable_hw(channel);
spin_lock_irqsave(&channel->vc.lock, flags);
for (i = 0; i < DMAC_NR_LMDESC; i++)
lmdesc[i].header = 0;
list_splice_tail_init(&channel->ld_active, &channel->ld_free);
list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
vchan_get_all_descriptors(&channel->vc, &head);
@ -854,6 +859,13 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
static void rz_dmac_put_device(void *_dev)
{
struct device *dev = _dev;
put_device(dev);
}
static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@ -876,6 +888,10 @@ static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
return -ENODEV;
}
ret = devm_add_action_or_reset(dev, rz_dmac_put_device, &dmac->icu.pdev->dev);
if (ret)
return ret;
dmac_index = args.args[0];
if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
@ -1055,8 +1071,6 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {


@ -90,23 +90,25 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
struct stm32_dmamux *mux;
u32 i, min, max;
int ret;
int ret = -EINVAL;
unsigned long flags;
if (dma_spec->args_count != 3) {
dev_err(&pdev->dev, "invalid number of dma mux args\n");
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
if (dma_spec->args[0] > dmamux->dmamux_requests) {
dev_err(&pdev->dev, "invalid mux request number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto err_put_pdev;
}
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
return ERR_PTR(-ENOMEM);
if (!mux) {
ret = -ENOMEM;
goto err_put_pdev;
}
spin_lock_irqsave(&dmamux->lock, flags);
mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
@ -116,7 +118,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
spin_unlock_irqrestore(&dmamux->lock, flags);
dev_err(&pdev->dev, "Run out of free DMA requests\n");
ret = -ENOMEM;
goto error_chan_id;
goto err_free_mux;
}
set_bit(mux->chan_id, dmamux->dma_inuse);
spin_unlock_irqrestore(&dmamux->lock, flags);
@ -133,8 +135,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
if (!dma_spec->np) {
dev_err(&pdev->dev, "can't get dma master\n");
ret = -EINVAL;
goto error;
goto err_clear_inuse;
}
/* Set dma request */
@ -142,7 +143,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
spin_unlock_irqrestore(&dmamux->lock, flags);
goto error;
goto err_put_dma_spec_np;
}
spin_unlock_irqrestore(&dmamux->lock, flags);
@ -160,13 +161,19 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
mux->request, mux->master, mux->chan_id);
put_device(&pdev->dev);
return mux;
error:
err_put_dma_spec_np:
of_node_put(dma_spec->np);
err_clear_inuse:
clear_bit(mux->chan_id, dmamux->dma_inuse);
error_chan_id:
err_free_mux:
kfree(mux);
err_put_pdev:
put_device(&pdev->dev);
return ERR_PTR(ret);
}


@ -429,10 +429,17 @@ static void tegra_adma_stop(struct tegra_adma_chan *tdc)
return;
}
kfree(tdc->desc);
vchan_terminate_vdesc(&tdc->desc->vd);
tdc->desc = NULL;
}
static void tegra_adma_synchronize(struct dma_chan *dc)
{
struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
vchan_synchronize(&tdc->vc);
}
static void tegra_adma_start(struct tegra_adma_chan *tdc)
{
struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
@ -1155,6 +1162,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->dma_dev.device_config = tegra_adma_slave_config;
tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
tdma->dma_dev.device_synchronize = tegra_adma_synchronize;
tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);


@ -79,34 +79,35 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
struct ti_am335x_xbar_map *map;
struct ti_am335x_xbar_map *map = ERR_PTR(-EINVAL);
if (dma_spec->args_count != 3)
return ERR_PTR(-EINVAL);
goto out_put_pdev;
if (dma_spec->args[2] >= xbar->xbar_events) {
dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
dma_spec->args[2]);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
if (dma_spec->args[0] >= xbar->dma_requests) {
dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
dma_spec->args[0]);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
return ERR_PTR(-ENOMEM);
map = ERR_PTR(-ENOMEM);
goto out_put_pdev;
}
map->dma_line = (u16)dma_spec->args[0];
@ -120,6 +121,9 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
out_put_pdev:
put_device(&pdev->dev);
return map;
}
@ -241,28 +245,26 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
{
struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
struct ti_dra7_xbar_map *map;
struct ti_dra7_xbar_map *map = ERR_PTR(-EINVAL);
if (dma_spec->args[0] >= xbar->xbar_requests) {
dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
dma_spec->args[0]);
put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
/* The of_node_put() will be done in the core for the node */
dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
if (!dma_spec->np) {
dev_err(&pdev->dev, "Can't get DMA master\n");
put_device(&pdev->dev);
return ERR_PTR(-EINVAL);
goto out_put_pdev;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
of_node_put(dma_spec->np);
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
map = ERR_PTR(-ENOMEM);
goto out_put_pdev;
}
mutex_lock(&xbar->mutex);
@ -273,8 +275,8 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
dev_err(&pdev->dev, "Run out of free DMA requests\n");
kfree(map);
of_node_put(dma_spec->np);
put_device(&pdev->dev);
return ERR_PTR(-ENOMEM);
map = ERR_PTR(-ENOMEM);
goto out_put_pdev;
}
set_bit(map->xbar_out, xbar->dma_inuse);
mutex_unlock(&xbar->mutex);
@ -288,6 +290,9 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
out_put_pdev:
put_device(&pdev->dev);
return map;
}

View file

@ -42,9 +42,9 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
ud = platform_get_drvdata(pdev);
put_device(&pdev->dev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}

View file

@ -1808,6 +1808,8 @@ static int omap_dma_probe(struct platform_device *pdev)
if (rc) {
pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
rc);
if (od->ll123_supported)
dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
return rc;
}
@ -1823,6 +1825,8 @@ static int omap_dma_probe(struct platform_device *pdev)
if (rc) {
pr_warn("OMAP-DMA: failed to register DMA controller\n");
dma_async_device_unregister(&od->ddev);
if (od->ll123_supported)
dma_pool_destroy(od->desc_pool);
omap_dma_free(od);
}
}

View file

@ -9,6 +9,7 @@
/* The length of register space exposed to host */
#define XDMA_REG_SPACE_LEN 65536
#define XDMA_MAX_REG_OFFSET (XDMA_REG_SPACE_LEN - 4)
/*
* maximum number of DMA channels for each direction:

View file

@ -38,7 +38,7 @@ static const struct regmap_config xdma_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = XDMA_REG_SPACE_LEN,
.max_register = XDMA_MAX_REG_OFFSET,
};
/**

View file

@ -131,6 +131,7 @@
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
#define XILINX_DMA_DFAULT_ADDRWIDTH 0x20
#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@ -3159,7 +3160,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct xilinx_dma_device *xdev;
struct device_node *child, *np = pdev->dev.of_node;
u32 num_frames, addr_width, len_width;
u32 num_frames, addr_width = XILINX_DMA_DFAULT_ADDRWIDTH, len_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@ -3235,7 +3236,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
if (err < 0)
dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
dev_warn(xdev->dev,
"missing xlnx,addrwidth property, using default value %d\n",
XILINX_DMA_DFAULT_ADDRWIDTH);
if (addr_width > 32)
xdev->ext_addr = true;

View file

@ -358,10 +358,11 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = nr_channels;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
sizeof(struct i3200_priv));
rc = -ENOMEM;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct i3200_priv));
if (!mci)
return -ENOMEM;
goto unmap;
edac_dbg(3, "MC: init mci\n");
@ -421,9 +422,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
edac_mc_free(mci);
unmap:
iounmap(window);
if (mci)
edac_mc_free(mci);
return rc;
}

View file

@ -341,9 +341,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = x38_channel_num;
layers[1].is_virt_csrow = false;
rc = -ENOMEM;
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
goto unmap;
edac_dbg(3, "MC: init mci\n");
@ -403,9 +406,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
return 0;
fail:
edac_mc_free(mci);
unmap:
iounmap(window);
if (mci)
edac_mc_free(mci);
return rc;
}

View file

@ -162,7 +162,7 @@ int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
len -= size;
str += size;
}
return len - buf_size;
return buf_size - len;
}
EXPORT_SYMBOL_GPL(cper_bits_to_str);

View file

@ -819,6 +819,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
if (tbl) {
phys_initrd_start = tbl->base;
phys_initrd_size = tbl->size;
tbl->base = tbl->size = 0;
early_memunmap(tbl, sizeof(*tbl));
}
}

View file

@ -6,6 +6,7 @@
* Copyright (c) 2007, MontaVista Software, Inc. <source@mvista.com>
*/
#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@ -109,6 +110,22 @@ davinci_direction_out(struct gpio_chip *chip, unsigned offset, int value)
return __davinci_direction(chip, offset, true, value);
}
static int davinci_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct davinci_gpio_controller *d = gpiochip_get_data(chip);
struct davinci_gpio_regs __iomem *g;
u32 mask = __gpio_mask(offset), val;
int bank = offset / 32;
g = d->regs[bank];
guard(spinlock_irqsave)(&d->lock);
val = readl_relaxed(&g->dir);
return (val & mask) ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT;
}
/*
* Read the pin's value (works even if it's set up as output);
* returns zero/nonzero.
@ -203,6 +220,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.get = davinci_gpio_get;
chips->chip.direction_output = davinci_direction_out;
chips->chip.set = davinci_gpio_set;
chips->chip.get_direction = davinci_get_direction;
chips->chip.ngpio = ngpio;
chips->chip.base = -1;

View file

@ -468,9 +468,6 @@ int gpiod_get_direction(struct gpio_desc *desc)
test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
if (!guard.gc->get_direction)
return -ENOTSUPP;
ret = gpiochip_get_direction(guard.gc, offset);
if (ret < 0)
return ret;

View file

@ -274,6 +274,8 @@ extern int amdgpu_rebar;
extern int amdgpu_wbrf;
extern int amdgpu_user_queue;
extern uint amdgpu_hdmi_hpd_debounce_delay_ms;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000

View file

@ -5063,6 +5063,14 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
/*
* device went through surprise hotplug; we need to destroy topology
* before ip_fini_early to prevent kfd locking refcount issues by calling
* amdgpu_amdkfd_suspend()
*/
if (drm_dev_is_unplugged(adev_to_drm(adev)))
amdgpu_amdkfd_device_fini_sw(adev);
amdgpu_device_ip_fini_early(adev);
amdgpu_irq_fini_hw(adev);

View file

@ -1880,7 +1880,12 @@ int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb)
{
struct amdgpu_bo *abo;
struct drm_framebuffer *fb = plane->state->fb;
struct drm_framebuffer *fb;
if (drm_drv_uses_atomic_modeset(plane->dev))
fb = plane->state->fb;
else
fb = plane->fb;
if (!fb)
return -EINVAL;

View file

@ -83,18 +83,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
/*
* Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
* Such buffers cannot be safely accessed over P2P due to device-local
* compression metadata. Fallback to system-memory path instead.
* Device supports GFX12 (GC 12.x or newer)
* BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag
*
*/
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
attach->peer2peer = false;
/*
* Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
* Such buffers cannot be safely accessed over P2P due to device-local

View file

@ -247,6 +247,7 @@ int amdgpu_damage_clips = -1; /* auto */
int amdgpu_umsch_mm_fwlog;
int amdgpu_rebar = -1; /* auto */
int amdgpu_user_queue = -1;
uint amdgpu_hdmi_hpd_debounce_delay_ms;
DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
"DRM_UT_CORE",
@ -1123,6 +1124,16 @@ module_param_named(rebar, amdgpu_rebar, int, 0444);
MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
module_param_named(user_queue, amdgpu_user_queue, int, 0444);
/*
* DOC: hdmi_hpd_debounce_delay_ms (uint)
* HDMI HPD disconnect debounce delay in milliseconds.
*
* Used to filter short disconnect->reconnect HPD toggles some HDMI sinks
 * generate while entering/leaving power save. Set to 0 (the default) to disable.
*/
MODULE_PARM_DESC(hdmi_hpd_debounce_delay_ms, "HDMI HPD disconnect debounce delay in milliseconds (0 to disable (by default), 1500 is common)");
module_param_named(hdmi_hpd_debounce_delay_ms, amdgpu_hdmi_hpd_debounce_delay_ms, uint, 0644);
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
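
As an aside on the new amdgpu.hdmi_hpd_debounce_delay_ms option added above: it is a plain module parameter, so under the usual module-parameter conventions it could be set roughly as sketched below. The option name and the 0644 permissions come from the diff; the sysfs path is the standard location for module parameters and is an assumption here, not something this patch adds explicitly.

    # sketch: enable a 1500 ms HDMI HPD disconnect debounce at module load time
    modprobe amdgpu hdmi_hpd_debounce_delay_ms=1500
    # or, since the parameter is registered with 0644 permissions,
    # adjust it at runtime via the standard module-parameter sysfs file
    echo 1500 > /sys/module/amdgpu/parameters/hdmi_hpd_debounce_delay_ms

A value of 0 keeps the debounce disabled (the default), and per the connector-init hunk further down the value is clamped to AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS.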

View file

@ -375,7 +375,7 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
* @start_page: first page to map in the GART aperture
* @num_pages: number of pages to be mapped
* @flags: page table entry flags
* @dst: CPU address of the GART table
* @dst: valid CPU address of GART table, cannot be null
*
* Binds a BO that is allocated in VRAM to the GART page table
* (all ASICs).
@ -396,7 +396,7 @@ void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
return;
for (i = 0; i < num_pages; ++i) {
amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
amdgpu_gmc_set_pte_pde(adev, dst,
start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
}

View file

@ -732,6 +732,10 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
return 0;
if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
return 0;
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,

View file

@ -885,12 +885,28 @@ static int amdgpu_userq_input_args_validate(struct drm_device *dev,
return 0;
}
bool amdgpu_userq_enabled(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int i;
for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
if (adev->userq_funcs[i])
return true;
}
return false;
}
int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_userq *args = data;
int r;
if (!amdgpu_userq_enabled(dev))
return -ENOTSUPP;
if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
return -EINVAL;

View file

@ -141,6 +141,7 @@ uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
struct drm_file *filp);
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
bool amdgpu_userq_enabled(struct drm_device *dev);
int amdgpu_userq_suspend(struct amdgpu_device *adev);
int amdgpu_userq_resume(struct amdgpu_device *adev);

View file

@ -141,6 +141,8 @@ static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
void
amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
{
dma_fence_put(userq->last_fence);
amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
xa_destroy(&userq->fence_drv_xa);
/* Drop the fence_drv reference held by user queue */
@ -471,6 +473,9 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_exec exec;
u64 wptr;
if (!amdgpu_userq_enabled(dev))
return -ENOTSUPP;
num_syncobj_handles = args->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
size_mul(sizeof(u32), num_syncobj_handles));
@ -653,6 +658,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
int r, i, rentry, wentry, cnt;
struct drm_exec exec;
if (!amdgpu_userq_enabled(dev))
return -ENOTSUPP;
num_read_bo_handles = wait_info->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
size_mul(sizeof(u32), num_read_bo_handles));

View file

@ -1069,9 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
if (!params->unlocked &&
/* SI doesn't support pasid or KIQ/MES */
params->adev->family > AMDGPU_FAMILY_SI) {
if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */

View file

@ -1235,16 +1235,16 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
break;
case AMDGPU_VM_MTYPE_WC:
*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
break;
case AMDGPU_VM_MTYPE_RW:
*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
break;
case AMDGPU_VM_MTYPE_CC:
*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
break;
case AMDGPU_VM_MTYPE_UC:
*flags |= AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
*flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
break;
}

View file

@ -1209,14 +1209,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
if (dqm->dev->kfd->shared_resources.enable_mes) {
if (dqm->dev->kfd->shared_resources.enable_mes)
pdd->last_evict_timestamp = get_jiffies_64();
retval = suspend_all_queues_mes(dqm);
if (retval) {
dev_err(dev, "Suspending all queues failed");
goto out;
}
}
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
@ -1246,10 +1240,6 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
} else {
retval = resume_all_queues_mes(dqm);
if (retval)
dev_err(dev, "Resuming all queues failed");
}
out:
@ -2919,6 +2909,14 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
return retval;
}
static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
}
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
{
struct device_queue_manager *dqm;
@ -3042,19 +3040,14 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
return dqm;
}
if (!dev->kfd->shared_resources.enable_mes)
deallocate_hiq_sdma_mqd(dev, &dqm->hiq_sdma_mqd);
out_free:
kfree(dqm);
return NULL;
}
static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem);
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
dqm->ops.stop(dqm);

View file

@ -5266,6 +5266,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
struct amdgpu_dm_backlight_caps *caps;
char bl_name[16];
int min, max;
int real_brightness;
int init_brightness;
if (aconnector->bl_idx == -1)
return;
@ -5290,6 +5292,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
init_brightness = props.brightness;
if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
props.scale = BACKLIGHT_SCALE_NON_LINEAR;
@ -5308,8 +5312,20 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
drm_err(drm, "DM: Backlight registration failed!\n");
dm->backlight_dev[aconnector->bl_idx] = NULL;
} else
} else {
/*
* dm->brightness[x] can be inconsistent just after startup until
* ops.get_brightness is called.
*/
real_brightness =
amdgpu_dm_backlight_ops.get_brightness(dm->backlight_dev[aconnector->bl_idx]);
if (real_brightness != init_brightness) {
dm->actual_brightness[aconnector->bl_idx] = real_brightness;
dm->brightness[aconnector->bl_idx] = real_brightness;
}
drm_dbg_driver(drm, "DM: Registered Backlight device: %s\n", bl_name);
}
}
static int initialize_plane(struct amdgpu_display_manager *dm,
@ -5626,7 +5642,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (psr_feature_enabled) {
amdgpu_dm_set_psr_caps(link);
drm_info(adev_to_drm(adev), "PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
drm_info(adev_to_drm(adev), "%s: PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n",
aconnector->base.name,
link->psr_settings.psr_feature_enabled,
link->psr_settings.psr_version,
link->dpcd_caps.psr_info.psr_version,
@ -8930,9 +8947,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
aconnector->hdmi_prev_sink = NULL;
/*
* If HDMI HPD debounce delay is set, use the minimum between selected
* value and AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS
*/
if (amdgpu_hdmi_hpd_debounce_delay_ms) {
aconnector->hdmi_hpd_debounce_delay_ms = min(amdgpu_hdmi_hpd_debounce_delay_ms,
AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS);
INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
aconnector->hdmi_prev_sink = NULL;
} else {
aconnector->hdmi_hpd_debounce_delay_ms = 0;
}
/*
* configure support HPD hot plug connector_>polled default value is 0

View file

@ -59,7 +59,10 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
* Maximum HDMI HPD debounce delay in milliseconds
*/
#define AMDGPU_DM_MAX_HDMI_HPD_DEBOUNCE_MS 5000
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"

View file

@ -41,7 +41,7 @@
/* kHZ*/
#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
/* kHZ*/
#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 340000
struct dp_hdmi_dongle_signature_data {
int8_t id[15];/* "DP-HDMI ADAPTOR"*/

View file

@ -336,7 +336,7 @@ static void query_dp_dual_mode_adaptor(
/* Assume we have no valid DP passive dongle connected */
*dongle = DISPLAY_DONGLE_NONE;
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
/* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
if (!i2c_read(
@ -392,6 +392,8 @@ static void query_dp_dual_mode_adaptor(
}
}
if (is_valid_hdmi_signature)
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
if (is_type2_dongle) {
uint32_t max_tmds_clk =

View file

@ -1702,8 +1702,9 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
CustomSkuTable_t *skutable = &pptable->CustomSkuTable;
uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
int16_t od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
uint32_t power_limit;
if (smu_v14_0_get_current_power_limit(smu, &power_limit))
power_limit = smu->adev->pm.ac_power ?

View file

@ -163,6 +163,7 @@ struct dw_hdmi_qp {
unsigned long ref_clk_rate;
struct regmap *regm;
int main_irq;
unsigned long tmds_char_rate;
};
@ -1271,6 +1272,7 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
dw_hdmi_qp_init_hw(hdmi);
hdmi->main_irq = plat_data->main_irq;
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
dw_hdmi_qp_main_hardirq, NULL,
IRQF_SHARED, dev_name(dev), hdmi);
@ -1331,9 +1333,16 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);
void dw_hdmi_qp_suspend(struct device *dev, struct dw_hdmi_qp *hdmi)
{
disable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_suspend);
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_init_hw(hdmi);
enable_irq(hdmi->main_irq);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);

View file

@ -1602,24 +1602,23 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_create);
/*
* drm_gpuvm_bo_destroy_not_in_lists() - final part of drm_gpuvm_bo cleanup
* @vm_bo: the &drm_gpuvm_bo to destroy
*
* It is illegal to call this method if the @vm_bo is present in the GEMs gpuva
* list, the extobj list, or the evicted list.
*
* Note that this puts a refcount on the GEM object, which may destroy the GEM
* object if the refcount reaches zero. It's illegal for this to happen if the
* caller holds the GEMs gpuva mutex because it would free the mutex.
*/
static void
drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_destroy_not_in_lists(struct drm_gpuvm_bo *vm_bo)
{
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
kref);
struct drm_gpuvm *gpuvm = vm_bo->vm;
const struct drm_gpuvm_ops *ops = gpuvm->ops;
struct drm_gem_object *obj = vm_bo->obj;
bool lock = !drm_gpuvm_resv_protected(gpuvm);
if (!lock)
drm_gpuvm_resv_assert_held(gpuvm);
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
ops->vm_bo_free(vm_bo);
@ -1630,6 +1629,35 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gem_object_put(obj);
}
static void
drm_gpuvm_bo_destroy_not_in_lists_kref(struct kref *kref)
{
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
kref);
drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
static void
drm_gpuvm_bo_destroy(struct kref *kref)
{
struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
kref);
struct drm_gpuvm *gpuvm = vm_bo->vm;
bool lock = !drm_gpuvm_resv_protected(gpuvm);
if (!lock)
drm_gpuvm_resv_assert_held(gpuvm);
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
drm_gem_gpuva_assert_lock_held(gpuvm, vm_bo->obj);
list_del(&vm_bo->list.entry.gem);
drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
/**
* drm_gpuvm_bo_put() - drop a struct drm_gpuvm_bo reference
* @vm_bo: the &drm_gpuvm_bo to release the reference of
@ -1745,9 +1773,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
void
drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
struct drm_gpuvm_bo *vm_bo;
struct drm_gem_object *obj;
struct llist_node *bo_defer;
bo_defer = llist_del_all(&gpuvm->bo_defer);
@ -1766,14 +1792,7 @@ drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
while (bo_defer) {
vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
bo_defer = bo_defer->next;
obj = vm_bo->obj;
if (ops && ops->vm_bo_free)
ops->vm_bo_free(vm_bo);
else
kfree(vm_bo);
drm_gpuvm_put(gpuvm);
drm_gem_object_put(obj);
drm_gpuvm_bo_destroy_not_in_lists(vm_bo);
}
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
@ -1861,6 +1880,9 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain);
* count is decreased. If not found @__vm_bo is returned without further
* increase of the reference count.
*
* The provided @__vm_bo must not already be in the gpuva, evict, or extobj
* lists prior to calling this method.
*
* A new &drm_gpuvm_bo is added to the GEMs gpuva list.
*
* Returns: a pointer to the found &drm_gpuvm_bo or @__vm_bo if no existing
@ -1873,14 +1895,19 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
struct drm_gem_object *obj = __vm_bo->obj;
struct drm_gpuvm_bo *vm_bo;
drm_WARN_ON(gpuvm->drm, !drm_gpuvm_immediate_mode(gpuvm));
mutex_lock(&obj->gpuva.lock);
vm_bo = drm_gpuvm_bo_find(gpuvm, obj);
if (vm_bo) {
drm_gpuvm_bo_put(__vm_bo);
mutex_unlock(&obj->gpuva.lock);
kref_put(&__vm_bo->kref, drm_gpuvm_bo_destroy_not_in_lists_kref);
return vm_bo;
}
drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
mutex_unlock(&obj->gpuva.lock);
return __vm_bo;
}

View file

@ -457,27 +457,20 @@ int gud_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
struct drm_crtc_state *crtc_state = NULL;
const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
const struct drm_format_info *format = fb->format;
const struct drm_format_info *format;
struct drm_connector *connector;
unsigned int i, num_properties;
struct gud_state_req *req;
int idx, ret;
size_t len;
if (drm_WARN_ON_ONCE(plane->dev, !fb))
return -EINVAL;
if (drm_WARN_ON_ONCE(plane->dev, !crtc))
return -EINVAL;
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
mode = &crtc_state->mode;
if (crtc)
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
@ -492,6 +485,9 @@ int gud_plane_atomic_check(struct drm_plane *plane,
if (old_plane_state->rotation != new_plane_state->rotation)
crtc_state->mode_changed = true;
mode = &crtc_state->mode;
format = fb->format;
if (old_fb && old_fb->format != format)
crtc_state->mode_changed = true;
@ -598,7 +594,7 @@ void gud_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_helper_damage_iter iter;
int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
if (!crtc || crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
mutex_lock(&gdrm->damage_lock);
if (gdrm->fb) {

View file

@ -686,7 +686,7 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
}
/* This list includes registers that are useful in debugging GuC hangs. */
const struct {
static const struct {
u32 start;
u32 count;
} guc_hw_reg_state[] = {

View file

@ -84,6 +84,7 @@ curs507a_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
asyh->curs.handle = handle;
asyh->curs.offset = offset;
asyh->set.curs = asyh->curs.visible;
nv50_atom(asyh->state.state)->lock_core = true;
}
}

View file

@ -43,6 +43,9 @@ nv50_head_flush_clr(struct nv50_head *head,
union nv50_head_atom_mask clr = {
.mask = asyh->clr.mask & ~(flush ? 0 : asyh->set.mask),
};
lockdep_assert_held(&head->disp->mutex);
if (clr.crc) nv50_crc_atomic_clr(head);
if (clr.olut) head->func->olut_clr(head);
if (clr.core) head->func->core_clr(head);
@ -65,6 +68,8 @@ nv50_head_flush_set_wndw(struct nv50_head *head, struct nv50_head_atom *asyh)
void
nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
lockdep_assert_held(&head->disp->mutex);
if (asyh->set.view ) head->func->view (head, asyh);
if (asyh->set.mode ) head->func->mode (head, asyh);
if (asyh->set.core ) head->func->core_set(head, asyh);

View file

@ -623,8 +623,61 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
if (IS_ERR(desc))
return ERR_CAST(desc);
connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
case 0:
dev_warn(dev, "Specify missing connector_type\n");
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
case DRM_MODE_CONNECTOR_LVDS:
WARN_ON(desc->bus_flags &
~(DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB));
WARN_ON(desc->bus_format != MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA);
WARN_ON(desc->bus_format == MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bpc != 6);
WARN_ON((desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ||
desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) &&
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
return ERR_PTR(-EINVAL);
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
case DRM_MODE_CONNECTOR_DPI:
bus_flags = DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB |
DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
if (desc->bus_flags & ~bus_flags)
dev_warn(dev, "Unexpected bus_flags(%d)\n", desc->bus_flags & ~bus_flags);
if (!(desc->bus_flags & bus_flags))
dev_warn(dev, "Specify missing bus_flags\n");
if (desc->bus_format == 0)
dev_warn(dev, "Specify missing bus_format\n");
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
default:
dev_warn(dev, "Specify a valid connector_type: %d\n", desc->connector_type);
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
}
panel = devm_drm_panel_alloc(dev, struct panel_simple, base,
&panel_simple_funcs, desc->connector_type);
&panel_simple_funcs, connector_type);
if (IS_ERR(panel))
return ERR_CAST(panel);
@ -666,60 +719,6 @@ static struct panel_simple *panel_simple_probe(struct device *dev)
goto free_ddc;
}
connector_type = desc->connector_type;
/* Catch common mistakes for panels. */
switch (connector_type) {
case 0:
dev_warn(dev, "Specify missing connector_type\n");
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
case DRM_MODE_CONNECTOR_LVDS:
WARN_ON(desc->bus_flags &
~(DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB));
WARN_ON(desc->bus_format != MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_SPWG &&
desc->bus_format != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA);
WARN_ON(desc->bus_format == MEDIA_BUS_FMT_RGB666_1X7X3_SPWG &&
desc->bpc != 6);
WARN_ON((desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG ||
desc->bus_format == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA) &&
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
dev_warn(dev, "eDP panels moved to panel-edp\n");
err = -EINVAL;
goto free_ddc;
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
case DRM_MODE_CONNECTOR_DPI:
bus_flags = DRM_BUS_FLAG_DE_LOW |
DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
DRM_BUS_FLAG_DATA_MSB_TO_LSB |
DRM_BUS_FLAG_DATA_LSB_TO_MSB |
DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE |
DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
if (desc->bus_flags & ~bus_flags)
dev_warn(dev, "Unexpected bus_flags(%d)\n", desc->bus_flags & ~bus_flags);
if (!(desc->bus_flags & bus_flags))
dev_warn(dev, "Specify missing bus_flags\n");
if (desc->bus_format == 0)
dev_warn(dev, "Specify missing bus_format\n");
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
break;
default:
dev_warn(dev, "Specify a valid connector_type: %d\n", desc->connector_type);
connector_type = DRM_MODE_CONNECTOR_DPI;
break;
}
dev_set_drvdata(dev, panel);
/*
@ -1900,6 +1899,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {

View file

@ -1252,17 +1252,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
goto err_cleanup;
}
/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
* pre-allocated BO if the <BO,VM> association exists. Given we
* only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
* be called immediately, and we have to hold the VM resv lock when
* calling this function.
*/
dma_resv_lock(panthor_vm_resv(vm), NULL);
mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(panthor_vm_resv(vm));
op_ctx->map.bo_offset = offset;

View file

@ -121,7 +121,7 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
struct drm_crtc *crtc = encoder->crtc;
/* Unconditionally switch to TMDS as FRL is not yet supported */
gpiod_set_value(hdmi->frl_enable_gpio, 0);
gpiod_set_value_cansleep(hdmi->frl_enable_gpio, 0);
if (!crtc || !crtc->state)
return;
@ -640,6 +640,15 @@ static void dw_hdmi_qp_rockchip_remove(struct platform_device *pdev)
component_del(&pdev->dev, &dw_hdmi_qp_rockchip_ops);
}
static int __maybe_unused dw_hdmi_qp_rockchip_suspend(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
dw_hdmi_qp_suspend(dev, hdmi->hdmi);
return 0;
}
static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
{
struct rockchip_hdmi_qp *hdmi = dev_get_drvdata(dev);
@ -655,7 +664,8 @@ static int __maybe_unused dw_hdmi_qp_rockchip_resume(struct device *dev)
}
static const struct dev_pm_ops dw_hdmi_qp_rockchip_pm = {
SET_SYSTEM_SLEEP_PM_OPS(NULL, dw_hdmi_qp_rockchip_resume)
SET_SYSTEM_SLEEP_PM_OPS(dw_hdmi_qp_rockchip_suspend,
dw_hdmi_qp_rockchip_resume)
};
struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver = {

View file

@ -2104,7 +2104,7 @@ static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2)
 * Spin until the previous port_mux configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel,
port_mux_sel == vop2->old_port_sel, 0, 50 * 1000);
port_mux_sel == vop2->old_port_sel, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n",
port_mux_sel, vop2->old_port_sel);
@ -2124,7 +2124,7 @@ static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg)
* Spin until the previous layer configuration is done.
*/
ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg,
atv_layer_cfg == cfg, 0, 50 * 1000);
atv_layer_cfg == cfg, 10, 50 * 1000);
if (ret)
DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n",
atv_layer_cfg, cfg);
@ -2144,6 +2144,7 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
u8 layer_sel_id;
unsigned int ofs;
u32 ovl_ctrl;
u32 cfg_done;
int i;
struct vop2_video_port *vp0 = &vop2->vps[0];
struct vop2_video_port *vp1 = &vop2->vps[1];
@ -2298,8 +2299,16 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp)
rk3568_vop2_wait_for_port_mux_done(vop2);
}
if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel)
rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) {
cfg_done = vop2_readl(vop2, RK3568_REG_CFG_DONE);
cfg_done &= (BIT(vop2->data->nr_vps) - 1);
cfg_done &= ~BIT(vp->id);
/*
* Changes of other VPs' overlays have not taken effect
*/
if (cfg_done)
rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel);
}
vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel);
mutex_unlock(&vop2->ovl_lock);

View file

@ -54,15 +54,6 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
const struct screen_info *si);
#endif
/*
* Input parsing
*/
int drm_sysfb_get_validated_int(struct drm_device *dev, const char *name,
u64 value, u32 max);
int drm_sysfb_get_validated_int0(struct drm_device *dev, const char *name,
u64 value, u32 max);
/*
* Display modes
*/

View file

@ -32,9 +32,15 @@
#include <drm/ttm/ttm_placement.h>
static void vmw_bo_release(struct vmw_bo *vbo)
/**
* vmw_bo_free - vmw_bo destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_resource *res;
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
@ -62,20 +68,8 @@ static void vmw_bo_release(struct vmw_bo *vbo)
}
vmw_surface_unreference(&vbo->dumb_surface);
}
drm_gem_object_release(&vbo->tbo.base);
}
/**
* vmw_bo_free - vmw_bo destructor
*
* @bo: Pointer to the embedded struct ttm_buffer_object
*/
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_bo *vbo = to_vmw_bo(&bo->base);
WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
vmw_bo_release(vbo);
drm_gem_object_release(&vbo->tbo.base);
WARN_ON(vbo->dirty);
kfree(vbo);
}

View file

@ -515,12 +515,12 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
/**
* vmw_event_fence_action_seq_passed
*
* @action: The struct vmw_fence_action embedded in a struct
* vmw_event_fence_action.
* @f: The struct dma_fence which provides timestamp for the action event
* @cb: The struct dma_fence_cb callback for the action event.
*
* This function is called when the seqno of the fence where @action is
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context.
* This function is called when the seqno of the fence has passed
* and it is always called from atomic context.
* It queues the event on the submitter's event list.
*/
static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
struct dma_fence_cb *cb)

Some files were not shown because too many files have changed in this diff.