Merge tag 'asoc-fix-v7.0-rc2' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v7.0

A moderately large pile of fixes, though none of them are super major,
plus a few new quirks and device IDs.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmmpqoUACgkQJNaLcl1U
h9BTHQf+MnXcY+kp+Ou9SsrMyrZE7htz3tbjF1SLSAyZeM7OysDVPiHnx2qkDT4N
gM/I3lTmF3nyK2CN0aKvTcUQml0Hdjs3lEDO59uIeBI6SK7tcrQQkh74N2EI4UgH
rbLusOWMGIoUONXZQNns0zpYx8W562SgcW3O6ZW3cRJo6rwKd1UPlM6ywgPtYC+j
Eb7MPjaz1Q2+IReQ6ewOD68k07PWB7cyYjqlQj/v2GT09MFP5uDmOIw82igsqw+Z
af7yKDbg7QNCGgYYu3OT5l/zGBm1CqDMxvgUNtf9d2MN9+hjyI2i3ZtzvDNj6cAl
IUBgqZXE/Tf2tJIMIK77fSFJBYWNGw==
=v+Al
-----END PGP SIGNATURE-----

commit 8457669db9
Author: Takashi Iwai
Date:   2026-03-05 17:22:14 +01:00

463 changed files with 5412 additions and 2398 deletions

@@ -210,7 +210,12 @@ Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
 Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
 Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
+Daniel Lezcano <daniel.lezcano@kernel.org> <daniel.lezcano@linaro.org>
+Daniel Lezcano <daniel.lezcano@kernel.org> <daniel.lezcano@free.fr>
+Daniel Lezcano <daniel.lezcano@kernel.org> <daniel.lezcano@linexp.org>
+Daniel Lezcano <daniel.lezcano@kernel.org> <dlezcano@fr.ibm.com>
 Daniel Thompson <danielt@kernel.org> <daniel.thompson@linaro.org>
+Daniele Alessandrelli <daniele.alessandrelli@gmail.com> <daniele.alessandrelli@intel.com>
 Danilo Krummrich <dakr@kernel.org> <dakr@redhat.com>
 David Brownell <david-b@pacbell.net>
 David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
@@ -876,6 +881,7 @@ Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vlastimil Babka <vbabka@kernel.org> <vbabka@suse.cz>
 WangYuli <wangyuli@aosc.io> <wangyl5933@chinaunicom.cn>
 WangYuli <wangyuli@aosc.io> <wangyuli@deepin.org>
 Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
@@ -890,7 +896,8 @@ Yanteng Si <si.yanteng@linux.dev> <siyanteng@loongson.cn>
 Ying Huang <huang.ying.caritas@gmail.com> <ying.huang@intel.com>
 Yixun Lan <dlan@kernel.org> <dlan@gentoo.org>
 Yixun Lan <dlan@kernel.org> <yixun.lan@amlogic.com>
-Yosry Ahmed <yosry.ahmed@linux.dev> <yosryahmed@google.com>
+Yosry Ahmed <yosry@kernel.org> <yosryahmed@google.com>
+Yosry Ahmed <yosry@kernel.org> <yosry.ahmed@linux.dev>
 Yu-Chun Lin <eleanor.lin@realtek.com> <eleanor15x@gmail.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>

@@ -594,6 +594,9 @@ Values:
   their sockets will only be able to connect within their own
   namespace.
 
+The first write to ``child_ns_mode`` locks its value. Subsequent writes of the
+same value succeed, but writing a different value returns ``-EBUSY``.
+
 Changing ``child_ns_mode`` only affects namespaces created after the change;
 it does not modify the current namespace or any existing children.
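A small C sketch of the write-once behaviour documented above (illustrative only: the sysctl's full path is not shown in this hunk, so the open() argument below is a placeholder):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder path; substitute the real child_ns_mode location. */
	int fd = open("/proc/sys/.../child_ns_mode", O_WRONLY);

	write(fd, "1", 1);	/* first write locks the value */
	write(fd, "1", 1);	/* rewriting the same value succeeds */
	if (write(fd, "0", 1) < 0 && errno == EBUSY)
		puts("locked: a different value is rejected with -EBUSY");

	close(fd);
	return 0;
}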

@@ -287,7 +287,7 @@ examples:
           regulator-max-microvolt = <1700000>;
         };
         mt6359_vrfck_1_ldo_reg: ldo_vrfck_1 {
-          regulator-name = "vrfck";
+          regulator-name = "vrfck_1";
           regulator-min-microvolt = <1240000>;
           regulator-max-microvolt = <1600000>;
         };
@@ -309,7 +309,7 @@ examples:
           regulator-max-microvolt = <3300000>;
         };
         mt6359_vemc_1_ldo_reg: ldo_vemc_1 {
-          regulator-name = "vemc";
+          regulator-name = "vemc_1";
           regulator-min-microvolt = <2500000>;
           regulator-max-microvolt = <3300000>;
         };

@@ -23,6 +23,7 @@ properties:
     enum:
       - nvidia,tegra210-audio-graph-card
       - nvidia,tegra186-audio-graph-card
+      - nvidia,tegra238-audio-graph-card
       - nvidia,tegra264-audio-graph-card
 
   clocks:

@@ -20,6 +20,7 @@ properties:
           - renesas,r9a07g044-ssi # RZ/G2{L,LC}
           - renesas,r9a07g054-ssi # RZ/V2L
           - renesas,r9a08g045-ssi # RZ/G3S
+          - renesas,r9a08g046-ssi # RZ/G3L
       - const: renesas,rz-ssi
 
   reg:

@@ -22,21 +22,6 @@ allOf:
       properties:
         reg:
           minItems: 2
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - baikal,bt1-sys-ssi
-    then:
-      properties:
-        mux-controls:
-          maxItems: 1
-      required:
-        - mux-controls
-    else:
-      required:
-        - interrupts
   - if:
       properties:
         compatible:
@@ -75,10 +60,6 @@ properties:
           const: intel,mountevans-imc-ssi
       - description: AMD Pensando Elba SoC SPI Controller
         const: amd,pensando-elba-spi
-      - description: Baikal-T1 SPI Controller
-        const: baikal,bt1-ssi
-      - description: Baikal-T1 System Boot SPI Controller
-        const: baikal,bt1-sys-ssi
       - description: Canaan Kendryte K210 SoS SPI Controller
         const: canaan,k210-spi
       - description: Renesas RZ/N1 SPI Controller
@@ -170,6 +151,7 @@ required:
   - "#address-cells"
   - "#size-cells"
   - clocks
+  - interrupts
 
 examples:
   - |
@@ -190,15 +172,4 @@ examples:
         rx-sample-delay-ns = <7>;
       };
     };
-  - |
-    spi@1f040100 {
-      compatible = "baikal,bt1-sys-ssi";
-      reg = <0x1f040100 0x900>,
-            <0x1c000000 0x1000000>;
-      #address-cells = <1>;
-      #size-cells = <0>;
-      mux-controls = <&boot_mux>;
-      clocks = <&ccu_sys>;
-      clock-names = "ssi_clk";
-    };
 ...

@@ -1396,7 +1396,10 @@ or its flags may be modified, but it may not be resized.
 Memory for the region is taken starting at the address denoted by the
 field userspace_addr, which must point at user addressable memory for
 the entire memory slot size. Any object may back this memory, including
-anonymous memory, ordinary files, and hugetlbfs.
+anonymous memory, ordinary files, and hugetlbfs. Changes in the backing
+of the memory region are automatically reflected into the guest.
+For example, an mmap() that affects the region will be made visible
+immediately. Another example is madvise(MADV_DROP).
 
 On architectures that support a form of address tagging, userspace_addr must
 be an untagged address.
@@ -1412,11 +1415,6 @@ use it. The latter can be set, if KVM_CAP_READONLY_MEM capability allows it,
 to make a new slot read-only. In this case, writes to this memory will be
 posted to userspace as KVM_EXIT_MMIO exits.
 
-When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
-the memory region are automatically reflected into the guest. For example, an
-mmap() that affects the region will be made visible immediately. Another
-example is madvise(MADV_DROP).
-
 For TDX guest, deleting/moving memory region loses guest memory contents.
 Read only region isn't supported. Only as-id 0 is supported.
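For orientation, a minimal userspace sketch of the slot setup this text describes (error handling trimmed; the guest physical address and size are arbitrary):

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* Anonymous memory backing the slot; a file or hugetlbfs work too. */
	unsigned long size = 1 << 20;
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0x100000,
		.memory_size = size,
		.userspace_addr = (unsigned long)mem,
	};
	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);

	/* Replacing the backing in place, e.g. with MAP_FIXED, is
	 * reflected into the guest without touching the slot again. */
	mmap(mem, size, PROT_READ | PROT_WRITE,
	     MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	return 0;
}

Because the slot only records userspace_addr, changing what that address maps to is sufficient; no further KVM ioctl is needed.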

@@ -1292,7 +1292,6 @@ F: include/trace/events/amdxdna.h
 F: include/uapi/drm/amdxdna_accel.h
 
 AMD XGBE DRIVER
-M: "Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>
 M: Raju Rangoju <Raju.Rangoju@amd.com>
 L: netdev@vger.kernel.org
 S: Maintained
@@ -6213,20 +6212,20 @@ F: drivers/scsi/fnic/
 CISCO SCSI HBA DRIVER
 M: Karan Tilak Kumar <kartilak@cisco.com>
-M: Narsimhulu Musini <nmusini@cisco.com>
 M: Sesidhar Baddela <sebaddel@cisco.com>
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: drivers/scsi/snic/
 
 CISCO VIC ETHERNET NIC DRIVER
-M: Christian Benvenuti <benve@cisco.com>
 M: Satish Kharat <satishkh@cisco.com>
 S: Maintained
 F: drivers/net/ethernet/cisco/enic/
 
 CISCO VIC LOW LATENCY NIC DRIVER
-M: Christian Benvenuti <benve@cisco.com>
 M: Nelson Escobar <neescoba@cisco.com>
+M: Satish Kharat <satishkh@cisco.com>
 S: Supported
 F: drivers/infiniband/hw/usnic/
@@ -6280,7 +6279,7 @@ S: Maintained
 F: include/linux/clk.h
 
 CLOCKSOURCE, CLOCKEVENT DRIVERS
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 M: Thomas Gleixner <tglx@kernel.org>
 L: linux-kernel@vger.kernel.org
 S: Supported
@@ -6669,7 +6668,7 @@ F: rust/kernel/cpu.rs
 
 CPU IDLE TIME MANAGEMENT FRAMEWORK
 M: "Rafael J. Wysocki" <rafael@kernel.org>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 R: Christian Loehle <christian.loehle@arm.com>
 L: linux-pm@vger.kernel.org
 S: Maintained
@@ -6699,7 +6698,7 @@ F: arch/x86/kernel/msr.c
 
 CPUIDLE DRIVER - ARM BIG LITTLE
 M: Lorenzo Pieralisi <lpieralisi@kernel.org>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 L: linux-pm@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -6707,7 +6706,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F: drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVER - ARM EXYNOS
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 M: Kukjin Kim <kgene@kernel.org>
 R: Krzysztof Kozlowski <krzk@kernel.org>
 L: linux-pm@vger.kernel.org
@@ -14412,9 +14411,9 @@ LANTIQ PEF2256 DRIVER
 M: Herve Codina <herve.codina@bootlin.com>
 S: Maintained
 F: Documentation/devicetree/bindings/net/lantiq,pef2256.yaml
-F: drivers/net/wan/framer/pef2256/
+F: drivers/net/wan/framer/
 F: drivers/pinctrl/pinctrl-pef2256.c
-F: include/linux/framer/pef2256.h
+F: include/linux/framer/
 
 LASI 53c700 driver for PARISC
 M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -16656,7 +16655,7 @@ M: Andrew Morton <akpm@linux-foundation.org>
 M: David Hildenbrand <david@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Mike Rapoport <rppt@kernel.org>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
@@ -16786,7 +16785,7 @@ M: Andrew Morton <akpm@linux-foundation.org>
 M: David Hildenbrand <david@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Mike Rapoport <rppt@kernel.org>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
@@ -16841,7 +16840,7 @@ F: mm/oom_kill.c
 
 MEMORY MANAGEMENT - PAGE ALLOCATOR
 M: Andrew Morton <akpm@linux-foundation.org>
-M: Vlastimil Babka <vbabka@suse.cz>
+M: Vlastimil Babka <vbabka@kernel.org>
 R: Suren Baghdasaryan <surenb@google.com>
 R: Michal Hocko <mhocko@suse.com>
 R: Brendan Jackman <jackmanb@google.com>
@@ -16887,7 +16886,7 @@ M: David Hildenbrand <david@kernel.org>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 R: Rik van Riel <riel@surriel.com>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Harry Yoo <harry.yoo@oracle.com>
 R: Jann Horn <jannh@google.com>
 L: linux-mm@kvack.org
@@ -16986,7 +16985,7 @@ MEMORY MAPPING
 M: Andrew Morton <akpm@linux-foundation.org>
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Jann Horn <jannh@google.com>
 R: Pedro Falcato <pfalcato@suse.de>
 L: linux-mm@kvack.org
@@ -17016,7 +17015,7 @@ M: Andrew Morton <akpm@linux-foundation.org>
 M: Suren Baghdasaryan <surenb@google.com>
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Shakeel Butt <shakeel.butt@linux.dev>
 L: linux-mm@kvack.org
 S: Maintained
@@ -17032,7 +17031,7 @@ M: Andrew Morton <akpm@linux-foundation.org>
 M: Liam R. Howlett <Liam.Howlett@oracle.com>
 M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
 M: David Hildenbrand <david@kernel.org>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Jann Horn <jannh@google.com>
 L: linux-mm@kvack.org
 S: Maintained
@@ -20509,7 +20508,7 @@ F: Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
 F: drivers/pci/controller/dwc/pcie-kirin.c
 
 PCIE DRIVER FOR HISILICON STB
-M: Shawn Guo <shawn.guo@linaro.org>
+M: Shawn Guo <shawnguo@kernel.org>
 L: linux-pci@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
@@ -21695,7 +21694,7 @@ S: Maintained
 F: drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM ETHQOS ETHERNET DRIVER
-M: Vinod Koul <vkoul@kernel.org>
+M: Mohd Ayaan Anwar <mohd.anwar@oss.qualcomm.com>
 L: netdev@vger.kernel.org
 L: linux-arm-msm@vger.kernel.org
 S: Maintained
@@ -23174,7 +23173,7 @@ K: \b(?i:rust)\b
 
 RUST [ALLOC]
 M: Danilo Krummrich <dakr@kernel.org>
 R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
-R: Vlastimil Babka <vbabka@suse.cz>
+R: Vlastimil Babka <vbabka@kernel.org>
 R: Liam R. Howlett <Liam.Howlett@oracle.com>
 R: Uladzislau Rezki <urezki@gmail.com>
 L: rust-for-linux@vger.kernel.org
@@ -24350,7 +24349,7 @@ F: Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
 F: drivers/nvmem/layouts/sl28vpd.c
 
 SLAB ALLOCATOR
-M: Vlastimil Babka <vbabka@suse.cz>
+M: Vlastimil Babka <vbabka@kernel.org>
 M: Andrew Morton <akpm@linux-foundation.org>
 R: Christoph Lameter <cl@gentwo.org>
 R: David Rientjes <rientjes@google.com>
@@ -26217,7 +26216,7 @@ F: drivers/media/radio/radio-raremono.c
 
 THERMAL
 M: Rafael J. Wysocki <rafael@kernel.org>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 R: Zhang Rui <rui.zhang@intel.com>
 R: Lukasz Luba <lukasz.luba@arm.com>
 L: linux-pm@vger.kernel.org
@@ -26247,7 +26246,7 @@ F: drivers/thermal/amlogic_thermal.c
 
 THERMAL/CPU_COOLING
 M: Amit Daniel Kachhap <amit.kachhap@gmail.com>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Daniel Lezcano <daniel.lezcano@kernel.org>
 M: Viresh Kumar <viresh.kumar@linaro.org>
 R: Lukasz Luba <lukasz.luba@arm.com>
 L: linux-pm@vger.kernel.org
@@ -29186,7 +29185,7 @@ K: zstd
 
 ZSWAP COMPRESSED SWAP CACHING
 M: Johannes Weiner <hannes@cmpxchg.org>
-M: Yosry Ahmed <yosry.ahmed@linux.dev>
+M: Yosry Ahmed <yosry@kernel.org>
 M: Nhat Pham <nphamcs@gmail.com>
 R: Chengming Zhou <chengming.zhou@linux.dev>
 L: linux-mm@kvack.org

@@ -2,7 +2,7 @@
 VERSION = 7
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*

@@ -264,19 +264,33 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
 				   pgprot_t *prot);
 int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
+void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
+
+static inline void __iomem *ioremap_prot(phys_addr_t phys, size_t size,
+					 pgprot_t user_prot)
+{
+	pgprot_t prot;
+	ptdesc_t user_prot_val = pgprot_val(user_prot);
+
+	if (WARN_ON_ONCE(!(user_prot_val & PTE_USER)))
+		return NULL;
+
+	prot = __pgprot_modify(PAGE_KERNEL, PTE_ATTRINDX_MASK,
+			       user_prot_val & PTE_ATTRINDX_MASK);
+
+	return __ioremap_prot(phys, size, prot);
+}
 #define ioremap_prot ioremap_prot
 
-#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+#define ioremap(addr, size) \
+	__ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 
 #define ioremap_wc(addr, size) \
-	ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
+	__ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define ioremap_np(addr, size) \
-	ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+	__ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
 #define ioremap_encrypted(addr, size) \
-	ioremap_prot((addr), (size), PAGE_KERNEL)
+	__ioremap_prot((addr), (size), PAGE_KERNEL)
 
 /*
  * io{read,write}{16,32,64}be() macros
@@ -297,7 +311,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
 	if (pfn_is_map_memory(__phys_to_pfn(addr)))
 		return (void __iomem *)__phys_to_virt(addr);
 
-	return ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
+	return __ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
 }
 
 /*
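For context, typical driver-side use of the helpers rewired above (generic kernel API usage, not part of this diff; the function name and register offset are placeholders):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *regs;

	/* Device-nGnRE mapping; after this patch it expands to __ioremap_prot() */
	regs = ioremap(res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* placeholder register offset */
	return 0;
}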

@@ -1616,7 +1616,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
 	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))
 
 #define kvm_has_s1poe(k)	\
-	(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+	(system_supports_poe() && \
+	 kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
 
 #define kvm_has_ras(k)	\
 	(kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))

@@ -397,6 +397,8 @@ int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
 
+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);
+
 #define vncr_fixmap(c)		\
 	({			\
 		u32 __c = (c);	\

@@ -164,9 +164,6 @@ static inline bool __pure lpa2_is_enabled(void)
 #define _PAGE_GCS	(_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_WRITE | PTE_USER)
 #define _PAGE_GCS_RO	(_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_USER)
 
-#define PAGE_GCS	__pgprot(_PAGE_GCS)
-#define PAGE_GCS_RO	__pgprot(_PAGE_GCS_RO)
-
 #define PIE_E0	( \
 	PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
 	PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \

@@ -31,19 +31,11 @@
  */
 #define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
 			"tlbi " #op "\n" \
-		ALTERNATIVE("nop\n nop", \
-			"dsb ish\n tlbi " #op, \
-			ARM64_WORKAROUND_REPEAT_TLBI, \
-			CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
 			: : )
 
 #define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
-			"tlbi " #op ", %0\n" \
-		ALTERNATIVE("nop\n nop", \
-			"dsb ish\n tlbi " #op ", %0", \
-			ARM64_WORKAROUND_REPEAT_TLBI, \
-			CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
-			: : "r" (arg))
+			"tlbi " #op ", %x0\n" \
+			: : "rZ" (arg))
 
 #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
@@ -181,6 +173,34 @@ static inline unsigned long get_trans_granule(void)
 	(__pages >> (5 * (scale) + 1)) - 1;	\
 })
 
+#define __repeat_tlbi_sync(op, arg...)					\
+do {									\
+	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI)) \
+		break;							\
+	__tlbi(op, ##arg);						\
+	dsb(ish);							\
+} while (0)
+
+/*
+ * Complete broadcast TLB maintenance issued by the host which invalidates
+ * stage 1 information in the host's own translation regime.
+ */
+static inline void __tlbi_sync_s1ish(void)
+{
+	dsb(ish);
+	__repeat_tlbi_sync(vale1is, 0);
+}
+
+/*
+ * Complete broadcast TLB maintenance issued by hyp code which invalidates
+ * stage 1 translation information in any translation regime.
+ */
+static inline void __tlbi_sync_s1ish_hyp(void)
+{
+	dsb(ish);
+	__repeat_tlbi_sync(vale2is, 0);
+}
+
 /*
  * TLB Invalidation
  * ================
@@ -279,7 +299,7 @@ static inline void flush_tlb_all(void)
 {
 	dsb(ishst);
 	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	isb();
 }
@@ -291,7 +311,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
@@ -345,20 +365,11 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long uaddr)
 {
 	flush_tlb_page_nosync(vma, uaddr);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 }
 
 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 {
-	/*
-	 * TLB flush deferral is not required on systems which are affected by
-	 * ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
-	 * will have two consecutive TLBI instructions with a dsb(ish) in between
-	 * defeating the purpose (i.e save overall 'dsb ish' cost).
-	 */
-	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
-		return false;
-
 	return true;
 }
@@ -374,7 +385,7 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
  */
 static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-	dsb(ish);
+	__tlbi_sync_s1ish();
 }
 
 /*
@@ -509,7 +520,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 {
 	__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
 				 last_level, tlb_level);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 }
 
 static inline void local_flush_tlb_contpte(struct vm_area_struct *vma,
@@ -557,7 +568,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	dsb(ishst);
 	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
 			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	isb();
 }
@@ -571,7 +582,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 
 	dsb(ishst);
 	__tlbi(vaae1is, addr);
-	dsb(ish);
+	__tlbi_sync_s1ish();
 	isb();
 }

@@ -377,7 +377,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 			prot = __acpi_get_writethrough_mem_attribute();
 		}
 	}
-	return ioremap_prot(phys, size, prot);
+	return __ioremap_prot(phys, size, prot);
 }
 
 /*

@@ -37,7 +37,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 			 * We pick the reserved-ASID to minimise the impact.
 			 */
 			__tlbi(aside1is, __TLBI_VADDR(0, 0));
-			dsb(ish);
+			__tlbi_sync_s1ish();
 		}
 
 		ret = caches_clean_inval_user_pou(start, start + chunk);

@@ -400,16 +400,25 @@ static inline
 int counters_read_on_cpu(int cpu, smp_call_func_t func, u64 *val)
 {
 	/*
-	 * Abort call on counterless CPU or when interrupts are
-	 * disabled - can lead to deadlock in smp sync call.
+	 * Abort call on counterless CPU.
 	 */
 	if (!cpu_has_amu_feat(cpu))
 		return -EOPNOTSUPP;
 
-	if (WARN_ON_ONCE(irqs_disabled()))
-		return -EPERM;
-
-	smp_call_function_single(cpu, func, val, 1);
+	if (irqs_disabled()) {
+		/*
+		 * When IRQs are disabled (tick path: sched_tick ->
+		 * topology_scale_freq_tick or cppc_scale_freq_tick), only local
+		 * CPU counter reads are allowed. Remote CPU counter read would
+		 * require smp_call_function_single() which is unsafe with IRQs
+		 * disabled.
+		 */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			return -EPERM;
+		func(val);
+	} else {
+		smp_call_function_single(cpu, func, val, 1);
+	}
 
 	return 0;
 }
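A sketch of a legal call under the new rule (the callback name is illustrative; read_corecnt() is the arm64 AMU core-counter accessor):

static void read_corecnt_cb(void *val)
{
	*(u64 *)val = read_corecnt();
}

/* In a tick path with IRQs disabled, only the local CPU may be read: */
u64 cycles;

if (counters_read_on_cpu(smp_processor_id(), read_corecnt_cb, &cycles))
	return;	/* no AMU, or a remote read attempted with IRQs off */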

@@ -21,7 +21,6 @@ menuconfig KVM
 	bool "Kernel-based Virtual Machine (KVM) support"
 	select KVM_COMMON
 	select KVM_GENERIC_HARDWARE_ENABLING
-	select KVM_GENERIC_MMU_NOTIFIER
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT

@@ -358,7 +358,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_USER_MEMORY:
-	case KVM_CAP_SYNC_MMU:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ARM_PSCI:

@@ -540,31 +540,8 @@ static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
 	wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);
 	wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
 
-	if (wr->nG) {
-		u64 asid_ttbr, tcr;
-
-		switch (wi->regime) {
-		case TR_EL10:
-			tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
-			asid_ttbr = ((tcr & TCR_A1) ?
-				     vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
-				     vcpu_read_sys_reg(vcpu, TTBR0_EL1));
-			break;
-		case TR_EL20:
-			tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-			asid_ttbr = ((tcr & TCR_A1) ?
-				     vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-				     vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-			break;
-		default:
-			BUG();
-		}
-
-		wr->asid = FIELD_GET(TTBR_ASID_MASK, asid_ttbr);
-		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-		    !(tcr & TCR_ASID16))
-			wr->asid &= GENMASK(7, 0);
-	}
+	if (wr->nG)
+		wr->asid = get_asid_by_regime(vcpu, wi->regime);
 
 	return 0;

@@ -271,7 +271,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
 	 */
 	dsb(ishst);
 	__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 }

@@ -342,6 +342,7 @@ static void pkvm_init_features_from_host(struct pkvm_hyp_vm *hyp_vm, const struc
 	/* No restrictions for non-protected VMs. */
 	if (!kvm_vm_is_protected(kvm)) {
 		hyp_vm->kvm.arch.flags = host_arch_flags;
+		hyp_vm->kvm.arch.flags &= ~BIT_ULL(KVM_ARCH_FLAG_ID_REGS_INITIALIZED);
 
 		bitmap_copy(kvm->arch.vcpu_features,
 			    host_kvm->arch.vcpu_features,
@@ -391,7 +392,7 @@ static void unpin_host_sve_state(struct pkvm_hyp_vcpu *hyp_vcpu)
 	if (!vcpu_has_feature(&hyp_vcpu->vcpu, KVM_ARM_VCPU_SVE))
 		return;
 
-	sve_state = kern_hyp_va(hyp_vcpu->vcpu.arch.sve_state);
+	sve_state = hyp_vcpu->vcpu.arch.sve_state;
 	hyp_unpin_shared_mem(sve_state,
 			     sve_state + vcpu_sve_state_size(&hyp_vcpu->vcpu));
 }
@@ -471,6 +472,35 @@ err:
 	return ret;
 }
 
+static int vm_copy_id_regs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+	const struct kvm *host_kvm = hyp_vm->host_kvm;
+	struct kvm *kvm = &hyp_vm->kvm;
+
+	if (!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &host_kvm->arch.flags))
+		return -EINVAL;
+
+	if (test_and_set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
+		return 0;
+
+	memcpy(kvm->arch.id_regs, host_kvm->arch.id_regs, sizeof(kvm->arch.id_regs));
+
+	return 0;
+}
+
+static int pkvm_vcpu_init_sysregs(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	int ret = 0;
+
+	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+	else
+		ret = vm_copy_id_regs(hyp_vcpu);
+
+	return ret;
+}
+
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 			      struct pkvm_hyp_vm *hyp_vm,
 			      struct kvm_vcpu *host_vcpu)
@@ -490,8 +520,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
 	hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
 
-	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
-		kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
+	ret = pkvm_vcpu_init_sysregs(hyp_vcpu);
+	if (ret)
+		goto done;
 
 	ret = pkvm_vcpu_init_traps(hyp_vcpu);
 	if (ret)

@@ -169,7 +169,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	 */
 	dsb(ish);
 	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	exit_vmid_context(&cxt);
@@ -226,7 +226,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	exit_vmid_context(&cxt);
@@ -240,7 +240,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	enter_vmid_context(mmu, &cxt, false);
 
 	__tlbi(vmalls12e1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	exit_vmid_context(&cxt);
@@ -266,5 +266,5 @@ void __kvm_flush_vm_context(void)
 	/* Same remark as in enter_vmid_context() */
 	dsb(ish);
 	__tlbi(alle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 }

@@ -501,7 +501,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
 		*unmapped += granule;
 	}
 
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 	mm_ops->put_page(ctx->ptep);

@@ -115,7 +115,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	 */
 	dsb(ish);
 	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	exit_vmid_context(&cxt);
@@ -176,7 +176,7 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	__tlbi(vmalle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	exit_vmid_context(&cxt);
@@ -192,7 +192,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 	enter_vmid_context(mmu, &cxt);
 
 	__tlbi(vmalls12e1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	exit_vmid_context(&cxt);
@@ -217,7 +217,7 @@ void __kvm_flush_vm_context(void)
 {
 	dsb(ishst);
 	__tlbi(alle1is);
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 }
 
 /*
@@ -358,7 +358,7 @@ int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding)
 	default:
 		ret = -EINVAL;
 	}
-	dsb(ish);
+	__tlbi_sync_s1ish_hyp();
 	isb();
 
 	if (mmu)

@@ -1754,14 +1754,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	/*
-	 * Both the canonical IPA and fault IPA must be hugepage-aligned to
-	 * ensure we find the right PFN and lay down the mapping in the right
-	 * place.
+	 * Both the canonical IPA and fault IPA must be aligned to the
+	 * mapping size to ensure we find the right PFN and lay down the
+	 * mapping in the right place.
 	 */
-	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
-		fault_ipa &= ~(vma_pagesize - 1);
-		ipa &= ~(vma_pagesize - 1);
-	}
+	fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
+	ipa = ALIGN_DOWN(ipa, vma_pagesize);
 
 	gfn = ipa >> PAGE_SHIFT;
 	mte_allowed = kvm_vma_mte_allowed(vma);
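A quick arithmetic check of the ALIGN_DOWN() rewrite (standalone sketch; the addresses are made up):

#include <stdio.h>

/* Same result as the kernel's ALIGN_DOWN() for power-of-two sizes. */
#define ALIGN_DOWN(x, a) ((x) & ~((__typeof__(x))(a) - 1))

int main(void)
{
	unsigned long fault_ipa = 0x40123000UL;
	unsigned long pmd_size = 0x200000UL;	/* 2 MiB block mapping */

	/* Prints 0x40000000: the 2 MiB-aligned base covering the fault. */
	printf("0x%lx\n", ALIGN_DOWN(fault_ipa, pmd_size));
	return 0;
}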

@@ -854,6 +854,33 @@ int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
 	return kvm_inject_nested_sync(vcpu, esr_el2);
 }
 
+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
+{
+	enum vcpu_sysreg ttbr_elx;
+	u64 tcr;
+	u16 asid;
+
+	switch (regime) {
+	case TR_EL10:
+		tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
+		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
+		break;
+	case TR_EL20:
+		tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
+		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
+		break;
+	default:
+		BUG();
+	}
+
+	asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
+	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
+	    !(tcr & TCR_ASID16))
+		asid &= GENMASK(7, 0);
+
+	return asid;
+}
+
 static void invalidate_vncr(struct vncr_tlb *vt)
 {
 	vt->valid = false;
@@ -1154,9 +1181,6 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
 {
 	int i;
 
-	if (!kvm->arch.nested_mmus_size)
-		return;
-
 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
@@ -1336,20 +1360,8 @@ static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
 	if (read_vncr_el2(vcpu) != vt->gva)
 		return false;
 
-	if (vt->wr.nG) {
-		u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-		u64 ttbr = ((tcr & TCR_A1) ?
-			    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-		u16 asid;
-
-		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
-		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-		    !(tcr & TCR_ASID16))
-			asid &= GENMASK(7, 0);
-
-		return asid == vt->wr.asid;
-	}
+	if (vt->wr.nG)
+		return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;
 
 	return true;
 }
@@ -1452,21 +1464,8 @@ static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
 	if (read_vncr_el2(vcpu) != vt->gva)
 		return;
 
-	if (vt->wr.nG) {
-		u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
-		u64 ttbr = ((tcr & TCR_A1) ?
-			    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
-			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
-		u16 asid;
-
-		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
-		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
-		    !(tcr & TCR_ASID16))
-			asid &= GENMASK(7, 0);
-
-		if (asid != vt->wr.asid)
-			return;
-	}
+	if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
+		return;
 
 	vt->cpu = smp_processor_id();

@@ -1816,6 +1816,9 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		      ID_AA64MMFR3_EL1_SCTLRX |
 		      ID_AA64MMFR3_EL1_S1POE |
 		      ID_AA64MMFR3_EL1_S1PIE;
+
+		if (!system_supports_poe())
+			val &= ~ID_AA64MMFR3_EL1_S1POE;
 		break;
 	case SYS_ID_MMFR4_EL1:
 		val &= ~ID_MMFR4_EL1_CCIDX;

@@ -32,7 +32,11 @@ static inline unsigned long xloops_to_cycles(unsigned long xloops)
  * Note that userspace cannot change the offset behind our back either,
  * as the vcpu mutex is held as long as KVM_RUN is in progress.
  */
-#define __delay_cycles() __arch_counter_get_cntvct_stable()
+static cycles_t notrace __delay_cycles(void)
+{
+	guard(preempt_notrace)();
+	return __arch_counter_get_cntvct_stable();
+}
 
 void __delay(unsigned long cycles)
 {

@@ -14,8 +14,8 @@ int arm64_ioremap_prot_hook_register(ioremap_prot_hook_t hook)
 	return 0;
 }
 
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
-			   pgprot_t pgprot)
+void __iomem *__ioremap_prot(phys_addr_t phys_addr, size_t size,
+			     pgprot_t pgprot)
 {
 	unsigned long last_addr = phys_addr + size - 1;
@@ -39,7 +39,7 @@ void __iomem *__ioremap_prot(phys_addr_t phys_addr, size_t size,
 	return generic_ioremap_prot(phys_addr, size, pgprot);
 }
-EXPORT_SYMBOL(ioremap_prot);
+EXPORT_SYMBOL(__ioremap_prot);
 
 /*
  * Must be called after early_fixmap_init

@@ -34,6 +34,8 @@ static pgprot_t protection_map[16] __ro_after_init = {
 	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
 };
 
+static ptdesc_t gcs_page_prot __ro_after_init = _PAGE_GCS_RO;
+
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This might go
  * away in the future.
@@ -73,9 +75,11 @@ static int __init adjust_protection_map(void)
 		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
 	}
 
-	if (lpa2_is_enabled())
+	if (lpa2_is_enabled()) {
 		for (int i = 0; i < ARRAY_SIZE(protection_map); i++)
 			pgprot_val(protection_map[i]) &= ~PTE_SHARED;
+		gcs_page_prot &= ~PTE_SHARED;
+	}
 
 	return 0;
 }
@@ -87,7 +91,11 @@ pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
 
 	/* Short circuit GCS to avoid bloating the table. */
 	if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
-		prot = _PAGE_GCS_RO;
+		/* Honour mprotect(PROT_NONE) on shadow stack mappings */
+		if (vm_flags & VM_ACCESS_FLAGS)
+			prot = gcs_page_prot;
+		else
+			prot = pgprot_val(protection_map[VM_NONE]);
 	} else {
 		prot = pgprot_val(protection_map[vm_flags &
 				  (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
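A userspace sketch of the behaviour being honoured here (assumes an arm64 kernel with GCS enabled; __NR_map_shadow_stack is 453 in the generic syscall table if your libc headers lack it):

#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	unsigned long size = 4096;
	/* Allocate a guarded control stack (shadow stack) mapping. */
	void *ss = (void *)syscall(__NR_map_shadow_stack, 0, size, 0);
	if (ss == MAP_FAILED)
		return 1;

	/* With this fix, revoking all access is honoured instead of the
	 * mapping silently remaining readable. */
	return mprotect(ss, size, PROT_NONE);
}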

@@ -2119,7 +2119,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
 	image_size = extable_offset + extable_size;
 	ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
-					      sizeof(u32), &header, &image_ptr,
+					      sizeof(u64), &header, &image_ptr,
 					      jit_fill_hole);
 	if (!ro_header) {
 		prog = orig_prog;

@@ -28,7 +28,6 @@ config KVM
 	select KVM_COMMON
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_GENERIC_HARDWARE_ENABLING
-	select KVM_GENERIC_MMU_NOTIFIER
 	select KVM_MMIO
 	select VIRT_XFER_TO_GUEST_WORK
 	select SCHED_INFO

@@ -118,7 +118,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_READONLY_MEM:
-	case KVM_CAP_SYNC_MMU:
 	case KVM_CAP_IMMEDIATE_EXIT:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_MP_STATE:

@@ -23,7 +23,6 @@ config KVM
 	select KVM_COMMON
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_MMIO
-	select KVM_GENERIC_MMU_NOTIFIER
 	select KVM_GENERIC_HARDWARE_ENABLING
 	select HAVE_KVM_READONLY_MEM
 	help

@@ -1035,7 +1035,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_READONLY_MEM:
-	case KVM_CAP_SYNC_MMU:
 	case KVM_CAP_IMMEDIATE_EXIT:
 		r = 1;
 		break;

@@ -38,7 +38,6 @@ config KVM_BOOK3S_64_HANDLER
 config KVM_BOOK3S_PR_POSSIBLE
 	bool
 	select KVM_MMIO
-	select KVM_GENERIC_MMU_NOTIFIER
 
 config KVM_BOOK3S_HV_POSSIBLE
 	bool
@@ -81,7 +80,6 @@ config KVM_BOOK3S_64_HV
 	tristate "KVM for POWER7 and later using hypervisor mode in host"
 	depends on KVM_BOOK3S_64 && PPC_POWERNV
 	select KVM_BOOK3S_HV_POSSIBLE
-	select KVM_GENERIC_MMU_NOTIFIER
 	select KVM_BOOK3S_HV_PMU
 	select CMA
 	help
@@ -203,7 +201,6 @@ config KVM_E500V2
 	depends on !CONTEXT_TRACKING_USER
 	select KVM
 	select KVM_MMIO
-	select KVM_GENERIC_MMU_NOTIFIER
 	help
 	  Support running unmodified E500 guest kernels in virtual machines on
 	  E500v2 host processors.
@@ -220,7 +217,6 @@ config KVM_E500MC
 	select KVM
 	select KVM_MMIO
 	select KVM_BOOKE_HV
-	select KVM_GENERIC_MMU_NOTIFIER
 	help
 	  Support running unmodified E500MC/E5500/E6500 guest kernels in
 	  virtual machines on E500MC/E5500/E6500 host processors.

@@ -623,12 +623,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
 		       !kvmppc_hv_ops->enable_nested(NULL));
 		break;
-#endif
-	case KVM_CAP_SYNC_MMU:
-		BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER));
-		r = 1;
-		break;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_HTAB_FD:
 		r = hv_enabled;
 		break;

@@ -30,7 +30,6 @@ config KVM
 	select KVM_GENERIC_HARDWARE_ENABLING
 	select KVM_MMIO
 	select VIRT_XFER_TO_GUEST_WORK
-	select KVM_GENERIC_MMU_NOTIFIER
 	select SCHED_INFO
 	select GUEST_PERF_EVENTS if PERF_EVENTS
 	help

@@ -181,7 +181,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		break;
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_USER_MEMORY:
-	case KVM_CAP_SYNC_MMU:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_READONLY_MEM:

@@ -19,9 +19,9 @@ struct s390_idle_data {
 	unsigned long mt_cycles_enter[8];
 };
 
+DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
+
 extern struct device_attribute dev_attr_idle_count;
 extern struct device_attribute dev_attr_idle_time_us;
 
-void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
-
 #endif /* _S390_IDLE_H */

@@ -2,6 +2,12 @@
 #ifndef _S390_VTIME_H
 #define _S390_VTIME_H
 
+#include <asm/lowcore.h>
+#include <asm/cpu_mf.h>
+#include <asm/idle.h>
+
+DECLARE_PER_CPU(u64, mt_cycles[8]);
+
 static inline void update_timer_sys(void)
 {
 	struct lowcore *lc = get_lowcore();
@@ -20,4 +26,32 @@ static inline void update_timer_mcck(void)
 	lc->last_update_timer = lc->mcck_enter_timer;
 }
 
+static inline void update_timer_idle(void)
+{
+	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
+	struct lowcore *lc = get_lowcore();
+	u64 cycles_new[8];
+	int i, mtid;
+
+	mtid = smp_cpu_mtid;
+	if (mtid) {
+		stcctm(MT_DIAG, mtid, cycles_new);
+		for (i = 0; i < mtid; i++)
+			__this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
+	}
+
+	/*
+	 * This is a bit subtle: Forward last_update_clock so it excludes idle
+	 * time. For correct steal time calculation in do_account_vtime() add
+	 * passed wall time before idle_enter to steal_timer:
+	 * During the passed wall time before idle_enter CPU time may have
+	 * been accounted to system, hardirq, softirq, etc. lowcore fields.
+	 * The accounted CPU times will be subtracted again from steal_timer
+	 * when accumulated steal time is calculated in do_account_vtime().
+	 */
+	lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
+	lc->last_update_clock = lc->int_clock;
+	lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
+	lc->last_update_timer = lc->sys_enter_timer;
+}
+
 #endif /* _S390_VTIME_H */

@@ -56,8 +56,6 @@ long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
 long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
 long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags);
 
-DECLARE_PER_CPU(u64, mt_cycles[8]);
-
 unsigned long stack_alloc(void);
 void stack_free(unsigned long stack);

@@ -15,37 +15,22 @@
 #include <trace/events/power.h>
 #include <asm/cpu_mf.h>
 #include <asm/cputime.h>
+#include <asm/idle.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
-#include "entry.h"
 
-static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
 void account_idle_time_irq(void)
 {
 	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
-	struct lowcore *lc = get_lowcore();
 	unsigned long idle_time;
-	u64 cycles_new[8];
-	int i;
 
-	if (smp_cpu_mtid) {
-		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
-		for (i = 0; i < smp_cpu_mtid; i++)
-			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
-	}
-
-	idle_time = lc->int_clock - idle->clock_idle_enter;
-
-	lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
-	lc->last_update_clock = lc->int_clock;
-	lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
-	lc->last_update_timer = lc->sys_enter_timer;
+	idle_time = get_lowcore()->int_clock - idle->clock_idle_enter;
 
 	/* Account time spent with enabled wait psw loaded as idle time. */
-	WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
-	WRITE_ONCE(idle->idle_count, READ_ONCE(idle->idle_count) + 1);
+	__atomic64_add(idle_time, &idle->idle_time);
+	__atomic64_add_const(1, &idle->idle_count);
 	account_idle_time(cputime_to_nsecs(idle_time));
 }

@@ -2377,7 +2377,7 @@ void __init setup_ipl(void)
 	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
 }
 
-void s390_reset_system(void)
+void __no_stack_protector s390_reset_system(void)
 {
 	/* Disable prefixing */
 	set_prefix(0);

@@ -146,6 +146,12 @@ void noinstr do_io_irq(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	bool from_idle;
 
+	from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+	if (from_idle) {
+		update_timer_idle();
+		regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
+	}
+
 	irq_enter_rcu();
 
 	if (user_mode(regs)) {
@@ -154,7 +160,6 @@ void noinstr do_io_irq(struct pt_regs *regs)
 		current->thread.last_break = regs->last_break;
 	}
 
-	from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
 	if (from_idle)
 		account_idle_time_irq();
 
@@ -171,9 +176,6 @@ void noinstr do_io_irq(struct pt_regs *regs)
 
 	set_irq_regs(old_regs);
 	irqentry_exit(regs, state);
-
-	if (from_idle)
-		regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
 }
 
 void noinstr do_ext_irq(struct pt_regs *regs)
@@ -182,6 +184,12 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	bool from_idle;
 
+	from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
+	if (from_idle) {
+		update_timer_idle();
+		regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
+	}
+
 	irq_enter_rcu();
 
 	if (user_mode(regs)) {
@@ -194,7 +202,6 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 	regs->int_parm = get_lowcore()->ext_params;
 	regs->int_parm_long = get_lowcore()->ext_params2;
 
-	from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
 	if (from_idle)
 		account_idle_time_irq();
 
@@ -203,9 +210,6 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 	irq_exit_rcu();
 	set_irq_regs(old_regs);
 	irqentry_exit(regs, state);
-
-	if (from_idle)
-		regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
 }
 
 static void show_msi_interrupt(struct seq_file *p, int irq)

View file

@ -48,8 +48,7 @@ static inline void set_vtimer(u64 expires)
static inline int virt_timer_forward(u64 elapsed) static inline int virt_timer_forward(u64 elapsed)
{ {
BUG_ON(!irqs_disabled()); lockdep_assert_irqs_disabled();
if (list_empty(&virt_timer_list)) if (list_empty(&virt_timer_list))
return 0; return 0;
elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed); elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
@ -137,23 +136,16 @@ static int do_account_vtime(struct task_struct *tsk)
lc->system_timer += timer; lc->system_timer += timer;
/* Update MT utilization calculation */ /* Update MT utilization calculation */
if (smp_cpu_mtid && if (smp_cpu_mtid && time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies)))
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
update_mt_scaling(); update_mt_scaling();
/* Calculate cputime delta */ /* Calculate cputime delta */
user = update_tsk_timer(&tsk->thread.user_timer, user = update_tsk_timer(&tsk->thread.user_timer, lc->user_timer);
READ_ONCE(lc->user_timer)); guest = update_tsk_timer(&tsk->thread.guest_timer, lc->guest_timer);
guest = update_tsk_timer(&tsk->thread.guest_timer, system = update_tsk_timer(&tsk->thread.system_timer, lc->system_timer);
READ_ONCE(lc->guest_timer)); hardirq = update_tsk_timer(&tsk->thread.hardirq_timer, lc->hardirq_timer);
system = update_tsk_timer(&tsk->thread.system_timer, softirq = update_tsk_timer(&tsk->thread.softirq_timer, lc->softirq_timer);
READ_ONCE(lc->system_timer)); lc->steal_timer += clock - user - guest - system - hardirq - softirq;
hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
READ_ONCE(lc->hardirq_timer));
softirq = update_tsk_timer(&tsk->thread.softirq_timer,
READ_ONCE(lc->softirq_timer));
lc->steal_timer +=
clock - user - guest - system - hardirq - softirq;
/* Push account value */ /* Push account value */
if (user) { if (user) {
@ -225,10 +217,6 @@ static u64 vtime_delta(void)
return timer - lc->last_update_timer; return timer - lc->last_update_timer;
} }
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
void vtime_account_kernel(struct task_struct *tsk) void vtime_account_kernel(struct task_struct *tsk)
{ {
struct lowcore *lc = get_lowcore(); struct lowcore *lc = get_lowcore();
@ -238,27 +226,17 @@ void vtime_account_kernel(struct task_struct *tsk)
lc->guest_timer += delta; lc->guest_timer += delta;
else else
lc->system_timer += delta; lc->system_timer += delta;
virt_timer_forward(delta);
} }
EXPORT_SYMBOL_GPL(vtime_account_kernel); EXPORT_SYMBOL_GPL(vtime_account_kernel);
void vtime_account_softirq(struct task_struct *tsk) void vtime_account_softirq(struct task_struct *tsk)
{ {
u64 delta = vtime_delta(); get_lowcore()->softirq_timer += vtime_delta();
get_lowcore()->softirq_timer += delta;
virt_timer_forward(delta);
} }
void vtime_account_hardirq(struct task_struct *tsk) void vtime_account_hardirq(struct task_struct *tsk)
{ {
u64 delta = vtime_delta(); get_lowcore()->hardirq_timer += vtime_delta();
get_lowcore()->hardirq_timer += delta;
virt_timer_forward(delta);
} }
/* /*
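
The do_account_vtime() cleanup above keeps the steal-time arithmetic intact: whatever part of the wall-clock delta cannot be attributed to user, guest, system, hardirq or softirq time must have been stolen by the hypervisor. A small self-contained example of that bookkeeping, with made-up tick values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock = 1000;	/* wall-clock delta since the last update */
	uint64_t user = 400, guest = 0, system = 350;
	uint64_t hardirq = 30, softirq = 20;

	/* Unaccounted time is steal time. */
	uint64_t steal = clock - user - guest - system - hardirq - softirq;

	printf("steal = %llu ticks\n", (unsigned long long)steal);	/* 200 */
	return 0;
}
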


@ -28,9 +28,7 @@ config KVM
select HAVE_KVM_INVALID_WAKEUPS select HAVE_KVM_INVALID_WAKEUPS
select HAVE_KVM_NO_POLL select HAVE_KVM_NO_POLL
select KVM_VFIO select KVM_VFIO
select MMU_NOTIFIER
select VIRT_XFER_TO_GUEST_WORK select VIRT_XFER_TO_GUEST_WORK
select KVM_GENERIC_MMU_NOTIFIER
select KVM_MMU_LOCKLESS_AGING select KVM_MMU_LOCKLESS_AGING
help help
Support hosting paravirtualized guest machines using the SIE Support hosting paravirtualized guest machines using the SIE


@ -601,7 +601,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
switch (ext) { switch (ext) {
case KVM_CAP_S390_PSW: case KVM_CAP_S390_PSW:
case KVM_CAP_S390_GMAP: case KVM_CAP_S390_GMAP:
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL #ifdef CONFIG_KVM_S390_UCONTROL
case KVM_CAP_S390_UCONTROL: case KVM_CAP_S390_UCONTROL:
#endif #endif


@ -62,7 +62,7 @@ int __pfault_init(void)
"0: nopr %%r7\n" "0: nopr %%r7\n"
EX_TABLE(0b, 0b) EX_TABLE(0b, 0b)
: [rc] "+d" (rc) : [rc] "+d" (rc)
: [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : [refbk] "a" (virt_to_phys(&pfault_init_refbk)), "m" (pfault_init_refbk)
: "cc"); : "cc");
return rc; return rc;
} }
@ -84,7 +84,7 @@ void __pfault_fini(void)
"0: nopr %%r7\n" "0: nopr %%r7\n"
EX_TABLE(0b, 0b) EX_TABLE(0b, 0b)
: :
: [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : [refbk] "a" (virt_to_phys(&pfault_fini_refbk)), "m" (pfault_fini_refbk)
: "cc"); : "cc");
} }


@ -312,6 +312,8 @@ static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
if (direction != DMA_TO_DEVICE) if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE; iopte_protection |= IOPTE_WRITE;
phys &= IO_PAGE_MASK;
for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE) for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | phys; iopte_val(*base) = iopte_protection | phys;


@ -410,6 +410,8 @@ static dma_addr_t dma_4v_map_phys(struct device *dev, phys_addr_t phys,
iommu_batch_start(dev, prot, entry); iommu_batch_start(dev, prot, entry);
phys &= IO_PAGE_MASK;
for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) { for (i = 0; i < npages; i++, phys += IO_PAGE_SIZE) {
long err = iommu_batch_add(phys, mask); long err = iommu_batch_add(phys, mask);
if (unlikely(err < 0L)) if (unlikely(err < 0L))


@ -69,11 +69,11 @@ struct io_thread_req {
}; };
static struct io_thread_req * (*irq_req_buffer)[]; static struct io_thread_req **irq_req_buffer;
static struct io_thread_req *irq_remainder; static struct io_thread_req *irq_remainder;
static int irq_remainder_size; static int irq_remainder_size;
static struct io_thread_req * (*io_req_buffer)[]; static struct io_thread_req **io_req_buffer;
static struct io_thread_req *io_remainder; static struct io_thread_req *io_remainder;
static int io_remainder_size; static int io_remainder_size;
@ -398,7 +398,7 @@ static int thread_fd = -1;
static int bulk_req_safe_read( static int bulk_req_safe_read(
int fd, int fd,
struct io_thread_req * (*request_buffer)[], struct io_thread_req **request_buffer,
struct io_thread_req **remainder, struct io_thread_req **remainder,
int *remainder_size, int *remainder_size,
int max_recs int max_recs
@ -465,7 +465,7 @@ static irqreturn_t ubd_intr(int irq, void *dev)
&irq_remainder, &irq_remainder_size, &irq_remainder, &irq_remainder_size,
UBD_REQ_BUFFER_SIZE)) >= 0) { UBD_REQ_BUFFER_SIZE)) >= 0) {
for (i = 0; i < len / sizeof(struct io_thread_req *); i++) for (i = 0; i < len / sizeof(struct io_thread_req *); i++)
ubd_end_request((*irq_req_buffer)[i]); ubd_end_request(irq_req_buffer[i]);
} }
if (len < 0 && len != -EAGAIN) if (len < 0 && len != -EAGAIN)
@ -1512,7 +1512,7 @@ void *io_thread(void *arg)
} }
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) { for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
struct io_thread_req *req = (*io_req_buffer)[count]; struct io_thread_req *req = io_req_buffer[count];
int i; int i;
io_count++; io_count++;
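
The ubd change above is purely a C type simplification: a pointer to an array of pointers forces the (*buf)[i] indexing seen in the removed lines, while a plain pointer-to-pointer indexes directly. A toy demonstration of the equivalence:

#include <stdio.h>

int main(void)
{
	int a = 1, b = 2;
	int *slots[] = { &a, &b };

	int *(*parr)[] = &slots;	/* old style: pointer to array of pointers */
	int **pp = slots;		/* new style: decayed pointer-to-pointer */

	printf("%d %d\n", *(*parr)[1], *pp[1]);	/* 2 2 */
	return 0;
}
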


@ -160,8 +160,6 @@ void __init fred_complete_exception_setup(void)
static noinstr void fred_extint(struct pt_regs *regs) static noinstr void fred_extint(struct pt_regs *regs)
{ {
unsigned int vector = regs->fred_ss.vector; unsigned int vector = regs->fred_ss.vector;
unsigned int index = array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
NR_SYSTEM_VECTORS);
if (WARN_ON_ONCE(vector < FIRST_EXTERNAL_VECTOR)) if (WARN_ON_ONCE(vector < FIRST_EXTERNAL_VECTOR))
return; return;
@ -170,7 +168,8 @@ static noinstr void fred_extint(struct pt_regs *regs)
irqentry_state_t state = irqentry_enter(regs); irqentry_state_t state = irqentry_enter(regs);
instrumentation_begin(); instrumentation_begin();
sysvec_table[index](regs); sysvec_table[array_index_nospec(vector - FIRST_SYSTEM_VECTOR,
NR_SYSTEM_VECTORS)](regs);
instrumentation_end(); instrumentation_end();
irqentry_exit(regs, state); irqentry_exit(regs, state);
} else { } else {
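
The fred_extint() change defers the array_index_nospec() clamp to the use site, so the speculation barrier is computed on the path that has already validated the vector as a system vector, right before sysvec_table is indexed. Roughly what the clamp does, sketched in userspace (the real kernel derives the mask branchlessly per architecture; the ternary here is only illustrative):

#include <stddef.h>
#include <stdio.h>

static size_t index_nospec(size_t idx, size_t size)
{
	/* all-ones mask when in bounds, zero otherwise */
	size_t mask = idx < size ? ~(size_t)0 : 0;
	return idx & mask;
}

int main(void)
{
	printf("%zu %zu\n", index_nospec(3, 8), index_nospec(9, 8));	/* 3 0 */
	return 0;
}
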


@ -6497,6 +6497,32 @@ static struct intel_uncore_type gnr_uncore_ubox = {
.attr_update = uncore_alias_groups, .attr_update = uncore_alias_groups,
}; };
static struct uncore_event_desc gnr_uncore_imc_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x01,umask=0x00"),
INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0, "event=0x05,umask=0xcf"),
INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(cas_count_read_sch0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1, "event=0x06,umask=0xcf"),
INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(cas_count_read_sch1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0, "event=0x05,umask=0xf0"),
INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(cas_count_write_sch0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1, "event=0x06,umask=0xf0"),
INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.scale, "6.103515625e-5"),
INTEL_UNCORE_EVENT_DESC(cas_count_write_sch1.unit, "MiB"),
{ /* end: all zeroes */ },
};
static struct intel_uncore_type gnr_uncore_imc = {
SPR_UNCORE_MMIO_COMMON_FORMAT(),
.name = "imc",
.fixed_ctr_bits = 48,
.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
.event_descs = gnr_uncore_imc_events,
};
static struct intel_uncore_type gnr_uncore_pciex8 = { static struct intel_uncore_type gnr_uncore_pciex8 = {
SPR_UNCORE_PCI_COMMON_FORMAT(), SPR_UNCORE_PCI_COMMON_FORMAT(),
.name = "pciex8", .name = "pciex8",
@ -6544,7 +6570,7 @@ static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = {
NULL, NULL,
&spr_uncore_pcu, &spr_uncore_pcu,
&gnr_uncore_ubox, &gnr_uncore_ubox,
&spr_uncore_imc, &gnr_uncore_imc,
NULL, NULL,
&gnr_uncore_upi, &gnr_uncore_upi,
NULL, NULL,
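
The new gnr_uncore_imc event descriptors reuse the familiar IMC scale factor. Assuming each CAS command counted by the IMC moves one 64-byte cache line, converting a raw event count to MiB means multiplying by 64 / 2^20, which is exactly the 6.103515625e-5 in the descriptors:

#include <stdio.h>

int main(void)
{
	double scale = 64.0 / (1 << 20);	/* bytes per CAS / bytes per MiB */
	printf("%.12g\n", scale);		/* 6.103515625e-05 */
	return 0;
}
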


@ -7,7 +7,7 @@
#include <linux/objtool.h> #include <linux/objtool.h>
#include <asm/asm.h> #include <asm/asm.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLER__
struct bug_entry; struct bug_entry;
extern void __WARN_trap(struct bug_entry *bug, ...); extern void __WARN_trap(struct bug_entry *bug, ...);
#endif #endif
@ -137,7 +137,7 @@ do { \
#ifdef HAVE_ARCH_BUG_FORMAT_ARGS #ifdef HAVE_ARCH_BUG_FORMAT_ARGS
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLER__
#include <linux/static_call_types.h> #include <linux/static_call_types.h>
DECLARE_STATIC_CALL(WARN_trap, __WARN_trap); DECLARE_STATIC_CALL(WARN_trap, __WARN_trap);
@ -153,7 +153,7 @@ struct arch_va_list {
struct sysv_va_list args; struct sysv_va_list args;
}; };
extern void *__warn_args(struct arch_va_list *args, struct pt_regs *regs); extern void *__warn_args(struct arch_va_list *args, struct pt_regs *regs);
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLER__ */
#define __WARN_bug_entry(flags, format) ({ \ #define __WARN_bug_entry(flags, format) ({ \
struct bug_entry *bug; \ struct bug_entry *bug; \


@ -111,6 +111,12 @@ extern bhi_thunk __bhi_args_end[];
struct pt_regs; struct pt_regs;
#ifdef CONFIG_CALL_PADDING
#define CFI_OFFSET (CONFIG_FUNCTION_PADDING_CFI+5)
#else
#define CFI_OFFSET 5
#endif
#ifdef CONFIG_CFI #ifdef CONFIG_CFI
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
#define __bpfcall #define __bpfcall
@ -119,11 +125,9 @@ static inline int cfi_get_offset(void)
{ {
switch (cfi_mode) { switch (cfi_mode) {
case CFI_FINEIBT: case CFI_FINEIBT:
return 16; return /* fineibt_prefix_size */ 16;
case CFI_KCFI: case CFI_KCFI:
if (IS_ENABLED(CONFIG_CALL_PADDING)) return CFI_OFFSET;
return 16;
return 5;
default: default:
return 0; return 0;
} }
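
The CFI_OFFSET definition makes the preamble distance explicit instead of hard-coding 16. A back-of-the-envelope check, assuming the common CONFIG_FUNCTION_PADDING_CFI=11 configuration: 11 padding bytes plus the 5-byte movl $hash, %eax land the kCFI preamble 16 bytes before the function entry, which is also where FineIBT expects its prefix:

#include <stdio.h>

#define FUNCTION_PADDING_CFI	11	/* assumed config value */
#define MOVL_IMM32_LEN		5	/* movl $hash, %eax */

int main(void)
{
	printf("CFI_OFFSET = %d\n", FUNCTION_PADDING_CFI + MOVL_IMM32_LEN);	/* 16 */
	return 0;
}
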


@ -77,7 +77,7 @@ static __always_inline void native_local_irq_restore(unsigned long flags)
#endif #endif
#ifndef CONFIG_PARAVIRT #ifndef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLER__
/* /*
* Used in the idle loop; sti takes one instruction cycle * Used in the idle loop; sti takes one instruction cycle
* to complete: * to complete:
@ -95,7 +95,7 @@ static __always_inline void halt(void)
{ {
native_halt(); native_halt();
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLER__ */
#endif /* CONFIG_PARAVIRT */ #endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_PARAVIRT_XXL #ifdef CONFIG_PARAVIRT_XXL


@ -68,7 +68,7 @@
* Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the * Depending on -fpatchable-function-entry=N,N usage (CONFIG_CALL_PADDING) the
* CFI symbol layout changes. * CFI symbol layout changes.
* *
* Without CALL_THUNKS: * Without CALL_PADDING:
* *
* .align FUNCTION_ALIGNMENT * .align FUNCTION_ALIGNMENT
* __cfi_##name: * __cfi_##name:
@ -77,7 +77,7 @@
* .long __kcfi_typeid_##name * .long __kcfi_typeid_##name
* name: * name:
* *
* With CALL_THUNKS: * With CALL_PADDING:
* *
* .align FUNCTION_ALIGNMENT * .align FUNCTION_ALIGNMENT
* __cfi_##name: * __cfi_##name:


@ -20,7 +20,7 @@
#define PER_CPU_VAR(var) __percpu(var)__percpu_rel #define PER_CPU_VAR(var) __percpu(var)__percpu_rel
#else /* !__ASSEMBLY__: */ #else /* !__ASSEMBLER__: */
#include <linux/args.h> #include <linux/args.h>
#include <linux/bits.h> #include <linux/bits.h>


@ -6,7 +6,7 @@
#error "Cannot use runtime-const infrastructure from modules" #error "Cannot use runtime-const infrastructure from modules"
#endif #endif
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLER__
.macro RUNTIME_CONST_PTR sym reg .macro RUNTIME_CONST_PTR sym reg
movq $0x0123456789abcdef, %\reg movq $0x0123456789abcdef, %\reg
@ -16,7 +16,7 @@
.popsection .popsection
.endm .endm
#else /* __ASSEMBLY__ */ #else /* __ASSEMBLER__ */
#define runtime_const_ptr(sym) ({ \ #define runtime_const_ptr(sym) ({ \
typeof(sym) __ret; \ typeof(sym) __ret; \
@ -74,5 +74,5 @@ static inline void runtime_const_fixup(void (*fn)(void *, unsigned long),
} }
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLER__ */
#endif #endif


@ -25,6 +25,8 @@ extern int ibt_selftest_noendbr(void);
void handle_invalid_op(struct pt_regs *regs); void handle_invalid_op(struct pt_regs *regs);
#endif #endif
noinstr bool handle_bug(struct pt_regs *regs);
static inline int get_si_code(unsigned long condition) static inline int get_si_code(unsigned long condition)
{ {
if (condition & DR_STEP) if (condition & DR_STEP)


@ -1182,7 +1182,7 @@ void __init_or_module noinline apply_seal_endbr(s32 *start, s32 *end)
poison_endbr(addr); poison_endbr(addr);
if (IS_ENABLED(CONFIG_FINEIBT)) if (IS_ENABLED(CONFIG_FINEIBT))
poison_cfi(addr - 16); poison_cfi(addr - CFI_OFFSET);
} }
} }
@ -1389,6 +1389,8 @@ extern u8 fineibt_preamble_end[];
#define fineibt_preamble_ud 0x13 #define fineibt_preamble_ud 0x13
#define fineibt_preamble_hash 5 #define fineibt_preamble_hash 5
#define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
/* /*
* <fineibt_caller_start>: * <fineibt_caller_start>:
* 0: b8 78 56 34 12 mov $0x12345678, %eax * 0: b8 78 56 34 12 mov $0x12345678, %eax
@ -1634,7 +1636,7 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
* have determined there are no indirect calls to it and we * have determined there are no indirect calls to it and we
* don't need no CFI either. * don't need no CFI either.
*/ */
if (!is_endbr(addr + 16)) if (!is_endbr(addr + CFI_OFFSET))
continue; continue;
hash = decode_preamble_hash(addr, &arity); hash = decode_preamble_hash(addr, &arity);
@ -1642,6 +1644,15 @@ static int cfi_rewrite_preamble(s32 *start, s32 *end)
addr, addr, 5, addr)) addr, addr, 5, addr))
return -EINVAL; return -EINVAL;
/*
* FineIBT relies on being at func-16, so if the preamble is
* actually larger than that, place it at the tail end.
*
* NOTE: this is possible with things like DEBUG_CALL_THUNKS
* and DEBUG_FORCE_FUNCTION_ALIGN_64B.
*/
addr += CFI_OFFSET - fineibt_prefix_size;
text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size); text_poke_early(addr, fineibt_preamble_start, fineibt_preamble_size);
WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678); WARN_ON(*(u32 *)(addr + fineibt_preamble_hash) != 0x12345678);
text_poke_early(addr + fineibt_preamble_hash, &hash, 4); text_poke_early(addr + fineibt_preamble_hash, &hash, 4);
@ -1664,10 +1675,10 @@ static void cfi_rewrite_endbr(s32 *start, s32 *end)
for (s = start; s < end; s++) { for (s = start; s < end; s++) {
void *addr = (void *)s + *s; void *addr = (void *)s + *s;
if (!exact_endbr(addr + 16)) if (!exact_endbr(addr + CFI_OFFSET))
continue; continue;
poison_endbr(addr + 16); poison_endbr(addr + CFI_OFFSET);
} }
} }
@ -1772,7 +1783,8 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
if (FINEIBT_WARN(fineibt_preamble_size, 20) || if (FINEIBT_WARN(fineibt_preamble_size, 20) ||
FINEIBT_WARN(fineibt_preamble_bhi + fineibt_bhi1_size, 20) || FINEIBT_WARN(fineibt_preamble_bhi + fineibt_bhi1_size, 20) ||
FINEIBT_WARN(fineibt_caller_size, 14) || FINEIBT_WARN(fineibt_caller_size, 14) ||
FINEIBT_WARN(fineibt_paranoid_size, 20)) FINEIBT_WARN(fineibt_paranoid_size, 20) ||
WARN_ON_ONCE(CFI_OFFSET < fineibt_prefix_size))
return; return;
if (cfi_mode == CFI_AUTO) { if (cfi_mode == CFI_AUTO) {
@ -1885,6 +1897,11 @@ static void poison_cfi(void *addr)
*/ */
switch (cfi_mode) { switch (cfi_mode) {
case CFI_FINEIBT: case CFI_FINEIBT:
/*
* FineIBT preamble is at func-16.
*/
addr += CFI_OFFSET - fineibt_prefix_size;
/* /*
* FineIBT prefix should start with an ENDBR. * FineIBT prefix should start with an ENDBR.
*/ */
@ -1923,8 +1940,6 @@ static void poison_cfi(void *addr)
} }
} }
#define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
/* /*
* When regs->ip points to a 0xD6 byte in the FineIBT preamble, * When regs->ip points to a 0xD6 byte in the FineIBT preamble,
* return true and fill out target and type. * return true and fill out target and type.


@ -397,7 +397,7 @@ static inline void handle_invalid_op(struct pt_regs *regs)
ILL_ILLOPN, error_get_trap_addr(regs)); ILL_ILLOPN, error_get_trap_addr(regs));
} }
static noinstr bool handle_bug(struct pt_regs *regs) noinstr bool handle_bug(struct pt_regs *regs)
{ {
unsigned long addr = regs->ip; unsigned long addr = regs->ip;
bool handled = false; bool handled = false;


@ -20,7 +20,6 @@ if VIRTUALIZATION
config KVM_X86 config KVM_X86
def_tristate KVM if (KVM_INTEL != n || KVM_AMD != n) def_tristate KVM if (KVM_INTEL != n || KVM_AMD != n)
select KVM_COMMON select KVM_COMMON
select KVM_GENERIC_MMU_NOTIFIER
select KVM_ELIDE_TLB_FLUSH_IF_YOUNG select KVM_ELIDE_TLB_FLUSH_IF_YOUNG
select KVM_MMU_LOCKLESS_AGING select KVM_MMU_LOCKLESS_AGING
select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQCHIP


@ -4805,7 +4805,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif #endif
case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE: case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI: case KVM_CAP_USER_NMI:
case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_IOEVENTFD: case KVM_CAP_IOEVENTFD:


@ -411,14 +411,11 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
return; return;
if (trapnr == X86_TRAP_UD) { if (trapnr == X86_TRAP_UD) {
if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) { if (handle_bug(regs))
/* Skip the ud2. */
regs->ip += LEN_UD2;
return; return;
}
/* /*
* If this was a BUG and report_bug returns or if this * If this was a BUG and handle_bug returns or if this
* was just a normal #UD, we want to continue onward and * was just a normal #UD, we want to continue onward and
* crash. * crash.
*/ */


@ -438,17 +438,8 @@ static void emit_kcfi(u8 **pprog, u32 hash)
EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ EMIT1_off32(0xb8, hash); /* movl $hash, %eax */
#ifdef CONFIG_CALL_PADDING #ifdef CONFIG_CALL_PADDING
EMIT1(0x90); for (int i = 0; i < CONFIG_FUNCTION_PADDING_CFI; i++)
EMIT1(0x90); EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
EMIT1(0x90);
#endif #endif
EMIT_ENDBR(); EMIT_ENDBR();
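
The emit_kcfi() hunk replaces eleven copy-pasted EMIT1(0x90) statements with a loop keyed to CONFIG_FUNCTION_PADDING_CFI, so the padding length follows the config instead of silently diverging from it. A sketch of the same emission into a plain byte buffer (EMIT1() and the hash immediate are elided; the padding count is an assumed config value):

#include <stdio.h>

#define FUNCTION_PADDING_CFI	11	/* assumed config value */

int main(void)
{
	unsigned char buf[32];
	int len = 0;

	buf[len++] = 0xb8;		/* movl $hash, %eax (imm32 omitted) */
	for (int i = 0; i < FUNCTION_PADDING_CFI; i++)
		buf[len++] = 0x90;	/* nop */

	printf("emitted %d padding nops\n", len - 1);	/* 11 */
	return 0;
}
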


@ -23,9 +23,9 @@
#include "amdxdna_pci_drv.h" #include "amdxdna_pci_drv.h"
#include "amdxdna_pm.h" #include "amdxdna_pm.h"
static bool force_cmdlist; static bool force_cmdlist = true;
module_param(force_cmdlist, bool, 0600); module_param(force_cmdlist, bool, 0600);
MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default false)"); MODULE_PARM_DESC(force_cmdlist, "Force use command list (Default true)");
#define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */ #define HWCTX_MAX_TIMEOUT 60000 /* milliseconds */
@ -53,6 +53,7 @@ static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwct
{ {
drm_sched_stop(&hwctx->priv->sched, bad_job); drm_sched_stop(&hwctx->priv->sched, bad_job);
aie2_destroy_context(xdna->dev_handle, hwctx); aie2_destroy_context(xdna->dev_handle, hwctx);
drm_sched_start(&hwctx->priv->sched, 0);
} }
static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx) static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx)
@ -80,7 +81,6 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw
} }
out: out:
drm_sched_start(&hwctx->priv->sched, 0);
XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret); XDNA_DBG(xdna, "%s restarted, ret %d", hwctx->name, ret);
return ret; return ret;
} }
@ -297,19 +297,23 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence; struct dma_fence *fence;
int ret; int ret;
if (!hwctx->priv->mbox_chann) ret = amdxdna_pm_resume_get(hwctx->client->xdna);
if (ret)
return NULL; return NULL;
if (!mmget_not_zero(job->mm)) if (!hwctx->priv->mbox_chann) {
amdxdna_pm_suspend_put(hwctx->client->xdna);
return NULL;
}
if (!mmget_not_zero(job->mm)) {
amdxdna_pm_suspend_put(hwctx->client->xdna);
return ERR_PTR(-ESRCH); return ERR_PTR(-ESRCH);
}
kref_get(&job->refcnt); kref_get(&job->refcnt);
fence = dma_fence_get(job->fence); fence = dma_fence_get(job->fence);
ret = amdxdna_pm_resume_get(hwctx->client->xdna);
if (ret)
goto out;
if (job->drv_cmd) { if (job->drv_cmd) {
switch (job->drv_cmd->opcode) { switch (job->drv_cmd->opcode) {
case SYNC_DEBUG_BO: case SYNC_DEBUG_BO:
@ -497,7 +501,7 @@ static void aie2_release_resource(struct amdxdna_hwctx *hwctx)
if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) { if (AIE2_FEATURE_ON(xdna->dev_handle, AIE2_TEMPORAL_ONLY)) {
ret = aie2_destroy_context(xdna->dev_handle, hwctx); ret = aie2_destroy_context(xdna->dev_handle, hwctx);
if (ret) if (ret && ret != -ENODEV)
XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret); XDNA_ERR(xdna, "Destroy temporal only context failed, ret %d", ret);
} else { } else {
ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx); ret = xrs_release_resource(xdna->xrs_hdl, (uintptr_t)hwctx);
@ -629,7 +633,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
goto free_entity; goto free_entity;
} }
ret = amdxdna_pm_resume_get(xdna); ret = amdxdna_pm_resume_get_locked(xdna);
if (ret) if (ret)
goto free_col_list; goto free_col_list;
@ -760,7 +764,7 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
if (!hwctx->cus) if (!hwctx->cus)
return -ENOMEM; return -ENOMEM;
ret = amdxdna_pm_resume_get(xdna); ret = amdxdna_pm_resume_get_locked(xdna);
if (ret) if (ret)
goto free_cus; goto free_cus;
@ -1070,6 +1074,8 @@ void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo,
ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP, ret = dma_resv_wait_timeout(gobj->resv, DMA_RESV_USAGE_BOOKKEEP,
true, MAX_SCHEDULE_TIMEOUT); true, MAX_SCHEDULE_TIMEOUT);
if (!ret || ret == -ERESTARTSYS) if (!ret)
XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret); XDNA_ERR(xdna, "Failed to wait for bo, ret %ld", ret);
else if (ret == -ERESTARTSYS)
XDNA_DBG(xdna, "Wait for bo interrupted by signal");
} }


@ -216,8 +216,10 @@ static int aie2_destroy_context_req(struct amdxdna_dev_hdl *ndev, u32 id)
req.context_id = id; req.context_id = id;
ret = aie2_send_mgmt_msg_wait(ndev, &msg); ret = aie2_send_mgmt_msg_wait(ndev, &msg);
if (ret) if (ret && ret != -ENODEV)
XDNA_WARN(xdna, "Destroy context failed, ret %d", ret); XDNA_WARN(xdna, "Destroy context failed, ret %d", ret);
else if (ret == -ENODEV)
XDNA_DBG(xdna, "Destroy context: device already stopped");
return ret; return ret;
} }
@ -318,6 +320,9 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
struct amdxdna_dev *xdna = ndev->xdna; struct amdxdna_dev *xdna = ndev->xdna;
int ret; int ret;
if (!hwctx->priv->mbox_chann)
return 0;
xdna_mailbox_stop_channel(hwctx->priv->mbox_chann); xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);
ret = aie2_destroy_context_req(ndev, hwctx->fw_ctx_id); ret = aie2_destroy_context_req(ndev, hwctx->fw_ctx_id);
xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann); xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
@ -694,11 +699,11 @@ aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *siz
u32 cmd_len; u32 cmd_len;
void *cmd; void *cmd;
memset(npu_slot, 0, sizeof(*npu_slot));
cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
if (*size < sizeof(*npu_slot) + cmd_len) if (*size < sizeof(*npu_slot) + cmd_len)
return -EINVAL; return -EINVAL;
memset(npu_slot, 0, sizeof(*npu_slot));
npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
if (npu_slot->cu_idx == INVALID_CU_IDX) if (npu_slot->cu_idx == INVALID_CU_IDX)
return -EINVAL; return -EINVAL;
@ -719,7 +724,6 @@ aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
u32 cmd_len; u32 cmd_len;
u32 arg_sz; u32 arg_sz;
memset(npu_slot, 0, sizeof(*npu_slot));
sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
arg_sz = cmd_len - sizeof(*sn); arg_sz = cmd_len - sizeof(*sn);
if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE) if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE)
@ -728,6 +732,7 @@ aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
if (*size < sizeof(*npu_slot) + arg_sz) if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL; return -EINVAL;
memset(npu_slot, 0, sizeof(*npu_slot));
npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
if (npu_slot->cu_idx == INVALID_CU_IDX) if (npu_slot->cu_idx == INVALID_CU_IDX)
return -EINVAL; return -EINVAL;
@ -751,7 +756,6 @@ aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t
u32 cmd_len; u32 cmd_len;
u32 arg_sz; u32 arg_sz;
memset(npu_slot, 0, sizeof(*npu_slot));
pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
arg_sz = cmd_len - sizeof(*pd); arg_sz = cmd_len - sizeof(*pd);
if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
@ -760,6 +764,7 @@ aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t
if (*size < sizeof(*npu_slot) + arg_sz) if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL; return -EINVAL;
memset(npu_slot, 0, sizeof(*npu_slot));
npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo); npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
if (npu_slot->cu_idx == INVALID_CU_IDX) if (npu_slot->cu_idx == INVALID_CU_IDX)
return -EINVAL; return -EINVAL;
@ -787,7 +792,6 @@ aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
u32 cmd_len; u32 cmd_len;
u32 arg_sz; u32 arg_sz;
memset(npu_slot, 0, sizeof(*npu_slot));
pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len); pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
arg_sz = cmd_len - sizeof(*pd); arg_sz = cmd_len - sizeof(*pd);
if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE) if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
@ -796,6 +800,7 @@ aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
if (*size < sizeof(*npu_slot) + arg_sz) if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL; return -EINVAL;
memset(npu_slot, 0, sizeof(*npu_slot));
npu_slot->type = EXEC_NPU_TYPE_ELF; npu_slot->type = EXEC_NPU_TYPE_ELF;
npu_slot->inst_buf_addr = pd->inst_buf; npu_slot->inst_buf_addr = pd->inst_buf;
npu_slot->save_buf_addr = pd->save_buf; npu_slot->save_buf_addr = pd->save_buf;


@ -32,6 +32,11 @@ static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600); module_param(aie2_max_col, uint, 0600);
MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used"); MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used");
static char *npu_fw[] = {
"npu_7.sbin",
"npu.sbin"
};
/* /*
* The management mailbox channel is allocated by firmware. * The management mailbox channel is allocated by firmware.
* The related register and ring buffer information is on SRAM BAR. * The related register and ring buffer information is on SRAM BAR.
@ -323,6 +328,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
return; return;
} }
aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL);
aie2_mgmt_fw_fini(ndev); aie2_mgmt_fw_fini(ndev);
xdna_mailbox_stop_channel(ndev->mgmt_chann); xdna_mailbox_stop_channel(ndev->mgmt_chann);
xdna_mailbox_destroy_channel(ndev->mgmt_chann); xdna_mailbox_destroy_channel(ndev->mgmt_chann);
@ -406,18 +412,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
goto stop_psp; goto stop_psp;
} }
ret = aie2_pm_init(ndev);
if (ret) {
XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
goto destroy_mgmt_chann;
}
ret = aie2_mgmt_fw_init(ndev); ret = aie2_mgmt_fw_init(ndev);
if (ret) { if (ret) {
XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret); XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
goto destroy_mgmt_chann; goto destroy_mgmt_chann;
} }
ret = aie2_pm_init(ndev);
if (ret) {
XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
goto destroy_mgmt_chann;
}
ret = aie2_mgmt_fw_query(ndev); ret = aie2_mgmt_fw_query(ndev);
if (ret) { if (ret) {
XDNA_ERR(xdna, "failed to query fw, ret %d", ret); XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
@ -451,7 +457,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{ {
struct amdxdna_client *client; struct amdxdna_client *client;
guard(mutex)(&xdna->dev_lock);
list_for_each_entry(client, &xdna->client_list, node) list_for_each_entry(client, &xdna->client_list, node)
aie2_hwctx_suspend(client); aie2_hwctx_suspend(client);
@ -489,6 +494,7 @@ static int aie2_init(struct amdxdna_dev *xdna)
struct psp_config psp_conf; struct psp_config psp_conf;
const struct firmware *fw; const struct firmware *fw;
unsigned long bars = 0; unsigned long bars = 0;
char *fw_full_path;
int i, nvec, ret; int i, nvec, ret;
if (!hypervisor_is_type(X86_HYPER_NATIVE)) { if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
@ -503,7 +509,19 @@ static int aie2_init(struct amdxdna_dev *xdna)
ndev->priv = xdna->dev_info->dev_priv; ndev->priv = xdna->dev_info->dev_priv;
ndev->xdna = xdna; ndev->xdna = xdna;
ret = request_firmware(&fw, ndev->priv->fw_path, &pdev->dev); for (i = 0; i < ARRAY_SIZE(npu_fw); i++) {
fw_full_path = kasprintf(GFP_KERNEL, "%s%s", ndev->priv->fw_path, npu_fw[i]);
if (!fw_full_path)
return -ENOMEM;
ret = firmware_request_nowarn(&fw, fw_full_path, &pdev->dev);
kfree(fw_full_path);
if (!ret) {
XDNA_INFO(xdna, "Load firmware %s%s", ndev->priv->fw_path, npu_fw[i]);
break;
}
}
if (ret) { if (ret) {
XDNA_ERR(xdna, "failed to request_firmware %s, ret %d", XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
ndev->priv->fw_path, ret); ndev->priv->fw_path, ret);
@ -951,7 +969,7 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
if (!drm_dev_enter(&xdna->ddev, &idx)) if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV; return -ENODEV;
ret = amdxdna_pm_resume_get(xdna); ret = amdxdna_pm_resume_get_locked(xdna);
if (ret) if (ret)
goto dev_exit; goto dev_exit;
@ -1044,7 +1062,7 @@ static int aie2_get_array(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx)) if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV; return -ENODEV;
ret = amdxdna_pm_resume_get(xdna); ret = amdxdna_pm_resume_get_locked(xdna);
if (ret) if (ret)
goto dev_exit; goto dev_exit;
@ -1134,7 +1152,7 @@ static int aie2_set_state(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx)) if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV; return -ENODEV;
ret = amdxdna_pm_resume_get(xdna); ret = amdxdna_pm_resume_get_locked(xdna);
if (ret) if (ret)
goto dev_exit; goto dev_exit;
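
The firmware probing above now tries npu_7.sbin before falling back to npu.sbin under the per-device directory. The shape of the loop, sketched with stand-in names and a fake loader (firmware_request_nowarn() and kasprintf() are the real kernel calls; everything else here is illustrative):

#include <stdio.h>
#include <string.h>

static int request_fw(const char *path)
{
	/* pretend only the npu_7 image is present */
	return strcmp(path, "amdnpu/17f0_10/npu_7.sbin") ? -2 : 0;
}

int main(void)
{
	static const char *const names[] = { "npu_7.sbin", "npu.sbin" };
	const char *dir = "amdnpu/17f0_10/";
	char full[64];
	int ret = -2;

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		snprintf(full, sizeof(full), "%s%s", dir, names[i]);
		ret = request_fw(full);
		if (!ret) {
			printf("loaded %s\n", full);
			break;
		}
	}
	return ret;
}
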


@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{ {
int ret; int ret;
ret = amdxdna_pm_resume_get(ndev->xdna); ret = amdxdna_pm_resume_get_locked(ndev->xdna);
if (ret) if (ret)
return ret; return ret;


@ -104,7 +104,10 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
if (size) { if (size) {
count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header); count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
if (unlikely(count <= num_masks)) { if (unlikely(count <= num_masks ||
count * sizeof(u32) +
offsetof(struct amdxdna_cmd, data[0]) >
abo->mem.size)) {
*size = 0; *size = 0;
return NULL; return NULL;
} }
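
The widened check above rejects commands whose declared dword count would place the payload past the end of the backing object, not just counts smaller than the mask area. A standalone version of the bound (the struct layout is illustrative, not the driver's exact one):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cmd {
	uint32_t header;
	uint32_t data[];	/* count dwords follow the header */
};

static int payload_in_bounds(uint32_t count, size_t bo_size)
{
	return (size_t)count * sizeof(uint32_t) +
	       offsetof(struct cmd, data[0]) <= bo_size;
}

int main(void)
{
	printf("%d %d\n", payload_in_bounds(3, 64),
	       payload_in_bounds(1000, 64));	/* 1 0 */
	return 0;
}
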
@ -266,9 +269,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
struct amdxdna_drm_config_hwctx *args = data; struct amdxdna_drm_config_hwctx *args = data;
struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_hwctx *hwctx; struct amdxdna_hwctx *hwctx;
int ret, idx;
u32 buf_size; u32 buf_size;
void *buf; void *buf;
int ret;
u64 val; u64 val;
if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad))) if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
@ -310,20 +313,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
return -EINVAL; return -EINVAL;
} }
mutex_lock(&xdna->dev_lock); guard(mutex)(&xdna->dev_lock);
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = xa_load(&client->hwctx_xa, args->handle); hwctx = xa_load(&client->hwctx_xa, args->handle);
if (!hwctx) { if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle); XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
ret = -EINVAL; ret = -EINVAL;
goto unlock_srcu; goto free_buf;
} }
ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size); ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
unlock_srcu: free_buf:
srcu_read_unlock(&client->hwctx_srcu, idx);
mutex_unlock(&xdna->dev_lock);
kfree(buf); kfree(buf);
return ret; return ret;
} }
@ -334,7 +334,7 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
struct amdxdna_hwctx *hwctx; struct amdxdna_hwctx *hwctx;
struct amdxdna_gem_obj *abo; struct amdxdna_gem_obj *abo;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
int ret, idx; int ret;
if (!xdna->dev_info->ops->hwctx_sync_debug_bo) if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
return -EOPNOTSUPP; return -EOPNOTSUPP;
@ -345,17 +345,15 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
abo = to_xdna_obj(gobj); abo = to_xdna_obj(gobj);
guard(mutex)(&xdna->dev_lock); guard(mutex)(&xdna->dev_lock);
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx); hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
if (!hwctx) { if (!hwctx) {
ret = -EINVAL; ret = -EINVAL;
goto unlock_srcu; goto put_obj;
} }
ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl); ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
unlock_srcu: put_obj:
srcu_read_unlock(&client->hwctx_srcu, idx);
drm_gem_object_put(gobj); drm_gem_object_put(gobj);
return ret; return ret;
} }


@ -21,8 +21,6 @@
#include "amdxdna_pci_drv.h" #include "amdxdna_pci_drv.h"
#include "amdxdna_ubuf.h" #include "amdxdna_ubuf.h"
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
MODULE_IMPORT_NS("DMA_BUF"); MODULE_IMPORT_NS("DMA_BUF");
static int static int
@ -745,12 +743,6 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
{ {
struct amdxdna_dev *xdna = to_xdna_dev(dev); struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_gem_obj *abo; struct amdxdna_gem_obj *abo;
int ret;
if (args->size > XDNA_MAX_CMD_BO_SIZE) {
XDNA_ERR(xdna, "Command bo size 0x%llx too large", args->size);
return ERR_PTR(-EINVAL);
}
if (args->size < sizeof(struct amdxdna_cmd)) { if (args->size < sizeof(struct amdxdna_cmd)) {
XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size); XDNA_DBG(xdna, "Command BO size 0x%llx too small", args->size);
@ -764,17 +756,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
abo->type = AMDXDNA_BO_CMD; abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv; abo->client = filp->driver_priv;
ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
}
return abo; return abo;
release_obj:
drm_gem_object_put(to_gobj(abo));
return ERR_PTR(ret);
} }
int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@ -871,6 +853,7 @@ struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
struct amdxdna_dev *xdna = client->xdna; struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_gem_obj *abo; struct amdxdna_gem_obj *abo;
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
int ret;
gobj = drm_gem_object_lookup(client->filp, bo_hdl); gobj = drm_gem_object_lookup(client->filp, bo_hdl);
if (!gobj) { if (!gobj) {
@ -879,9 +862,26 @@ struct amdxdna_gem_obj *amdxdna_gem_get_obj(struct amdxdna_client *client,
} }
abo = to_xdna_obj(gobj); abo = to_xdna_obj(gobj);
if (bo_type == AMDXDNA_BO_INVALID || abo->type == bo_type) if (bo_type != AMDXDNA_BO_INVALID && abo->type != bo_type)
goto put_obj;
if (bo_type != AMDXDNA_BO_CMD || abo->mem.kva)
return abo; return abo;
if (abo->mem.size > SZ_32K) {
XDNA_ERR(xdna, "Cmd bo is too big %ld", abo->mem.size);
goto put_obj;
}
ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto put_obj;
}
return abo;
put_obj:
drm_gem_object_put(gobj); drm_gem_object_put(gobj);
return NULL; return NULL;
} }


@ -23,6 +23,9 @@ MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin"); MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin"); MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin"); MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
MODULE_FIRMWARE("amdnpu/1502_00/npu_7.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu_7.sbin");
MODULE_FIRMWARE("amdnpu/17f0_11/npu_7.sbin");
/* /*
* 0.0: Initial version * 0.0: Initial version


@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
int ret = -EOPNOTSUPP; int ret = -EOPNOTSUPP;
guard(mutex)(&xdna->dev_lock);
if (xdna->dev_info->ops->suspend) if (xdna->dev_info->ops->suspend)
ret = xdna->dev_info->ops->suspend(xdna); ret = xdna->dev_info->ops->suspend(xdna);
@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev)); struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
int ret = -EOPNOTSUPP; int ret = -EOPNOTSUPP;
guard(mutex)(&xdna->dev_lock);
if (xdna->dev_info->ops->resume) if (xdna->dev_info->ops->resume)
ret = xdna->dev_info->ops->resume(xdna); ret = xdna->dev_info->ops->resume(xdna);


@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
void amdxdna_pm_init(struct amdxdna_dev *xdna); void amdxdna_pm_init(struct amdxdna_dev *xdna);
void amdxdna_pm_fini(struct amdxdna_dev *xdna); void amdxdna_pm_fini(struct amdxdna_dev *xdna);
static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
{
int ret;
mutex_unlock(&xdna->dev_lock);
ret = amdxdna_pm_resume_get(xdna);
mutex_lock(&xdna->dev_lock);
return ret;
}
#endif /* _AMDXDNA_PM_H_ */ #endif /* _AMDXDNA_PM_H_ */


@ -7,6 +7,7 @@
#include <drm/drm_device.h> #include <drm/drm_device.h>
#include <drm/drm_print.h> #include <drm/drm_print.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <linux/overflow.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
@ -176,7 +177,10 @@ struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
goto free_ent; goto free_ent;
} }
exp_info.size += va_ent[i].len; if (check_add_overflow(exp_info.size, va_ent[i].len, &exp_info.size)) {
ret = -EINVAL;
goto free_ent;
}
} }
ubuf->nr_pages = exp_info.size >> PAGE_SHIFT; ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
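
check_add_overflow() makes the size accumulation fail closed when a crafted set of user ranges would wrap the total. The same guard in userspace, via the GCC/Clang builtin that the kernel macro wraps:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = UINT64_MAX - 10, len = 100, sum;

	if (__builtin_add_overflow(size, len, &sum))
		printf("overflow rejected\n");
	else
		printf("size = %llu\n", (unsigned long long)sum);
	return 0;
}
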


@ -72,7 +72,7 @@ static const struct aie2_fw_feature_tbl npu1_fw_feature_table[] = {
}; };
static const struct amdxdna_dev_priv npu1_dev_priv = { static const struct amdxdna_dev_priv npu1_dev_priv = {
.fw_path = "amdnpu/1502_00/npu.sbin", .fw_path = "amdnpu/1502_00/",
.rt_config = npu1_default_rt_cfg, .rt_config = npu1_default_rt_cfg,
.dpm_clk_tbl = npu1_dpm_clk_table, .dpm_clk_tbl = npu1_dpm_clk_table,
.fw_feature_tbl = npu1_fw_feature_table, .fw_feature_tbl = npu1_fw_feature_table,


@ -98,7 +98,7 @@ const struct aie2_fw_feature_tbl npu4_fw_feature_table[] = {
}; };
static const struct amdxdna_dev_priv npu4_dev_priv = { static const struct amdxdna_dev_priv npu4_dev_priv = {
.fw_path = "amdnpu/17f0_10/npu.sbin", .fw_path = "amdnpu/17f0_10/",
.rt_config = npu4_default_rt_cfg, .rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table, .dpm_clk_tbl = npu4_dpm_clk_table,
.fw_feature_tbl = npu4_fw_feature_table, .fw_feature_tbl = npu4_fw_feature_table,


@ -63,7 +63,7 @@
#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE #define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
static const struct amdxdna_dev_priv npu5_dev_priv = { static const struct amdxdna_dev_priv npu5_dev_priv = {
.fw_path = "amdnpu/17f0_11/npu.sbin", .fw_path = "amdnpu/17f0_11/",
.rt_config = npu4_default_rt_cfg, .rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table, .dpm_clk_tbl = npu4_dpm_clk_table,
.fw_feature_tbl = npu4_fw_feature_table, .fw_feature_tbl = npu4_fw_feature_table,


@ -63,7 +63,7 @@
#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE #define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
static const struct amdxdna_dev_priv npu6_dev_priv = { static const struct amdxdna_dev_priv npu6_dev_priv = {
.fw_path = "amdnpu/17f0_10/npu.sbin", .fw_path = "amdnpu/17f0_10/",
.rt_config = npu4_default_rt_cfg, .rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table, .dpm_clk_tbl = npu4_dpm_clk_table,
.fw_feature_tbl = npu4_fw_feature_table, .fw_feature_tbl = npu4_fw_feature_table,


@ -154,7 +154,7 @@ static void cmd_state_init(struct cmd_state *st)
static u64 cmd_to_addr(u32 *cmd) static u64 cmd_to_addr(u32 *cmd)
{ {
return ((u64)((cmd[0] & 0xff0000) << 16)) | cmd[1]; return (((u64)cmd[0] & 0xff0000) << 16) | cmd[1];
} }
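
The cmd_to_addr() fix is a classic widening bug: with the cast applied after the shift, (cmd[0] & 0xff0000) << 16 is evaluated in 32 bits and bits 16..23 are shifted out before the u64 conversion, losing the high address byte. A small reproduction:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cmd0 = 0x00ab0000;

	uint64_t broken = (uint64_t)((cmd0 & 0xff0000) << 16);	/* shift in u32 */
	uint64_t fixed  = ((uint64_t)cmd0 & 0xff0000) << 16;	/* widen first */

	printf("broken=%#llx fixed=%#llx\n",
	       (unsigned long long)broken, (unsigned long long)fixed);
	return 0;	/* broken=0 fixed=0xab00000000 */
}
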
static u64 dma_length(struct ethosu_validated_cmdstream_info *info, static u64 dma_length(struct ethosu_validated_cmdstream_info *info,


@ -389,6 +389,19 @@ static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
}, },
}, },
/*
* The screen backlight turns off during udev device creation
* when returning true for _OSI("Windows 2009")
*/
{
.callback = dmi_disable_osi_win7,
.ident = "Acer Aspire One D255",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "AOD255"),
},
},
/* /*
* The wireless hotkey does not work on those machines when * The wireless hotkey does not work on those machines when
* returning true for _OSI("Windows 2012") * returning true for _OSI("Windows 2012")


@ -386,6 +386,14 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E1"), DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
}, },
}, },
{
.callback = init_nvs_save_s3,
.ident = "Lenovo G70-35",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "80Q5"),
},
},
/* /*
* ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
* the Low Power S0 Idle firmware interface (see * the Low Power S0 Idle firmware interface (see


@ -6269,10 +6269,6 @@ static void ata_port_detach(struct ata_port *ap)
} }
} }
/* Make sure the deferred qc work finished. */
cancel_work_sync(&ap->deferred_qc_work);
WARN_ON(ap->deferred_qc);
/* Tell EH to disable all devices */ /* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING; ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap); ata_port_schedule_eh(ap);
@ -6283,9 +6279,11 @@ static void ata_port_detach(struct ata_port *ap)
/* wait till EH commits suicide */ /* wait till EH commits suicide */
ata_port_wait_eh(ap); ata_port_wait_eh(ap);
/* it better be dead now */ /* It better be dead now and not have any remaining deferred qc. */
WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
WARN_ON(ap->deferred_qc);
cancel_work_sync(&ap->deferred_qc_work);
cancel_delayed_work_sync(&ap->hotplug_task); cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task); cancel_delayed_work_sync(&ap->scsi_rescan_task);


@ -640,12 +640,28 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
set_host_byte(scmd, DID_OK); set_host_byte(scmd, DID_OK);
ata_qc_for_each_raw(ap, qc, i) { ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE && if (qc->scsicmd != scmd)
qc->scsicmd == scmd) continue;
if ((qc->flags & ATA_QCFLAG_ACTIVE) ||
qc == ap->deferred_qc)
break; break;
} }
if (i < ATA_MAX_QUEUE) { if (qc == ap->deferred_qc) {
/*
* This is a deferred command that timed out while
* waiting for the command queue to drain. Since the qc
* is not active yet (deferred_qc is still set, so the
* deferred qc work has not issued the command yet),
* simply signal the timeout by finishing the SCSI
* command and clear the deferred qc to prevent the
* deferred qc work from issuing this qc.
*/
WARN_ON_ONCE(qc->flags & ATA_QCFLAG_ACTIVE);
ap->deferred_qc = NULL;
set_host_byte(scmd, DID_TIME_OUT);
scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
} else if (i < ATA_MAX_QUEUE) {
/* the scmd has an associated qc */ /* the scmd has an associated qc */
if (!(qc->flags & ATA_QCFLAG_EH)) { if (!(qc->flags & ATA_QCFLAG_EH)) {
/* which hasn't failed yet, timeout */ /* which hasn't failed yet, timeout */


@ -797,7 +797,18 @@ struct fwnode_handle *
fwnode_get_next_child_node(const struct fwnode_handle *fwnode, fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
struct fwnode_handle *child) struct fwnode_handle *child)
{ {
return fwnode_call_ptr_op(fwnode, get_next_child_node, child); struct fwnode_handle *next;
if (IS_ERR_OR_NULL(fwnode))
return NULL;
/* Try to find a child in primary fwnode */
next = fwnode_call_ptr_op(fwnode, get_next_child_node, child);
if (next)
return next;
/* When no more children in primary, continue with secondary */
return fwnode_call_ptr_op(fwnode->secondary, get_next_child_node, child);
} }
EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
@ -841,19 +852,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
struct fwnode_handle *device_get_next_child_node(const struct device *dev, struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child) struct fwnode_handle *child)
{ {
const struct fwnode_handle *fwnode = dev_fwnode(dev); return fwnode_get_next_child_node(dev_fwnode(dev), child);
struct fwnode_handle *next;
if (IS_ERR_OR_NULL(fwnode))
return NULL;
/* Try to find a child in primary fwnode */
next = fwnode_get_next_child_node(fwnode, child);
if (next)
return next;
/* When no more children in primary, continue with secondary */
return fwnode_get_next_child_node(fwnode->secondary, child);
} }
EXPORT_SYMBOL_GPL(device_get_next_child_node); EXPORT_SYMBOL_GPL(device_get_next_child_node);


@ -483,38 +483,20 @@ void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i) int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{ {
struct lru_cache *al = device->act_log;
/* for bios crossing activity log extent boundaries, /* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */ * we may need to activate two extents in one go */
unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9); unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
unsigned nr_al_extents;
unsigned available_update_slots;
unsigned enr; unsigned enr;
D_ASSERT(device, first <= last); if (i->partially_in_al_next_enr) {
D_ASSERT(device, first < i->partially_in_al_next_enr);
nr_al_extents = 1 + last - first; /* worst case: all touched extends are cold. */ D_ASSERT(device, last >= i->partially_in_al_next_enr);
available_update_slots = min(al->nr_elements - al->used, first = i->partially_in_al_next_enr;
al->max_pending_changes - al->pending_changes);
/* We want all necessary updates for a given request within the same transaction
* We could first check how many updates are *actually* needed,
* and use that instead of the worst-case nr_al_extents */
if (available_update_slots < nr_al_extents) {
/* Too many activity log extents are currently "hot".
*
* If we have accumulated pending changes already,
* we made progress.
*
* If we cannot get even a single pending change through,
* stop the fast path until we made some progress,
* or requests to "cold" extents could be starved. */
if (!al->pending_changes)
__set_bit(__LC_STARVING, &device->act_log->flags);
return -ENOBUFS;
} }
D_ASSERT(device, first <= last);
/* Is resync active in this area? */ /* Is resync active in this area? */
for (enr = first; enr <= last; enr++) { for (enr = first; enr <= last; enr++) {
struct lc_element *tmp; struct lc_element *tmp;
@ -529,14 +511,21 @@ int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *
} }
} }
/* Checkout the refcounts. /* Try to checkout the refcounts. */
* Given that we checked for available elements and update slots above,
* this has to be successful. */
for (enr = first; enr <= last; enr++) { for (enr = first; enr <= last; enr++) {
struct lc_element *al_ext; struct lc_element *al_ext;
al_ext = lc_get_cumulative(device->act_log, enr); al_ext = lc_get_cumulative(device->act_log, enr);
if (!al_ext)
drbd_info(device, "LOGIC BUG for enr=%u\n", enr); if (!al_ext) {
/* Did not work. We may have exhausted the possible
* changes per transaction. Or raced with someone
* "locking" it against changes.
* Remember where to continue from.
*/
if (enr > first)
i->partially_in_al_next_enr = enr;
return -ENOBUFS;
}
} }
return 0; return 0;
} }
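
The drbd_al_begin_io_nonblock() rework drops the pessimistic worst-case slot check in favour of just trying, and remembers how far it got so the next attempt resumes instead of redoing the whole range. The resume-point pattern in isolation (names and the "busy" condition are invented for the demo):

#include <stdbool.h>
#include <stdio.h>

struct interval { unsigned first, last, next; /* next == 0: no progress yet */ };

static bool try_claim(unsigned enr)
{
	return enr != 3;	/* pretend slot 3 is currently busy */
}

static int claim_range(struct interval *i)
{
	unsigned enr = i->next ? i->next : i->first;

	for (; enr <= i->last; enr++) {
		if (!try_claim(enr)) {
			if (enr > i->first)
				i->next = enr;	/* resume here next time */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct interval i = { .first = 1, .last = 5, .next = 0 };

	printf("%d (resume at %u)\n", claim_range(&i), i.next);	/* -1 (resume at 3) */
	return 0;
}
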
@ -556,7 +545,11 @@ void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
for (enr = first; enr <= last; enr++) { for (enr = first; enr <= last; enr++) {
extent = lc_find(device->act_log, enr); extent = lc_find(device->act_log, enr);
if (!extent) { /* Yes, this masks a bug elsewhere. However, during normal
* operation this is harmless, so no need to crash the kernel
* by the BUG_ON(refcount == 0) in lc_put().
*/
if (!extent || extent->refcnt == 0) {
drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr); drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
continue; continue;
} }


@ -8,12 +8,15 @@
struct drbd_interval { struct drbd_interval {
struct rb_node rb; struct rb_node rb;
sector_t sector; /* start sector of the interval */ sector_t sector; /* start sector of the interval */
unsigned int size; /* size in bytes */
sector_t end; /* highest interval end in subtree */ sector_t end; /* highest interval end in subtree */
unsigned int size; /* size in bytes */
unsigned int local:1 /* local or remote request? */; unsigned int local:1 /* local or remote request? */;
unsigned int waiting:1; /* someone is waiting for completion */ unsigned int waiting:1; /* someone is waiting for completion */
unsigned int completed:1; /* this has been completed already; unsigned int completed:1; /* this has been completed already;
* ignore for conflict detection */ * ignore for conflict detection */
/* to resume a partially successful drbd_al_begin_io_nonblock(); */
unsigned int partially_in_al_next_enr;
}; };
static inline void drbd_clear_interval(struct drbd_interval *i) static inline void drbd_clear_interval(struct drbd_interval *i)


@ -32,6 +32,7 @@
#include <linux/memcontrol.h> #include <linux/memcontrol.h>
#include <linux/mm_inline.h> #include <linux/mm_inline.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/notifier.h> #include <linux/notifier.h>
@ -732,9 +733,9 @@ int drbd_send_sync_param(struct drbd_peer_device *peer_device)
} }
if (apv >= 88) if (apv >= 88)
strcpy(p->verify_alg, nc->verify_alg); strscpy(p->verify_alg, nc->verify_alg);
if (apv >= 89) if (apv >= 89)
strcpy(p->csums_alg, nc->csums_alg); strscpy(p->csums_alg, nc->csums_alg);
rcu_read_unlock(); rcu_read_unlock();
return drbd_send_command(peer_device, sock, cmd, size, NULL, 0); return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
@ -745,6 +746,7 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_protocol *p; struct p_protocol *p;
struct net_conf *nc; struct net_conf *nc;
size_t integrity_alg_len;
int size, cf; int size, cf;
sock = &connection->data; sock = &connection->data;
@ -762,8 +764,10 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
} }
size = sizeof(*p); size = sizeof(*p);
if (connection->agreed_pro_version >= 87) if (connection->agreed_pro_version >= 87) {
size += strlen(nc->integrity_alg) + 1; integrity_alg_len = strlen(nc->integrity_alg) + 1;
size += integrity_alg_len;
}
p->protocol = cpu_to_be32(nc->wire_protocol); p->protocol = cpu_to_be32(nc->wire_protocol);
p->after_sb_0p = cpu_to_be32(nc->after_sb_0p); p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
@ -778,7 +782,7 @@ int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cm
p->conn_flags = cpu_to_be32(cf); p->conn_flags = cpu_to_be32(cf);
if (connection->agreed_pro_version >= 87) if (connection->agreed_pro_version >= 87)
strcpy(p->integrity_alg, nc->integrity_alg); strscpy(p->integrity_alg, nc->integrity_alg, integrity_alg_len);
rcu_read_unlock(); rcu_read_unlock();
return __conn_send_command(connection, sock, cmd, size, NULL, 0); return __conn_send_command(connection, sock, cmd, size, NULL, 0);
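
The strcpy() to strscpy() conversions bound every copy by the destination size (the kernel's two-argument strscpy() infers sizeof(dst) through a macro). A userspace stand-in with the same contract, always NUL-terminating and never writing past the buffer:

#include <stdio.h>
#include <string.h>

static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (len >= size) {
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -1;	/* truncated, like strscpy()'s -E2BIG */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char alg[8];

	printf("%ld %s\n", bounded_copy(alg, "sha256-long-name", sizeof(alg)),
	       alg);	/* -1 sha256- */
	return 0;
}
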


@ -3801,14 +3801,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
*new_net_conf = *old_net_conf; *new_net_conf = *old_net_conf;
if (verify_tfm) { if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg); strscpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
crypto_free_shash(peer_device->connection->verify_tfm); crypto_free_shash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm; peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg); drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
} }
if (csums_tfm) { if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg); strscpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
crypto_free_shash(peer_device->connection->csums_tfm); crypto_free_shash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm; peer_device->connection->csums_tfm = csums_tfm;


@@ -621,7 +621,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;

 	case READ_COMPLETED_WITH_ERROR:
-		drbd_set_out_of_sync(peer_device, req->i.sector, req->i.size);
+		drbd_set_out_of_sync(first_peer_device(device),
+				     req->i.sector, req->i.size);
 		drbd_report_io_error(device, req);
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
 		fallthrough;


@@ -542,6 +542,21 @@ out:
 	zloop_put_cmd(cmd);
 }

+/*
+ * Sync the entire FS containing the zone files instead of walking all files.
+ */
+static int zloop_flush(struct zloop_device *zlo)
+{
+	struct super_block *sb = file_inode(zlo->data_dir)->i_sb;
+	int ret;
+
+	down_read(&sb->s_umount);
+	ret = sync_filesystem(sb);
+	up_read(&sb->s_umount);
+
+	return ret;
+}
+
 static void zloop_handle_cmd(struct zloop_cmd *cmd)
 {
 	struct request *rq = blk_mq_rq_from_pdu(cmd);
@@ -562,11 +577,7 @@ static void zloop_handle_cmd(struct zloop_cmd *cmd)
 		zloop_rw(cmd);
 		return;
 	case REQ_OP_FLUSH:
-		/*
-		 * Sync the entire FS containing the zone files instead of
-		 * walking all files
-		 */
-		cmd->ret = sync_filesystem(file_inode(zlo->data_dir)->i_sb);
+		cmd->ret = zloop_flush(zlo);
 		break;
 	case REQ_OP_ZONE_RESET:
 		cmd->ret = zloop_reset_zone(zlo, rq_zone_no(rq));
@@ -981,7 +992,8 @@ static int zloop_ctl_add(struct zloop_options *opts)
 	struct queue_limits lim = {
 		.max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
 		.chunk_sectors = opts->zone_size,
-		.features = BLK_FEAT_ZONED,
+		.features = BLK_FEAT_ZONED | BLK_FEAT_WRITE_CACHE,
 	};
 	unsigned int nr_zones, i, j;
 	struct zloop_device *zlo;
@@ -1162,7 +1174,12 @@ static int zloop_ctl_remove(struct zloop_options *opts)
 	int ret;

 	if (!(opts->mask & ZLOOP_OPT_ID)) {
-		pr_err("No ID specified\n");
+		pr_err("No ID specified for remove\n");
+		return -EINVAL;
+	}
+
+	if (opts->mask & ~ZLOOP_OPT_ID) {
+		pr_err("Invalid option specified for remove\n");
 		return -EINVAL;
 	}
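Syncing the filesystem that contains the zone files, rather than fsync()ing each file, is the same trade-off userspace makes with syncfs(2). A minimal sketch (the path is a placeholder):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Any fd on the filesystem works; the zone-file directory is
             * a natural choice (placeholder path). */
            int fd = open("/var/lib/zloop/0", O_RDONLY | O_DIRECTORY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (syncfs(fd) < 0) {   /* one call flushes the whole FS */
                    perror("syncfs");
                    return 1;
            }
            close(fd);
            return 0;
    }

In the kernel hunk, taking sb->s_umount for read also keeps the filesystem from being unmounted while the sync runs; in the userspace sketch the open file descriptor pins the filesystem instead.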


@@ -2046,19 +2046,23 @@ retry:
 	}

 out:
-	if (ret && retries < MAX_INIT_RETRIES) {
-		bt_dev_warn(hdev, "Retry BT power ON:%d", retries);
+	if (ret) {
 		qca_power_shutdown(hu);
-		if (hu->serdev) {
-			serdev_device_close(hu->serdev);
-			ret = serdev_device_open(hu->serdev);
-			if (ret) {
-				bt_dev_err(hdev, "failed to open port");
-				return ret;
+
+		if (retries < MAX_INIT_RETRIES) {
+			bt_dev_warn(hdev, "Retry BT power ON:%d", retries);
+			if (hu->serdev) {
+				serdev_device_close(hu->serdev);
+				ret = serdev_device_open(hu->serdev);
+				if (ret) {
+					bt_dev_err(hdev, "failed to open port");
+					return ret;
+				}
 			}
+			retries++;
+			goto retry;
 		}
-		retries++;
-		goto retry;
+		return ret;
 	}

 	/* Setup bdaddr */
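The reworked error path above guarantees that qca_power_shutdown() runs on every failure, where the old code powered the controller down only when another retry was still permitted. A stripped-down, runnable sketch of the new control flow (all names here are placeholders, not the driver's API):

    #include <stdio.h>

    #define MAX_INIT_RETRIES 3

    static int try_init(int attempt)
    {
            return attempt < 2 ? -1 : 0;    /* fail twice, then succeed */
    }

    static void power_shutdown(void) { puts("power off"); }
    static int reopen_port(void)     { puts("reopen port"); return 0; }

    int main(void)
    {
            int retries = 0, ret;

    retry:
            ret = try_init(retries);
            if (ret) {
                    power_shutdown();       /* always clean up on failure */
                    if (retries < MAX_INIT_RETRIES) {
                            if (reopen_port())
                                    return 1;
                            retries++;
                            goto retry;
                    }
                    return 1;               /* out of retries: fail for real */
            }
            puts("init ok");
            return 0;
    }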


@@ -202,11 +202,16 @@ static int ipmi_ipmb_slave_cb(struct i2c_client *client,
 		break;

 	case I2C_SLAVE_READ_REQUESTED:
+		*val = 0xff;
+		ipmi_ipmb_check_msg_done(iidev);
+		break;
+
 	case I2C_SLAVE_STOP:
 		ipmi_ipmb_check_msg_done(iidev);
 		break;

 	case I2C_SLAVE_READ_PROCESSED:
+		*val = 0xff;
 		break;
 	}
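The quirk here is that the bus master may clock out a byte as soon as it issues a read request, before any READ_PROCESSED event arrives, so the callback must supply a defined 0xff pad byte immediately. A condensed, runnable sketch of the dispatch shape (hypothetical event names standing in for the linux/i2c.h slave events):

    #include <stdio.h>

    enum ev { READ_REQUESTED, READ_PROCESSED, STOP };

    /* Sketch of the fixed dispatch: a read request immediately supplies
     * a 0xff pad byte and checks for message completion, instead of
     * leaving *val untouched until the first READ_PROCESSED event. */
    static int slave_cb(enum ev event, unsigned char *val)
    {
            switch (event) {
            case READ_REQUESTED:
                    *val = 0xff;    /* defined filler for the master */
                    /* ipmi_ipmb_check_msg_done(iidev) would run here */
                    break;
            case STOP:
                    /* ipmi_ipmb_check_msg_done(iidev) */
                    break;
            case READ_PROCESSED:
                    *val = 0xff;
                    break;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char v = 0;

            slave_cb(READ_REQUESTED, &v);
            printf("first byte on the wire: 0x%02x\n", v);
            return 0;
    }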


@@ -602,6 +602,22 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
 static int __scan_channels(struct ipmi_smi *intf,
 			   struct ipmi_device_id *id, bool rescan);

+static void ipmi_lock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion,
+				unsigned long *flags)
+{
+	if (run_to_completion)
+		return;
+	spin_lock_irqsave(&intf->xmit_msgs_lock, *flags);
+}
+
+static void ipmi_unlock_xmit_msgs(struct ipmi_smi *intf, int run_to_completion,
+				  unsigned long *flags)
+{
+	if (run_to_completion)
+		return;
+	spin_unlock_irqrestore(&intf->xmit_msgs_lock, *flags);
+}
+
 static void free_ipmi_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
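Factoring the repeated "lock unless running to completion" test into ipmi_lock_xmit_msgs()/ipmi_unlock_xmit_msgs() removes several if (!run_to_completion) pairs in the hunks below. A userspace pthread analogue of the same shape (a sketch; the kernel helpers use spin_lock_irqsave() on xmit_msgs_lock):

    #include <pthread.h>
    #include <stdio.h>

    struct intf {
            pthread_mutex_t xmit_lock;
            int run_to_completion;  /* nonzero: single context, skip locking */
    };

    static void lock_xmit(struct intf *i)
    {
            if (i->run_to_completion)
                    return;
            pthread_mutex_lock(&i->xmit_lock);
    }

    static void unlock_xmit(struct intf *i)
    {
            if (i->run_to_completion)
                    return;
            pthread_mutex_unlock(&i->xmit_lock);
    }

    int main(void)
    {
            struct intf i = { PTHREAD_MUTEX_INITIALIZER, 0 };

            lock_xmit(&i);
            puts("queue manipulation happens here");
            unlock_xmit(&i);
            return 0;
    }

Run-to-completion is used on paths such as panic handling, where no other context can race and taking a lock could deadlock, so the helpers simply become no-ops there.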
@@ -1869,21 +1885,32 @@ static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
 	return smi_msg;
 }

-static void smi_send(struct ipmi_smi *intf,
+static int smi_send(struct ipmi_smi *intf,
 		    const struct ipmi_smi_handlers *handlers,
 		    struct ipmi_smi_msg *smi_msg, int priority)
 {
 	int run_to_completion = READ_ONCE(intf->run_to_completion);
 	unsigned long flags = 0;
+	int rv = 0;

-	if (!run_to_completion)
-		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
 	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-	if (!run_to_completion)
-		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);

-	if (smi_msg)
-		handlers->sender(intf->send_info, smi_msg);
+	if (smi_msg) {
+		rv = handlers->sender(intf->send_info, smi_msg);
+		if (rv) {
+			ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+			intf->curr_msg = NULL;
+			ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+			/*
+			 * Something may have been added to the transmit
+			 * queue, so schedule a check for that.
+			 */
+			queue_work(system_wq, &intf->smi_work);
+		}
+	}
+
+	return rv;
 }

 static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
@@ -2296,6 +2323,7 @@ static int i_ipmi_request(struct ipmi_user *user,
 	struct ipmi_recv_msg *recv_msg;
 	int run_to_completion = READ_ONCE(intf->run_to_completion);
 	int rv = 0;
+	bool in_seq_table = false;

 	if (supplied_recv) {
 		recv_msg = supplied_recv;
@@ -2349,33 +2377,50 @@ static int i_ipmi_request(struct ipmi_user *user,
 		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
 				     source_address, source_lun,
 				     retries, retry_time_ms);
+		in_seq_table = true;
 	} else if (is_ipmb_direct_addr(addr)) {
 		rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
 					    recv_msg, source_lun);
 	} else if (is_lan_addr(addr)) {
 		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
 				    source_lun, retries, retry_time_ms);
+		in_seq_table = true;
 	} else {
 		/* Unknown address type. */
 		ipmi_inc_stat(intf, sent_invalid_commands);
 		rv = -EINVAL;
 	}

-	if (rv) {
-		if (!supplied_smi)
-			ipmi_free_smi_msg(smi_msg);
-		if (!supplied_recv)
-			ipmi_free_recv_msg(recv_msg);
-	} else {
+	if (!rv) {
 		dev_dbg(intf->si_dev, "Send: %*ph\n",
 			smi_msg->data_size, smi_msg->data);
-		smi_send(intf, intf->handlers, smi_msg, priority);
+		rv = smi_send(intf, intf->handlers, smi_msg, priority);
+		if (rv != IPMI_CC_NO_ERROR)
+			/* smi_send() returns an IPMI err, return a Linux one. */
+			rv = -EIO;
+		if (rv && in_seq_table) {
+			/*
+			 * If it's in the sequence table, it will be
+			 * retried later, so ignore errors.
+			 */
+			rv = 0;
+			/* But we need to fix the timeout. */
+			intf_start_seq_timer(intf, smi_msg->msgid);
+			ipmi_free_smi_msg(smi_msg);
+			smi_msg = NULL;
+		}
 	}

 out_err:
 	if (!run_to_completion)
 		mutex_unlock(&intf->users_mutex);

+	if (rv) {
+		if (!supplied_smi)
+			ipmi_free_smi_msg(smi_msg);
+		if (!supplied_recv)
+			ipmi_free_recv_msg(recv_msg);
+	}
+
 	return rv;
 }
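The in_seq_table flag above encodes an ownership rule: once an IPMB or LAN request is tracked in the sequence table, a failed immediate send is reported to the caller as success, because the timeout machinery owns the retry. A small self-contained sketch of that "deferred retry" pattern (all names hypothetical):

    #include <stdio.h>

    /* Minimal sketch of deferred-retry ownership: if a request is tracked
     * in a retry table, a failed immediate send is reported as success
     * and the entry's timer is rearmed instead. */
    struct entry { char payload[32]; int timeout_ticks; int live; };
    static struct entry seq_table[4];

    static int raw_send(const char *p) { (void)p; return -1; /* pretend failure */ }

    static int send_tracked(int seq, const char *payload)
    {
            struct entry *e = &seq_table[seq];

            snprintf(e->payload, sizeof(e->payload), "%s", payload);
            e->live = 1;
            e->timeout_ticks = 5;

            if (raw_send(e->payload) == 0)
                    return 0;
            /* Failed now, but the entry is live: the tick handler will
             * retransmit, so the caller still sees success. */
            e->timeout_ticks = 1;   /* retry soon, not after a full timeout */
            return 0;
    }

    static void tick(void)
    {
            for (int i = 0; i < 4; i++)
                    if (seq_table[i].live && --seq_table[i].timeout_ticks == 0) {
                            printf("retransmit seq %d: %s\n", i, seq_table[i].payload);
                            seq_table[i].timeout_ticks = 5;
                    }
    }

    int main(void)
    {
            send_tracked(0, "get-device-id");
            tick();
            return 0;
    }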
@@ -3949,12 +3994,12 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
 		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
 			msg->data_size, msg->data);

-		smi_send(intf, intf->handlers, msg, 0);
-		/*
-		 * We used the message, so return the value that
-		 * causes it to not be freed or queued.
-		 */
-		rv = -1;
+		if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR)
+			/*
+			 * We used the message, so return the value that
+			 * causes it to not be freed or queued.
+			 */
+			rv = -1;
 	} else if (!IS_ERR(recv_msg)) {
 		/* Extract the source address from the data. */
 		ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -4028,12 +4073,12 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
 		msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
 		msg->data_size = 5;

-		smi_send(intf, intf->handlers, msg, 0);
-		/*
-		 * We used the message, so return the value that
-		 * causes it to not be freed or queued.
-		 */
-		rv = -1;
+		if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR)
+			/*
+			 * We used the message, so return the value that
+			 * causes it to not be freed or queued.
+			 */
+			rv = -1;
 	} else if (!IS_ERR(recv_msg)) {
 		/* Extract the source address from the data. */
 		daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
@@ -4173,7 +4218,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
 				  struct ipmi_smi_msg *msg)
 {
 	struct cmd_rcvr *rcvr;
-	int rv = 0;
+	int rv = 0; /* Free by default */
 	unsigned char netfn;
 	unsigned char cmd;
 	unsigned char chan;
@@ -4226,12 +4271,12 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
 		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
 			msg->data_size, msg->data);

-		smi_send(intf, intf->handlers, msg, 0);
-		/*
-		 * We used the message, so return the value that
-		 * causes it to not be freed or queued.
-		 */
-		rv = -1;
+		if (smi_send(intf, intf->handlers, msg, 0) == IPMI_CC_NO_ERROR)
+			/*
+			 * We used the message, so return the value that
+			 * causes it to not be freed or queued.
+			 */
+			rv = -1;
 	} else if (!IS_ERR(recv_msg)) {
 		/* Extract the source address from the data. */
 		lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
@@ -4824,8 +4869,7 @@ static void smi_work(struct work_struct *t)
 	 * message delivery.
 	 */
 restart:
-	if (!run_to_completion)
-		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
 	if (intf->curr_msg == NULL && !intf->in_shutdown) {
 		struct list_head *entry = NULL;
@@ -4841,8 +4885,7 @@ restart:
 			intf->curr_msg = newmsg;
 		}
 	}
-	if (!run_to_completion)
-		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);

 	if (newmsg) {
 		cc = intf->handlers->sender(intf->send_info, newmsg);
@@ -4850,8 +4893,11 @@ restart:
 			if (newmsg->recv_msg)
 				deliver_err_response(intf,
 						     newmsg->recv_msg, cc);
-			else
-				ipmi_free_smi_msg(newmsg);
+			ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
+			intf->curr_msg = NULL;
+			ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);
+			ipmi_free_smi_msg(newmsg);
+			newmsg = NULL;
 			goto restart;
 		}
 	}
@@ -4919,16 +4965,14 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
 				       flags);

-	if (!run_to_completion)
-		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	ipmi_lock_xmit_msgs(intf, run_to_completion, &flags);
 	/*
 	 * We can get an asynchronous event or receive message in addition
 	 * to commands we send.
 	 */
 	if (msg == intf->curr_msg)
 		intf->curr_msg = NULL;
-	if (!run_to_completion)
-		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+	ipmi_unlock_xmit_msgs(intf, run_to_completion, &flags);

 	if (run_to_completion)
 		smi_work(&intf->smi_work);
@@ -5041,7 +5085,12 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
 			ipmi_inc_stat(intf,
 				      retransmitted_ipmb_commands);

-			smi_send(intf, intf->handlers, smi_msg, 0);
+			/* If this fails we'll retry later or timeout. */
+			if (smi_send(intf, intf->handlers, smi_msg, 0) != IPMI_CC_NO_ERROR) {
+				/* But fix the timeout. */
+				intf_start_seq_timer(intf, smi_msg->msgid);
+				ipmi_free_smi_msg(smi_msg);
+			}
 		} else
 			ipmi_free_smi_msg(smi_msg);


@@ -809,6 +809,12 @@ restart:
 		 */
 		return_hosed_msg(smi_info, IPMI_BUS_ERR);
 	}
+	if (smi_info->waiting_msg != NULL) {
+		/* Also handle if there was a message waiting. */
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
+		return_hosed_msg(smi_info, IPMI_BUS_ERR);
+	}
 	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
 	goto out;
 }
@@ -918,9 +924,14 @@ static int sender(void *send_info, struct ipmi_smi_msg *msg)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags;
+	int rv = IPMI_CC_NO_ERROR;

 	debug_timestamp(smi_info, "Enqueue");

+	/*
+	 * Check here for run to completion mode. A check under lock is
+	 * later.
+	 */
 	if (smi_info->si_state == SI_HOSED)
 		return IPMI_BUS_ERR;
@@ -934,18 +945,15 @@ static int sender(void *send_info, struct ipmi_smi_msg *msg)
 	}

 	spin_lock_irqsave(&smi_info->si_lock, flags);
-	/*
-	 * The following two lines don't need to be under the lock for
-	 * the lock's sake, but they do need SMP memory barriers to
-	 * avoid getting things out of order. We are already claiming
-	 * the lock, anyway, so just do it under the lock to avoid the
-	 * ordering problem.
-	 */
-	BUG_ON(smi_info->waiting_msg);
-	smi_info->waiting_msg = msg;
-	check_start_timer_thread(smi_info);
+	if (smi_info->si_state == SI_HOSED) {
+		rv = IPMI_BUS_ERR;
+	} else {
+		BUG_ON(smi_info->waiting_msg);
+		smi_info->waiting_msg = msg;
+		check_start_timer_thread(smi_info);
+	}
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);

-	return IPMI_CC_NO_ERROR;
+	return rv;
 }

 static void set_run_to_completion(void *send_info, bool i_run_to_completion)
@@ -1113,7 +1121,9 @@ static void smi_timeout(struct timer_list *t)
 		     * SI_USEC_PER_JIFFY);
 	smi_result = smi_event_handler(smi_info, time_diff);

-	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+	if (smi_info->si_state == SI_HOSED) {
+		timeout = jiffies + SI_TIMEOUT_HOSED;
+	} else if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
 		timeout = jiffies + SI_TIMEOUT_JIFFIES;
 		smi_inc_stat(smi_info, long_timeouts);
@@ -2226,7 +2236,8 @@ static void wait_msg_processed(struct smi_info *smi_info)
 	unsigned long jiffies_now;
 	long time_diff;

-	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
+	while (smi_info->si_state != SI_HOSED &&
+	       (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL))) {
 		jiffies_now = jiffies;
 		time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
 			     * SI_USEC_PER_JIFFY);
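sender() now tests SI_HOSED twice: once early, covering the run-to-completion path that never takes si_lock, and again under the lock, since the state may change in between. The same double-check shape in a runnable userspace sketch:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t si_lock = PTHREAD_MUTEX_INITIALIZER;
    static int hosed;       /* set once the interface is given up on */

    /* Returns 0 on queue, -1 if the interface is hosed. */
    static int sender(const char *msg)
    {
            int rv = 0;

            if (hosed)              /* cheap early out, may be stale */
                    return -1;

            pthread_mutex_lock(&si_lock);
            if (hosed)              /* authoritative check under the lock */
                    rv = -1;
            else
                    printf("queued: %s\n", msg);
            pthread_mutex_unlock(&si_lock);
            return rv;
    }

    int main(void)
    {
            sender("get-device-id");
            hosed = 1;
            printf("after hosed: %d\n", sender("get-device-id"));
            return 0;
    }

Only the check under the lock can be trusted; the early one merely avoids lock traffic once the device is known dead.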


@@ -168,7 +168,7 @@ static void ipmi_ls2k_remove(struct platform_device *pdev)
 	ipmi_si_remove_by_dev(&pdev->dev);
 }

-struct platform_driver ipmi_ls2k_platform_driver = {
+static struct platform_driver ipmi_ls2k_platform_driver = {
 	.driver = {
 		.name = "ls2k-ipmi-si",
 	},


@@ -96,8 +96,7 @@ static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
 /* Control how we warn userspace. */
 static struct ratelimit_state urandom_warning =
 	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
-static int ratelimit_disable __read_mostly =
-	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+static int ratelimit_disable __read_mostly = 0;
 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
@@ -168,12 +167,6 @@ int __cold execute_with_initialized_rng(struct notifier_block *nb)
 	return ret;
 }

-#define warn_unseeded_randomness() \
-	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
-		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
-				__func__, (void *)_RET_IP_, crng_init)
-
 /*********************************************************************
  *
  * Fast key erasure RNG, the "crng".
@@ -434,7 +427,6 @@ static void _get_random_bytes(void *buf, size_t len)
  */
 void get_random_bytes(void *buf, size_t len)
 {
-	warn_unseeded_randomness();
 	_get_random_bytes(buf, len);
 }
 EXPORT_SYMBOL(get_random_bytes);
@@ -523,8 +515,6 @@ type get_random_ ##type(void) \
 	struct batch_ ##type *batch; \
 	unsigned long next_gen; \
 	\
-	warn_unseeded_randomness(); \
-	\
 	if (!crng_ready()) { \
 		_get_random_bytes(&ret, sizeof(ret)); \
 		return ret; \
