spi: Updates for v7.0

The highlight here is that David Lechner has added support for multi-lane
 SPI devices.  Unlike the existing dual/quad SPI support this is for
 devices (typically ADCs/DACs) which support multiple independent data
 streams over multiple data lanes, instead of sending one data stream N
 times as fast, they simultaneously transfer N different data streams.
 This is very similar to the case where multiple devices are grouped
 together but in this case it's a single device in a way that's visible
 to software.
 
 Otherwise there's been quite a bit of work on existing drivers, both
 cleanup and feature improvement, and a reasonable collection of new
 drivers.
 
  - Support for multi-lane SPI devices.
  - Preparatory work for some memory mapped flash improvements that will
    happen in the MTD subsystem.
  - Several conversions to fwnode APIs.
  - A bunch of cleanup and hardening work on the ST drivers.
  - Support for DMA mode on Renesas RZV2H and i.MX target mode.
  - Support for ATCSPI200, AXIADO AX300, NXP XPI and Renesas RZ/N1.
 -----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCgAdFiEEreZoqmdXGLWf4p/qJNaLcl1Uh9AFAmmJ9PIACgkQJNaLcl1U
 h9AdQgf/SFAw6kM4lDIFkHLoNpLaMHV+6t8bEk6lc//PT1NWy6+WohVIj0sJR7JG
 /NnRvkWZrvysQZSgV22sLkbuOalrPTJPrTefo0bVzkqdp6HTjaanJNDKlIxyHA/x
 rrm5kcZRB6MsMTBzDdrly9mWHLc/o+qFZE1FoZGYyA0qR8Hrrf5b1f1P3HXkh3T0
 mzgeJVJOrnfrkta6aiHGoJYroiPMZ7RChVOBVnlYbD7Dfhapr9/HneCJ1r+MlqsQ
 VhxYlmApd3C0sv32rGV+gIMxhwOrO5mii5+le7bf1c6IoDMYEyzAvXXPGk/qhi7U
 lvtsWfLemcqAL163924Dc3lmODvFVA==
 =38+G
 -----END PGP SIGNATURE-----

Merge tag 'spi-v6.20' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi updates from Mark Brown:
 "The highlight here is that David Lechner has added support for
  multi-lane SPI devices. Unlike the existing dual/quad SPI support this
  is for devices (typically ADCs/DACs) which support multiple
  independent data streams over multiple data lanes, instead of sending
  one data stream N times as fast, they simultaneously transfer N
  different data streams.

  This is very similar to the case where multiple devices are grouped
  together but in this case it's a single device in a way that's visible
  to software.

  Otherwise there's been quite a bit of work on existing drivers, both
  cleanup and feature improvement, and a reasonable collection of new
  drivers.

   - Support for multi-lane SPI devices

   - Preparatory work for some memory mapped flash improvements that
     will happen in the MTD subsystem

   - Several conversions to fwnode APIs

   - A bunch of cleanup and hardening work on the ST drivers

   - Support for DMA mode on Renesas RZV2H and i.MX target mode

   - Support for ATCSPI200, AXIADO AX300, NXP XPI and Renesas RZ/N1"

* tag 'spi-v6.20' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (108 commits)
  spi: tools: Add include folder to .gitignore
  spi: cadence-qspi: Add support for the Renesas RZ/N1 controller
  spi: cadence-qspi: Kill cqspi_jh7110_clk_init
  spi: dt-bindings: cdns,qspi-nor: Add Renesas RZ/N1D400 to the list
  spi: geni-qcom: Add target abort support
  spi: geni-qcom: Drop unused msg parameter from timeout handlers
  spi: geni-qcom: Fix abort sequence execution for serial engine errors
  spi: geni-qcom: Improve target mode allocation by using proper allocation functions
  spi: xilinx: use device property accessors.
  dt-bindings: spi: Add binding for Faraday FTSSP010
  spi: axi-spi-engine: support SPI_MULTI_LANE_MODE_STRIPE
  spi: dt-bindings: adi,axi-spi-engine: add multi-lane support
  spi: Documentation: add page on multi-lane support
  spi: add multi_lane_mode field to struct spi_transfer
  spi: support controllers with multiple data lanes
  spi: dt-bindings: add spi-{tx,rx}-lane-map properties
  spi: dt-bindings: change spi-{rx,tx}-bus-width to arrays
  spi: dw: Remove not-going-to-be-supported code for Baikal SoC
  spi: cadence-qspi: Use a default value for cdns,fifo-width
  spi: cadence-qspi: Make sure write protection is disabled
  ...
This commit is contained in:
Linus Torvalds 2026-02-11 09:43:43 -08:00
commit e86dda7bde
141 changed files with 5812 additions and 1211 deletions

View file

@ -34,8 +34,9 @@ properties:
spi-cpol: true
spi-rx-bus-width:
minimum: 0
maximum: 1
items:
minimum: 0
maximum: 1
dc-gpios:
maxItems: 1

View file

@ -37,7 +37,15 @@ properties:
maximum: 102040816
spi-rx-bus-width:
enum: [1, 2, 4]
maxItems: 2
# all lanes must have the same width
oneOf:
- contains:
const: 1
- contains:
const: 2
- contains:
const: 4
vdd-5v-supply: true
vdd-1v8-supply: true
@ -88,6 +96,18 @@ oneOf:
unevaluatedProperties: false
allOf:
- if:
properties:
compatible:
enum:
- adi,ad4030-24
- adi,ad4032-24
then:
properties:
spi-rx-bus-width:
maxItems: 1
examples:
- |
#include <dt-bindings/gpio/gpio.h>
@ -108,3 +128,23 @@ examples:
reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
};
};
- |
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
adc@0 {
compatible = "adi,ad4630-24";
reg = <0>;
spi-max-frequency = <80000000>;
spi-rx-bus-width = <4>, <4>;
vdd-5v-supply = <&supply_5V>;
vdd-1v8-supply = <&supply_1_8V>;
vio-supply = <&supply_1_8V>;
ref-supply = <&supply_5V>;
cnv-gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
};
};

View file

@ -38,8 +38,9 @@ properties:
spi-cpha: true
spi-rx-bus-width:
minimum: 1
maximum: 4
items:
minimum: 1
maximum: 4
avdd-supply:
description: Analog power supply.

View file

@ -70,6 +70,21 @@ required:
unevaluatedProperties: false
patternProperties:
"^.*@[0-9a-f]+":
type: object
properties:
spi-rx-bus-width:
maxItems: 8
items:
enum: [0, 1]
spi-tx-bus-width:
maxItems: 8
items:
enum: [0, 1]
examples:
- |
spi@44a00000 {

View file

@ -55,10 +55,12 @@ patternProperties:
maximum: 4
spi-rx-bus-width:
const: 1
items:
- const: 1
spi-tx-bus-width:
const: 1
items:
- const: 1
required:
- compatible

View file

@ -81,10 +81,12 @@ patternProperties:
maximum: 4
spi-rx-bus-width:
const: 1
items:
- const: 1
spi-tx-bus-width:
const: 1
items:
- const: 1
required:
- compatible

View file

@ -0,0 +1,87 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/andestech,ae350-spi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Andes ATCSPI200 SPI controller
maintainers:
- CL Wang <cl634@andestech.com>
properties:
compatible:
oneOf:
- items:
- enum:
- andestech,qilai-spi
- const: andestech,ae350-spi
- const: andestech,ae350-spi
reg:
maxItems: 1
clocks:
maxItems: 1
num-cs:
description: Number of chip selects supported
maxItems: 1
dmas:
items:
- description: Transmit FIFO DMA channel
- description: Receive FIFO DMA channel
dma-names:
items:
- const: tx
- const: rx
patternProperties:
"@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
spi-rx-bus-width:
items:
- enum: [1, 4]
spi-tx-bus-width:
items:
- enum: [1, 4]
allOf:
- $ref: spi-controller.yaml#
required:
- compatible
- reg
- clocks
- dmas
- dma-names
unevaluatedProperties: false
examples:
- |
spi@f0b00000 {
compatible = "andestech,ae350-spi";
reg = <0xf0b00000 0x100>;
clocks = <&clk_spi>;
dmas = <&dma0 0>, <&dma0 1>;
dma-names = "tx", "rx";
#address-cells = <1>;
#size-cells = <0>;
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-tx-bus-width = <4>;
spi-rx-bus-width = <4>;
spi-cpol;
spi-cpha;
};
};

View file

@ -19,6 +19,7 @@ properties:
- const: atmel,at91rm9200-spi
- items:
- enum:
- microchip,lan9691-spi
- microchip,sam9x60-spi
- microchip,sam9x7-spi
- microchip,sama7d65-spi

View file

@ -0,0 +1,73 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/axiado,ax3000-spi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Axiado AX3000 SoC SPI controller
maintainers:
- Vladimir Moravcevic <vmoravcevic@axiado.com>
- Tzu-Hao Wei <twei@axiado.com>
- Swark Yang <syang@axiado.com>
- Prasad Bolisetty <pbolisetty@axiado.com>
allOf:
- $ref: spi-controller.yaml#
properties:
compatible:
enum:
- axiado,ax3000-spi
reg:
maxItems: 1
interrupts:
maxItems: 1
clock-names:
items:
- const: ref
- const: pclk
clocks:
maxItems: 2
num-cs:
description: |
Number of chip selects used.
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 4
default: 4
required:
- compatible
- reg
- interrupts
- clock-names
- clocks
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
spi@80510000 {
compatible = "axiado,ax3000-spi";
reg = <0x00 0x80510000 0x00 0x1000>;
clock-names = "ref", "pclk";
clocks = <&spi_clk>, <&apb_pclk>;
interrupt-parent = <&gic500>;
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
num-cs = <4>;
};
};
...

View file

@ -61,6 +61,20 @@ allOf:
cdns,fifo-depth:
enum: [ 128, 256 ]
default: 128
- if:
properties:
compatible:
contains:
const: renesas,rzn1-qspi
then:
properties:
cdns,trigger-address: false
cdns,fifo-depth: false
cdns,fifo-width: false
else:
required:
- cdns,trigger-address
- cdns,fifo-depth
properties:
compatible:
@ -80,6 +94,9 @@ properties:
# controllers are meant to be used with flashes of all kinds,
# ie. also NAND flashes, not only NOR flashes.
- const: cdns,qspi-nor
- items:
- const: renesas,r9a06g032-qspi
- const: renesas,rzn1-qspi
- const: cdns,qspi-nor
deprecated: true
@ -163,8 +180,6 @@ required:
- reg
- interrupts
- clocks
- cdns,fifo-width
- cdns,trigger-address
- '#address-cells'
- '#size-cells'
@ -172,7 +187,7 @@ unevaluatedProperties: false
examples:
- |
qspi: spi@ff705000 {
spi@ff705000 {
compatible = "intel,socfpga-qspi", "cdns,qspi-nor";
#address-cells = <1>;
#size-cells = <0>;

View file

@ -0,0 +1,43 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/faraday,ftssp010.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Faraday FTSSP010 SPI Controller
maintainers:
- Linus Walleij <linusw@kernel.org>
properties:
compatible:
const: faraday,ftssp010
interrupts:
maxItems: 1
reg:
maxItems: 1
cs-gpios: true
required:
- compatible
- interrupts
- reg
allOf:
- $ref: spi-controller.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
spi@4a000000 {
compatible = "faraday,ftssp010";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x4a000000 0x1000>;
interrupts = <0>;
};

View file

@ -54,10 +54,12 @@ patternProperties:
properties:
spi-rx-bus-width:
enum: [1, 2, 4]
items:
- enum: [1, 2, 4]
spi-tx-bus-width:
enum: [1, 2, 4]
items:
- enum: [1, 2, 4]
required:
- compatible

View file

@ -0,0 +1,92 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/spi/nxp,imx94-xspi.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP External Serial Peripheral Interface (xSPI)
maintainers:
- Haibo Chen <haibo.chen@nxp.com>
- Han Xu <han.xu@nxp.com>
properties:
compatible:
oneOf:
- enum:
- nxp,imx94-xspi
- items:
- enum:
- nxp,imx952-xspi
- const: nxp,imx94-xspi
reg:
items:
- description: registers address space
- description: memory mapped address space
reg-names:
items:
- const: base
- const: mmap
interrupts:
items:
- description: interrupt for EENV0
- description: interrupt for EENV1
- description: interrupt for EENV2
- description: interrupt for EENV3
- description: interrupt for EENV4
clocks:
items:
- description: SPI serial clock
clock-names:
items:
- const: per
required:
- compatible
- reg
- reg-names
- interrupts
- clocks
- clock-names
allOf:
- $ref: spi-controller.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
spi@42b90000 {
compatible = "nxp,imx94-xspi";
reg = <0x0 0x42b90000 0x0 0x50000>, <0x0 0x28000000 0x0 0x08000000>;
reg-names = "base", "mmap";
interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clocks = <&scmi_1>;
clock-names = "per";
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <200000000>;
spi-rx-bus-width = <8>;
spi-tx-bus-width = <8>;
};
};
};

View file

@ -20,6 +20,12 @@ properties:
clocks:
maxItems: 1
dmas:
maxItems: 1
dma-names:
const: rx-tx
allOf:
- $ref: spi-controller.yaml#
@ -38,6 +44,8 @@ examples:
compatible = "nxp,lpc3220-spi";
reg = <0x20088000 0x1000>;
clocks = <&clk LPC32XX_CLK_SPI1>;
dmas = <&dmamux 11 1 0>;
dma-names = "rx-tx";
#address-cells = <1>;
#size-cells = <0>;
};

View file

@ -57,6 +57,14 @@ properties:
- const: presetn
- const: tresetn
dmas:
maxItems: 2
dma-names:
items:
- const: rx
- const: tx
power-domains:
maxItems: 1

View file

@ -64,9 +64,23 @@ properties:
description:
Bus width to the SPI bus used for read transfers.
If 0 is provided, then no RX will be possible on this device.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 4, 8]
default: 1
Some SPI peripherals and controllers may have multiple data lanes for
receiving two or more words at the same time. If this is the case, each
index in the array represents the lane on both the SPI peripheral and
controller. Additional mapping properties may be needed if a lane is
skipped on either side.
$ref: /schemas/types.yaml#/definitions/uint32-array
items:
enum: [0, 1, 2, 4, 8]
default: [1]
spi-rx-lane-map:
description: Mapping of peripheral SDO lanes to controller SDI lanes.
Each index in the array represents a peripheral SDO lane, and the value
at that index represents the corresponding controller SDI lane.
$ref: /schemas/types.yaml#/definitions/uint32-array
default: [0, 1, 2, 3, 4, 5, 6, 7]
spi-rx-delay-us:
description:
@ -81,9 +95,23 @@ properties:
description:
Bus width to the SPI bus used for write transfers.
If 0 is provided, then no TX will be possible on this device.
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 4, 8]
default: 1
Some SPI peripherals and controllers may have multiple data lanes for
transmitting two or more words at the same time. If this is the case, each
index in the array represents the lane on both the SPI peripheral and
controller. Additional mapping properties may be needed if a lane is
skipped on either side.
$ref: /schemas/types.yaml#/definitions/uint32-array
items:
enum: [0, 1, 2, 4, 8]
default: [1]
spi-tx-lane-map:
description: Mapping of peripheral SDI lanes to controller SDO lanes.
Each index in the array represents a peripheral SDI lane, and the value
at that index represents the corresponding controller SDO lane.
$ref: /schemas/types.yaml#/definitions/uint32-array
default: [0, 1, 2, 3, 4, 5, 6, 7]
spi-tx-delay-us:
description:

View file

@ -38,7 +38,6 @@ properties:
required:
- compatible
- reg
- interrupts
unevaluatedProperties: false

View file

@ -96,6 +96,9 @@ properties:
The region should be defined as child node of the AHB SRAM node
as per the generic bindings in Documentation/devicetree/bindings/sram/sram.yaml
power-domains:
maxItems: 1
access-controllers:
minItems: 1
maxItems: 2

View file

@ -9,6 +9,7 @@ Serial Peripheral Interface (SPI)
spi-summary
spidev
multiple-data-lanes
butterfly
spi-lm70llp
spi-sc18is602

View file

@ -0,0 +1,217 @@
====================================
SPI devices with multiple data lanes
====================================
Some specialized SPI controllers and peripherals support multiple data lanes
that allow reading more than one word at a time in parallel. This is different
from dual/quad/octal SPI where multiple bits of a single word are transferred
simultaneously.
For example, controllers that support parallel flash memories have this feature
as do some simultaneous-sampling ADCs where each channel has its own data lane.
---------------------
Describing the wiring
---------------------
The ``spi-tx-bus-width`` and ``spi-rx-bus-width`` properties in the devicetree
are used to describe how many data lanes are connected between the controller
and how wide each lane is. The number of items in the array indicates how many
lanes there are, and the value of each item indicates how many bits wide that
lane is.
For example, a dual-simultaneous-sampling ADC with two 4-bit lanes might be
wired up like this::
+--------------+ +----------+
| SPI | | AD4630 |
| Controller | | ADC |
| | | |
| CS0 |--->| CS |
| SCK |--->| SCK |
| SDO |--->| SDI |
| | | |
| SDIA0 |<---| SDOA0 |
| SDIA1 |<---| SDOA1 |
| SDIA2 |<---| SDOA2 |
| SDIA3 |<---| SDOA3 |
| | | |
| SDIB0 |<---| SDOB0 |
| SDIB1 |<---| SDOB1 |
| SDIB2 |<---| SDOB2 |
| SDIB3 |<---| SDOB3 |
| | | |
+--------------+ +----------+
It is described in a devicetree like this::
spi {
compatible = "my,spi-controller";
...
adc@0 {
compatible = "adi,ad4630";
reg = <0>;
...
spi-rx-bus-width = <4>, <4>; /* 2 lanes of 4 bits each */
...
};
};
In most cases, lanes will be wired up symmetrically (A to A, B to B, etc). If
this isn't the case, extra ``spi-rx-lane-map`` and ``spi-tx-lane-map``
properties are needed to provide a mapping between controller lanes and the
physical lane wires.
Here is an example where a multi-lane SPI controller has each lane wired to
separate single-lane peripherals::
+--------------+ +----------+
| SPI | | Thing 1 |
| Controller | | |
| | | |
| CS0 |--->| CS |
| SDO0 |--->| SDI |
| SDI0 |<---| SDO |
| SCLK0 |--->| SCLK |
| | | |
| | +----------+
| |
| | +----------+
| | | Thing 2 |
| | | |
| CS1 |--->| CS |
| SDO1 |--->| SDI |
| SDI1 |<---| SDO |
| SCLK1 |--->| SCLK |
| | | |
+--------------+ +----------+
This is described in a devicetree like this::
spi {
compatible = "my,spi-controller";
...
thing1@0 {
compatible = "my,thing1";
reg = <0>;
...
};
thing2@1 {
compatible = "my,thing2";
reg = <1>;
...
spi-tx-lane-map = <1>; /* lane 0 is not used, lane 1 is used for tx wire */
spi-rx-lane-map = <1>; /* lane 0 is not used, lane 1 is used for rx wire */
...
};
};
The default values of ``spi-rx-bus-width`` and ``spi-tx-bus-width`` are ``<1>``,
so these properties can still be omitted even when ``spi-rx-lane-map`` and
``spi-tx-lane-map`` are used.
----------------------------
Usage in a peripheral driver
----------------------------
These types of SPI controllers generally do not support arbitrary use of the
multiple lanes. Instead, they operate in one of a few defined modes. Peripheral
drivers should set the :c:type:`struct spi_transfer.multi_lane_mode <spi_transfer>`
field to indicate which mode they want to use for a given transfer.
The possible values for this field have the following semantics:
- :c:macro:`SPI_MULTI_LANE_MODE_SINGLE`: Only use the first lane. Other lanes are
ignored. This means that it is operating just like a conventional SPI
peripheral. This is the default, so it does not need to be explicitly set.
Example::
tx_buf[0] = 0x88;
struct spi_transfer xfer = {
.tx_buf = tx_buf,
.len = 1,
};
spi_sync_transfer(spi, &xfer, 1);
Assuming the controller is sending the MSB first, the sequence of bits
sent over the tx wire would be (right-most bit is sent first)::
controller > data bits > peripheral
---------- ---------------- ----------
SDO 0 0-0-0-1-0-0-0-1 SDI 0
- :c:macro:`SPI_MULTI_LANE_MODE_MIRROR`: Send a single data word over all of the
lanes at the same time. This only makes sense for writes and not
for reads.
Example::
tx_buf[0] = 0x88;
struct spi_transfer xfer = {
.tx_buf = tx_buf,
.len = 1,
.multi_lane_mode = SPI_MULTI_LANE_MODE_MIRROR,
};
spi_sync_transfer(spi, &xfer, 1);
The data is mirrored on each tx wire::
controller > data bits > peripheral
---------- ---------------- ----------
SDO 0 0-0-0-1-0-0-0-1 SDI 0
SDO 1 0-0-0-1-0-0-0-1 SDI 1
- :c:macro:`SPI_MULTI_LANE_MODE_STRIPE`: Send or receive two different data words
at the same time, one on each lane. This means that the buffer needs to be
sized to hold data for all lanes. Data is interleaved in the buffer, with
the first word corresponding to lane 0, the second to lane 1, and so on.
Once the last lane is used, the next word in the buffer corresponds to lane
0 again. Accordingly, the buffer size must be a multiple of the number of
lanes. This mode works for both reads and writes.
Example::
struct spi_transfer xfer = {
.rx_buf = rx_buf,
.len = 2,
.multi_lane_mode = SPI_MULTI_LANE_MODE_STRIPE,
};
spi_sync_transfer(spi, &xfer, 1);
Each rx wire has a different data word sent simultaneously::
controller < data bits < peripheral
---------- ---------------- ----------
SDI 0 0-0-0-1-0-0-0-1 SDO 0
SDI 1 1-0-0-0-1-0-0-0 SDO 1
After the transfer, ``rx_buf[0] == 0x11`` (word from SDO 0) and
``rx_buf[1] == 0x88`` (word from SDO 1).
-----------------------------
SPI controller driver support
-----------------------------
To support multiple data lanes, SPI controller drivers need to set
:c:type:`struct spi_controller.num_data_lanes <spi_controller>` to a value
greater than 1.
Then the part of the driver that handles SPI transfers needs to check the
:c:type:`struct spi_transfer.multi_lane_mode <spi_transfer>` field and implement
the appropriate behavior for each supported mode and return an error for
unsupported modes.
The core SPI code should handle the rest.

View file

@ -1821,6 +1821,12 @@ S: Supported
F: drivers/clk/analogbits/*
F: include/linux/clk/analogbits*
ANDES ATCSPI200 SPI DRIVER
M: CL Wang <cl634@andestech.com>
S: Supported
F: Documentation/devicetree/bindings/spi/andestech,ae350-spi.yaml
F: drivers/spi/spi-atcspi200.c
ANDROID DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Arve Hjønnevåg <arve@android.com>
@ -4277,6 +4283,17 @@ W: https://ez.analog.com/linux-software-drivers
F: Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml
F: drivers/pwm/pwm-axi-pwmgen.c
AXIADO SPI DB DRIVER
M: Vladimir Moravcevic <vmoravcevic@axiado.com>
M: Tzu-Hao Wei <twei@axiado.com>
M: Swark Yang <syang@axiado.com>
M: Prasad Bolisetty <pbolisetty@axiado.com>
L: linux-spi@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/spi/axiado,ax3000-spi.yaml
F: drivers/spi/spi-axiado.c
F: drivers/spi/spi-axiado.h
AYANEO PLATFORM EC DRIVER
M: Antheas Kapenekakis <lkml@antheas.dev>
L: platform-driver-x86@vger.kernel.org
@ -18991,6 +19008,15 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/trivial-codec.yaml
F: sound/soc/codecs/tfa9879*
NXP XSPI DRIVER
M: Han Xu <han.xu@nxp.com>
M: Haibo Chen <haibo.chen@nxp.com>
L: linux-spi@vger.kernel.org
L: imx@lists.linux.dev
S: Maintained
F: Documentation/devicetree/bindings/spi/nxp,imx94-xspi.yaml
F: drivers/spi/spi-nxp-xspi.c
NXP-NCI NFC DRIVER
S: Orphan
F: Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml

View file

@ -136,6 +136,15 @@ config SPI_AR934X
This enables support for the SPI controller present on the
Qualcomm Atheros AR934X/QCA95XX SoCs.
config SPI_ATCSPI200
tristate "Andes ATCSPI200 SPI controller"
depends on ARCH_ANDES
help
SPI driver for Andes ATCSPI200 SPI controller.
ATCSPI200 controller supports DMA and PIO modes. When DMA
is not available, the driver automatically falls back to
PIO mode.
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
depends on ATH79 || COMPILE_TEST
@ -204,6 +213,17 @@ config SPI_AXI_SPI_ENGINE
It is part of the SPI Engine framework that is used in some Analog Devices
reference designs for FPGAs.
config SPI_AXIADO
tristate "Axiado DB-H SPI controller"
depends on SPI_MEM
depends on ARCH_AXIADO || COMPILE_TEST
help
Enable support for the SPI controller present on Axiado AX3000 SoCs.
The implementation supports host-only mode and does not provide target
functionality. It is intended for use cases where the SoC acts as the SPI
host, communicating with peripheral devices such as flash memory.
config SPI_BCM2835
tristate "BCM2835 SPI controller"
depends on GPIOLIB
@ -365,33 +385,6 @@ config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
depends on HAS_IOMEM
config SPI_DW_BT1
tristate "Baikal-T1 SPI driver for DW SPI core"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
select MULTIPLEXER
help
Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
controllers. Two of them are pretty much normal: with IRQ, DMA,
FIFOs of 64 words depth, 4x CSs, but the third one as being a
part of the Baikal-T1 System Boot Controller has got a very
limited resources: no IRQ, no DMA, only a single native
chip-select and Tx/Rx FIFO with just 8 words depth available.
The later one is normally connected to an external SPI-nor flash
of 128Mb (in general can be of bigger size).
config SPI_DW_BT1_DIRMAP
bool "Directly mapped Baikal-T1 Boot SPI flash support"
depends on SPI_DW_BT1
help
Directly mapped SPI flash memory is an interface specific to the
Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which
can be used to access a peripheral memory device just by
reading/writing data from/to it. Note that the system APB bus
will stall during each IO from/to the dirmap region until the
operation is finished. So try not to use it concurrently with
time-critical tasks (like the SPI memory operations implemented
in this driver).
endif
config SPI_DLN2
@ -481,6 +474,16 @@ config SPI_NXP_FLEXSPI
This controller does not support generic SPI messages and only
supports the high-level SPI memory interface.
config SPI_NXP_XSPI
tristate "NXP xSPI controller"
depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
This enables support for the xSPI controller. Up to two devices
can be connected to one host.
This controller does not support generic SPI messages and only
supports the high-level SPI memory interface.
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GPIOLIB || COMPILE_TEST

View file

@ -26,12 +26,14 @@ obj-$(CONFIG_SPI_APPLE) += spi-apple.o
obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o
obj-$(CONFIG_SPI_ATCSPI200) += spi-atcspi200.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
obj-$(CONFIG_SPI_AXIADO) += spi-axiado.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
@ -52,7 +54,6 @@ obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
spi-dw-y := spi-dw-core.o
spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o
obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
@ -102,6 +103,7 @@ obj-$(CONFIG_SPI_WPCM_FIU) += spi-wpcm-fiu.o
obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_NXP_XSPI) += spi-nxp-xspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o

View file

@ -1382,7 +1382,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
ctrl->bus_num = -1;
ctrl->mem_ops = &atmel_qspi_mem_ops;
ctrl->num_chipselect = 1;
ctrl->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctrl);
/* Map the registers */

View file

@ -1124,7 +1124,6 @@ static int airoha_snand_probe(struct platform_device *pdev)
ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
ctrl->mode_bits = SPI_RX_DUAL;
ctrl->setup = airoha_snand_setup;
device_set_node(&ctrl->dev, dev_fwnode(dev));
err = airoha_snand_nfi_init(as_ctrl);
if (err)

View file

@ -67,8 +67,6 @@ static int altera_spi_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
}
host->dev.of_node = pdev->dev.of_node;
hw = spi_controller_get_devdata(host);
hw->dev = &pdev->dev;

View file

@ -358,7 +358,6 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev)
return ret;
ctrl->num_chipselect = 1;
ctrl->dev.of_node = pdev->dev.of_node;
ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
ctrl->auto_runtime_pm = true;
ctrl->mem_ops = &amlogic_spifc_a1_mem_ops;

View file

@ -781,7 +781,6 @@ static int aml_spisg_probe(struct platform_device *pdev)
pm_runtime_resume_and_get(&spisg->pdev->dev);
ctlr->num_chipselect = 4;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST |
SPI_3WIRE | SPI_TX_QUAD | SPI_RX_QUAD;
ctlr->max_speed_hz = 1000 * 1000 * 100;

View file

@ -485,7 +485,6 @@ static int apple_spi_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret, "Unable to bind to interrupt\n");
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->bus_num = pdev->id;
ctlr->num_chipselect = 1;
ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;

View file

@ -195,7 +195,6 @@ static int ar934x_spi_probe(struct platform_device *pdev)
ctlr->transfer_one_message = ar934x_spi_transfer_one_message;
ctlr->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->num_chipselect = 3;
dev_set_drvdata(&pdev->dev, ctlr);

View file

@ -813,7 +813,6 @@ MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids);
static int a3700_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
struct spi_controller *host;
struct a3700_spi *spi;
u32 num_cs = 0;
@ -826,14 +825,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto out;
}
if (of_property_read_u32(of_node, "num-cs", &num_cs)) {
if (of_property_read_u32(dev->of_node, "num-cs", &num_cs)) {
dev_err(dev, "could not find num-cs\n");
ret = -ENXIO;
goto error;
}
host->bus_num = pdev->id;
host->dev.of_node = of_node;
host->mode_bits = SPI_MODE_3;
host->num_chipselect = num_cs;
host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);

View file

@ -48,6 +48,8 @@
/* CEx Address Decoding Range Register */
#define CE0_SEGMENT_ADDR_REG 0x30
#define FULL_DUPLEX_RX_DATA 0x1e4
/* CEx Read timing compensation register */
#define CE0_TIMING_COMPENSATION_REG 0x94
@ -81,6 +83,7 @@ struct aspeed_spi_data {
u32 hclk_mask;
u32 hdiv_max;
u32 min_window_size;
bool full_duplex;
phys_addr_t (*segment_start)(struct aspeed_spi *aspi, u32 reg);
phys_addr_t (*segment_end)(struct aspeed_spi *aspi, u32 reg);
@ -105,6 +108,7 @@ struct aspeed_spi {
struct clk *clk;
u32 clk_freq;
u8 cs_change;
struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS];
};
@ -280,7 +284,8 @@ stop_user:
}
/* support for 1-1-1, 1-1-2 or 1-1-4 */
static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
static bool aspeed_spi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (op->cmd.buswidth > 1)
return false;
@ -305,7 +310,8 @@ static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op
static const struct aspeed_spi_data ast2400_spi_data;
static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int do_aspeed_spi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
@ -367,11 +373,12 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
return ret;
}
static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int aspeed_spi_exec_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
int ret;
ret = do_aspeed_spi_exec_op(mem, op);
ret = do_aspeed_spi_exec_mem_op(mem, op);
if (ret)
dev_err(&mem->spi->dev, "operation failed: %d\n", ret);
return ret;
@ -773,8 +780,8 @@ static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
}
static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
.supports_op = aspeed_spi_supports_op,
.exec_op = aspeed_spi_exec_op,
.supports_op = aspeed_spi_supports_mem_op,
.exec_op = aspeed_spi_exec_mem_op,
.get_name = aspeed_spi_get_name,
.dirmap_create = aspeed_spi_dirmap_create,
.dirmap_read = aspeed_spi_dirmap_read,
@ -843,6 +850,110 @@ static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable)
aspeed_spi_chip_enable(aspi, cs, enable);
}
/*
 * prepare_message callback for user-mode (non spi-mem) transfers.
 *
 * Folds the clock divider for the device's max_speed_hz into the cached
 * base control value for this chip select, and enters user mode
 * (asserting CS) unless the previous message's last transfer requested
 * cs_change, in which case CS handling is carried over.
 */
static int aspeed_spi_user_prepare_msg(struct spi_controller *ctlr,
				       struct spi_message *msg)
{
	struct aspeed_spi *aspi =
		(struct aspeed_spi *)spi_controller_get_devdata(ctlr);
	const struct aspeed_spi_data *data = aspi->data;
	struct spi_device *spi = msg->spi;
	u32 cs = spi_get_chipselect(spi, 0);
	struct aspeed_spi_chip *chip = &aspi->chips[cs];
	u32 ctrl_val;
	u32 clk_div = data->get_clk_div(chip, spi->max_speed_hz);

	/*
	 * NOTE(review): ~CTRL_IO_MODE_MASK & data->hclk_mask forms one
	 * combined mask, so this clears the IO-mode bits AND every bit
	 * outside hclk_mask — confirm clearing the non-hclk bits is
	 * intentional and not a precedence slip.
	 */
	ctrl_val = chip->ctl_val[ASPEED_SPI_BASE];
	ctrl_val &= ~CTRL_IO_MODE_MASK & data->hclk_mask;
	ctrl_val |= clk_div;
	chip->ctl_val[ASPEED_SPI_BASE] = ctrl_val;

	/* cs_change set by the previous message keeps CS asserted. */
	if (aspi->cs_change == 0)
		aspeed_spi_start_user(chip);
	return 0;
}
/*
 * unprepare_message callback: leave user mode (deasserting CS) unless
 * the message's last transfer requested cs_change, in which case CS is
 * left asserted for the next message.
 */
static int aspeed_spi_user_unprepare_msg(struct spi_controller *ctlr,
					 struct spi_message *msg)
{
	struct aspeed_spi *aspi =
		(struct aspeed_spi *)spi_controller_get_devdata(ctlr);
	struct spi_device *spi = msg->spi;
	u32 cs = spi_get_chipselect(spi, 0);
	struct aspeed_spi_chip *chip = &aspi->chips[cs];

	if (aspi->cs_change == 0)
		aspeed_spi_stop_user(chip);
	return 0;
}
/*
 * Write @len bytes from @tx_buf to the chip's AHB window @dst, one byte
 * at a time.  When the controller supports full duplex AND the caller
 * passed the same buffer for TX and RX (tx_buf == rx_buf is how a
 * full-duplex transfer is signalled here), each written byte's echo is
 * read back from FULL_DUPLEX_RX_DATA into the same slot.
 *
 * NOTE(review): on the unsupported dual/quad + full-duplex combination
 * this returns without transferring anything, and being void it cannot
 * report the failure to the caller — confirm the caller tolerates a
 * silently dropped transfer.
 */
static void aspeed_spi_user_transfer_tx(struct aspeed_spi *aspi,
					struct spi_device *spi,
					const u8 *tx_buf, u8 *rx_buf,
					void *dst, u32 len)
{
	const struct aspeed_spi_data *data = aspi->data;
	bool full_duplex_transfer = data->full_duplex && tx_buf == rx_buf;
	u32 i;

	/* Full duplex echo capture only works in single IO mode. */
	if (full_duplex_transfer &&
	    !!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD |
			    SPI_RX_DUAL | SPI_RX_QUAD))) {
		dev_err(aspi->dev,
			"full duplex is only supported for single IO mode\n");
		return;
	}

	for (i = 0; i < len; i++) {
		writeb(tx_buf[i], dst);
		if (full_duplex_transfer)
			rx_buf[i] = readb(aspi->regs + FULL_DUPLEX_RX_DATA);
	}
}
/*
 * transfer_one callback for user-mode transfers.
 *
 * TX goes byte-by-byte through the chip's AHB window (capturing RX at
 * the same time when tx_buf == rx_buf, i.e. full duplex); a separate RX
 * buffer is filled with a plain half-duplex read afterwards.  Dual/quad
 * IO mode is selected per direction from spi->mode.
 */
static int aspeed_spi_user_transfer(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct aspeed_spi *aspi =
		(struct aspeed_spi *)spi_controller_get_devdata(ctlr);
	u32 cs = spi_get_chipselect(spi, 0);
	struct aspeed_spi_chip *chip = &aspi->chips[cs];
	void __iomem *ahb_base = aspi->chips[cs].ahb_base;
	const u8 *tx_buf = xfer->tx_buf;
	u8 *rx_buf = xfer->rx_buf;

	dev_dbg(aspi->dev,
		"[cs%d] xfer: width %d, len %u, tx %p, rx %p\n",
		cs, xfer->bits_per_word, xfer->len,
		tx_buf, rx_buf);

	if (tx_buf) {
		if (spi->mode & SPI_TX_DUAL)
			aspeed_spi_set_io_mode(chip, CTRL_IO_DUAL_DATA);
		else if (spi->mode & SPI_TX_QUAD)
			aspeed_spi_set_io_mode(chip, CTRL_IO_QUAD_DATA);
		aspeed_spi_user_transfer_tx(aspi, spi, tx_buf, rx_buf,
					    (void *)ahb_base, xfer->len);
	}

	/* rx_buf == tx_buf means the TX path already captured RX. */
	if (rx_buf && rx_buf != tx_buf) {
		if (spi->mode & SPI_RX_DUAL)
			aspeed_spi_set_io_mode(chip, CTRL_IO_DUAL_DATA);
		else if (spi->mode & SPI_RX_QUAD)
			aspeed_spi_set_io_mode(chip, CTRL_IO_QUAD_DATA);
		ioread8_rep(ahb_base, rx_buf, xfer->len);
	}

	xfer->error = 0;
	/* Remembered so {un}prepare_msg can keep CS asserted if requested. */
	aspi->cs_change = xfer->cs_change;

	return 0;
}
static int aspeed_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@ -898,7 +1009,9 @@ static int aspeed_spi_probe(struct platform_device *pdev)
ctlr->setup = aspeed_spi_setup;
ctlr->cleanup = aspeed_spi_cleanup;
ctlr->num_chipselect = of_get_available_child_count(dev->of_node);
ctlr->dev.of_node = dev->of_node;
ctlr->prepare_message = aspeed_spi_user_prepare_msg;
ctlr->unprepare_message = aspeed_spi_user_unprepare_msg;
ctlr->transfer_one = aspeed_spi_user_transfer;
aspi->num_cs = ctlr->num_chipselect;
@ -1455,6 +1568,7 @@ static const struct aspeed_spi_data ast2400_fmc_data = {
.hclk_mask = 0xfffff0ff,
.hdiv_max = 1,
.min_window_size = 0x800000,
.full_duplex = false,
.calibrate = aspeed_spi_calibrate,
.get_clk_div = aspeed_get_clk_div_ast2400,
.segment_start = aspeed_spi_segment_start,
@ -1471,6 +1585,7 @@ static const struct aspeed_spi_data ast2400_spi_data = {
.timing = 0x14,
.hclk_mask = 0xfffff0ff,
.hdiv_max = 1,
.full_duplex = false,
.get_clk_div = aspeed_get_clk_div_ast2400,
.calibrate = aspeed_spi_calibrate,
/* No segment registers */
@ -1485,6 +1600,7 @@ static const struct aspeed_spi_data ast2500_fmc_data = {
.hclk_mask = 0xffffd0ff,
.hdiv_max = 1,
.min_window_size = 0x800000,
.full_duplex = false,
.get_clk_div = aspeed_get_clk_div_ast2500,
.calibrate = aspeed_spi_calibrate,
.segment_start = aspeed_spi_segment_start,
@ -1502,6 +1618,7 @@ static const struct aspeed_spi_data ast2500_spi_data = {
.hclk_mask = 0xffffd0ff,
.hdiv_max = 1,
.min_window_size = 0x800000,
.full_duplex = false,
.get_clk_div = aspeed_get_clk_div_ast2500,
.calibrate = aspeed_spi_calibrate,
.segment_start = aspeed_spi_segment_start,
@ -1520,6 +1637,7 @@ static const struct aspeed_spi_data ast2600_fmc_data = {
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
.min_window_size = 0x200000,
.full_duplex = false,
.get_clk_div = aspeed_get_clk_div_ast2600,
.calibrate = aspeed_spi_ast2600_calibrate,
.segment_start = aspeed_spi_segment_ast2600_start,
@ -1538,6 +1656,7 @@ static const struct aspeed_spi_data ast2600_spi_data = {
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
.min_window_size = 0x200000,
.full_duplex = false,
.get_clk_div = aspeed_get_clk_div_ast2600,
.calibrate = aspeed_spi_ast2600_calibrate,
.segment_start = aspeed_spi_segment_ast2600_start,
@ -1556,6 +1675,7 @@ static const struct aspeed_spi_data ast2700_fmc_data = {
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
.min_window_size = 0x10000,
.full_duplex = true,
.get_clk_div = aspeed_get_clk_div_ast2600,
.calibrate = aspeed_spi_ast2600_calibrate,
.segment_start = aspeed_spi_segment_ast2700_start,
@ -1573,6 +1693,7 @@ static const struct aspeed_spi_data ast2700_spi_data = {
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
.min_window_size = 0x10000,
.full_duplex = true,
.get_clk_div = aspeed_get_clk_div_ast2600,
.calibrate = aspeed_spi_ast2600_calibrate,
.segment_start = aspeed_spi_segment_ast2700_start,

679
drivers/spi/spi-atcspi200.c Normal file
View file

@ -0,0 +1,679 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for Andes ATCSPI200 SPI Controller
*
* Copyright (C) 2025 Andes Technology Corporation.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dev_printk.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/* Register definitions */
#define ATCSPI_TRANS_FMT 0x10 /* SPI transfer format register */
#define ATCSPI_TRANS_CTRL 0x20 /* SPI transfer control register */
#define ATCSPI_CMD 0x24 /* SPI command register */
#define ATCSPI_ADDR 0x28 /* SPI address register */
#define ATCSPI_DATA 0x2C /* SPI data register */
#define ATCSPI_CTRL 0x30 /* SPI control register */
#define ATCSPI_STATUS 0x34 /* SPI status register */
#define ATCSPI_TIMING 0x40 /* SPI interface timing register */
#define ATCSPI_CONFIG 0x7C /* SPI configuration register */

/* Transfer format register */
#define TRANS_FMT_CPHA BIT(0)
#define TRANS_FMT_CPOL BIT(1)
#define TRANS_FMT_DATA_MERGE_EN BIT(7)
#define TRANS_FMT_DATA_LEN_MASK GENMASK(12, 8)
#define TRANS_FMT_ADDR_LEN_MASK GENMASK(17, 16)
/* Data/address lengths are encoded as N-1 in their fields. */
#define TRANS_FMT_DATA_LEN(x) FIELD_PREP(TRANS_FMT_DATA_LEN_MASK, (x) - 1)
#define TRANS_FMT_ADDR_LEN(x) FIELD_PREP(TRANS_FMT_ADDR_LEN_MASK, (x) - 1)

/* Transfer control register */
#define TRANS_MODE_MASK GENMASK(27, 24)
#define TRANS_MODE_W_ONLY FIELD_PREP(TRANS_MODE_MASK, 1)
#define TRANS_MODE_R_ONLY FIELD_PREP(TRANS_MODE_MASK, 2)
#define TRANS_MODE_NONE_DATA FIELD_PREP(TRANS_MODE_MASK, 7)
#define TRANS_MODE_DMY_READ FIELD_PREP(TRANS_MODE_MASK, 9)
/* Count fields encode N as N-1; a zero count leaves the field clear. */
#define TRANS_FIELD_DECNZ(m, x) ((x) ? FIELD_PREP(m, (x) - 1) : 0)
#define TRANS_RD_TRANS_CNT(x) TRANS_FIELD_DECNZ(GENMASK(8, 0), x)
#define TRANS_DUMMY_CNT(x) TRANS_FIELD_DECNZ(GENMASK(10, 9), x)
#define TRANS_WR_TRANS_CNT(x) TRANS_FIELD_DECNZ(GENMASK(20, 12), x)
/* 0 = single, 1 = dual, 2 = quad data lines. */
#define TRANS_DUAL_QUAD(x) FIELD_PREP(GENMASK(23, 22), (x))
#define TRANS_ADDR_FMT BIT(28)
#define TRANS_ADDR_EN BIT(29)
#define TRANS_CMD_EN BIT(30)

/* Control register */
#define CTRL_SPI_RST BIT(0)
#define CTRL_RX_FIFO_RST BIT(1)
#define CTRL_TX_FIFO_RST BIT(2)
#define CTRL_RX_DMA_EN BIT(3)
#define CTRL_TX_DMA_EN BIT(4)

/* Status register */
#define ATCSPI_ACTIVE BIT(0)
#define ATCSPI_RX_EMPTY BIT(14)
#define ATCSPI_TX_FULL BIT(23)

/* Interface timing setting */
#define TIMING_SCLK_DIV_MASK GENMASK(7, 0)
#define TIMING_SCLK_DIV_MAX 0xFE

/* Configuration register */
#define RXFIFO_SIZE(x) FIELD_GET(GENMASK(3, 0), (x))
#define TXFIFO_SIZE(x) FIELD_GET(GENMASK(7, 4), (x))

/* driver configurations */
#define ATCSPI_MAX_TRANS_LEN 512 /* max payload bytes per spi-mem op */
#define ATCSPI_MAX_SPEED_HZ 50000000
#define ATCSPI_RDY_TIMEOUT_US 1000000
#define ATCSPI_XFER_TIMEOUT(n) ((n) * 10) /* DMA budget: 10 ms per byte */
#define ATCSPI_MAX_CS_NUM 1
#define ATCSPI_DMA_THRESHOLD 256 /* bytes; below this PIO is used */
#define ATCSPI_BITS_PER_UINT 8
#define ATCSPI_DATA_MERGE_EN 1 /* allow 4-byte FIFO packing */
#define ATCSPI_DMA_SUPPORT 1 /* try to set up DMA at probe */
/**
 * struct atcspi_dev - Andes ATCSPI200 SPI controller private data
 * @host: Pointer to the SPI controller structure.
 * @mutex_lock: A mutex to protect concurrent access to the controller.
 * @dma_completion: A completion to signal the end of a DMA transfer.
 * @dev: Pointer to the device structure.
 * @regmap: Register map for accessing controller registers.
 * @clk: Pointer to the controller's functional clock.
 * @dma_addr: The physical address of the SPI data register for DMA.
 * @clk_rate: The cached frequency of the functional clock.
 * @sclk_rate: The target frequency for the SPI clock (SCLK).
 * @txfifo_size: The size of the transmit FIFO in bytes.
 * @rxfifo_size: The size of the receive FIFO in bytes.
 * @data_merge: A flag indicating if the data merge mode is enabled for
 * the current transfer.
 * @use_dma: Enable DMA mode if ATCSPI_DMA_SUPPORT is set and DMA is
 * successfully configured.
 */
struct atcspi_dev {
	struct spi_controller *host;
	struct mutex mutex_lock;
	struct completion dma_completion;
	struct device *dev;
	struct regmap *regmap;
	struct clk *clk;
	dma_addr_t dma_addr;
	unsigned int clk_rate;
	unsigned int sclk_rate;
	unsigned int txfifo_size;
	unsigned int rxfifo_size;
	bool data_merge;
	bool use_dma;
};
/*
 * Poll the status register until the relevant FIFO is usable:
 * TX direction waits for FIFO-not-full, RX for FIFO-not-empty.
 *
 * A stuck FIFO is a hard failure, so the timeout is now logged at
 * error level instead of the previous dev_info().
 *
 * Returns 0 on success or a negative errno on timeout.
 */
static int atcspi_wait_fifo_ready(struct atcspi_dev *spi,
				  enum spi_mem_data_dir dir)
{
	unsigned int val;
	unsigned int mask;
	int ret;

	mask = (dir == SPI_MEM_DATA_OUT) ? ATCSPI_TX_FULL : ATCSPI_RX_EMPTY;

	ret = regmap_read_poll_timeout(spi->regmap,
				       ATCSPI_STATUS,
				       val,
				       !(val & mask),
				       0,
				       ATCSPI_RDY_TIMEOUT_US);
	if (ret)
		dev_err(spi->dev, "Timed out waiting for FIFO ready\n");

	return ret;
}
/*
 * PIO data phase: move op->data.nbytes bytes through the data register,
 * waiting on the FIFO before each access.  With data merge enabled each
 * access carries 4 bytes, otherwise 1.  atcspi_set_trans_fmt() only
 * enables merge when nbytes is a multiple of 4, so the byte counter
 * cannot go negative.
 *
 * NOTE(review): the 4-byte path dereferences the caller's buffer through
 * unsigned int * — this assumes spi-mem buffers are 4-byte aligned;
 * confirm for architectures that fault on unaligned access.
 *
 * Returns 0 on success or a negative errno from a FIFO wait timeout.
 */
static int atcspi_xfer_data_poll(struct atcspi_dev *spi,
				 const struct spi_mem_op *op)
{
	void *rx_buf = op->data.buf.in;
	const void *tx_buf = op->data.buf.out;
	unsigned int val;
	int trans_bytes = op->data.nbytes;
	int num_byte;
	int ret = 0;

	/* Bytes moved per data-register access: 4 when merging, else 1. */
	num_byte = spi->data_merge ? 4 : 1;

	while (trans_bytes) {
		if (op->data.dir == SPI_MEM_DATA_OUT) {
			ret = atcspi_wait_fifo_ready(spi, SPI_MEM_DATA_OUT);
			if (ret)
				return ret;

			if (spi->data_merge)
				val = *(unsigned int *)tx_buf;
			else
				val = *(unsigned char *)tx_buf;
			regmap_write(spi->regmap, ATCSPI_DATA, val);
			tx_buf = (unsigned char *)tx_buf + num_byte;
		} else {
			ret = atcspi_wait_fifo_ready(spi, SPI_MEM_DATA_IN);
			if (ret)
				return ret;

			regmap_read(spi->regmap, ATCSPI_DATA, &val);
			if (spi->data_merge)
				*(unsigned int *)rx_buf = val;
			else
				*(unsigned char *)rx_buf = (unsigned char)val;
			rx_buf = (unsigned char *)rx_buf + num_byte;
		}
		trans_bytes -= num_byte;
	}

	return ret;
}
/*
 * Program the transfer control register for one spi-mem operation:
 * command/address phase enables, address format, dual/quad data width,
 * transfer mode (write-only, read-only, dummy-read, or no-data) and the
 * read/write/dummy byte counts.
 */
static void atcspi_set_trans_ctl(struct atcspi_dev *spi,
				 const struct spi_mem_op *op)
{
	unsigned int tc = 0;

	if (op->cmd.nbytes)
		tc |= TRANS_CMD_EN;

	if (op->addr.nbytes)
		tc |= TRANS_ADDR_EN;
	if (op->addr.buswidth > 1)
		tc |= TRANS_ADDR_FMT;

	if (op->data.nbytes) {
		/* buswidth 1/2/4 maps to field value 0/1/2. */
		tc |= TRANS_DUAL_QUAD(ffs(op->data.buswidth) - 1);
		if (op->data.dir == SPI_MEM_DATA_IN) {
			if (op->dummy.nbytes)
				tc |= TRANS_MODE_DMY_READ |
				      TRANS_DUMMY_CNT(op->dummy.nbytes);
			else
				tc |= TRANS_MODE_R_ONLY;
			tc |= TRANS_RD_TRANS_CNT(op->data.nbytes);
		} else {
			tc |= TRANS_MODE_W_ONLY |
			      TRANS_WR_TRANS_CNT(op->data.nbytes);
		}
	} else {
		tc |= TRANS_MODE_NONE_DATA;
	}

	regmap_write(spi->regmap, ATCSPI_TRANS_CTRL, tc);
}
/*
 * Update the transfer format register for one operation: enable 32-bit
 * "data merge" FIFO packing when the payload length allows it, and set
 * the address phase length.  spi->data_merge is cached so the data-phase
 * code and DMA config use the matching access width.
 */
static void atcspi_set_trans_fmt(struct atcspi_dev *spi,
				 const struct spi_mem_op *op)
{
	unsigned int val;

	regmap_read(spi->regmap, ATCSPI_TRANS_FMT, &val);

	if (op->data.nbytes) {
		/* Merge 4 bytes per access only for 8-bit units and 4n lengths. */
		if (ATCSPI_DATA_MERGE_EN && ATCSPI_BITS_PER_UINT == 8 &&
		    !(op->data.nbytes % 4)) {
			val |= TRANS_FMT_DATA_MERGE_EN;
			spi->data_merge = true;
		} else {
			val &= ~TRANS_FMT_DATA_MERGE_EN;
			spi->data_merge = false;
		}
	}

	val = (val & ~TRANS_FMT_ADDR_LEN_MASK) |
	      TRANS_FMT_ADDR_LEN(op->addr.nbytes);
	regmap_write(spi->regmap, ATCSPI_TRANS_FMT, val);
}
/*
 * Program format, control, address and command registers for one
 * spi-mem operation.  The command register is written last —
 * presumably the CMD write is what starts the transfer, which is why
 * it happens even when the command phase is disabled (op->cmd.nbytes
 * == 0); confirm against the ATCSPI200 datasheet.
 */
static void atcspi_prepare_trans(struct atcspi_dev *spi,
				 const struct spi_mem_op *op)
{
	atcspi_set_trans_fmt(spi, op);
	atcspi_set_trans_ctl(spi, op);

	if (op->addr.nbytes)
		regmap_write(spi->regmap, ATCSPI_ADDR, op->addr.val);

	regmap_write(spi->regmap, ATCSPI_CMD, op->cmd.opcode);
}
/*
 * spi-mem adjust_op_size callback: clamp the data phase to the
 * controller's maximum transfer length, and round DMA-sized transfers
 * down to a 4-byte multiple.
 */
static int atcspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct atcspi_dev *spi = spi_controller_get_devdata(mem->spi->controller);
	unsigned int nbytes = min(op->data.nbytes, ATCSPI_MAX_TRANS_LEN);

	/* DMA needs to be aligned to 4 byte */
	if (spi->use_dma && nbytes >= ATCSPI_DMA_THRESHOLD)
		nbytes = ALIGN_DOWN(nbytes, 4);

	op->data.nbytes = nbytes;

	return 0;
}
/*
 * Configure the slave parameters of the RX or TX DMA channel: direction,
 * the data-register address as the device-side endpoint, burst sizes of
 * half the FIFO depth, and an access width matching the current data
 * merge setting (4 bytes when merging, 1 otherwise).
 *
 * Returns the result of dmaengine_slave_config().
 */
static int atcspi_dma_config(struct atcspi_dev *spi, bool is_rx)
{
	struct dma_slave_config conf = { 0 };
	struct dma_chan *chan;

	if (is_rx) {
		chan = spi->host->dma_rx;
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = spi->dma_addr;
	} else {
		chan = spi->host->dma_tx;
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = spi->dma_addr;
	}

	/* Burst at half the FIFO depth to avoid over/underrun. */
	conf.dst_maxburst = spi->rxfifo_size / 2;
	conf.src_maxburst = spi->txfifo_size / 2;

	if (spi->data_merge) {
		conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	return dmaengine_slave_config(chan, &conf);
}
/* DMA completion callback: wake the thread waiting in atcspi_dma_trans(). */
static void atcspi_dma_callback(void *arg)
{
	complete(arg);
}
static int atcspi_dma_trans(struct atcspi_dev *spi,
const struct spi_mem_op *op)
{
struct dma_async_tx_descriptor *desc;
struct dma_chan *dma_ch;
struct sg_table sgt;
enum dma_transfer_direction dma_dir;
dma_cookie_t cookie;
unsigned int ctrl;
int timeout;
int ret;
regmap_read(spi->regmap, ATCSPI_CTRL, &ctrl);
ctrl |= CTRL_TX_DMA_EN | CTRL_RX_DMA_EN;
regmap_write(spi->regmap, ATCSPI_CTRL, ctrl);
if (op->data.dir == SPI_MEM_DATA_IN) {
ret = atcspi_dma_config(spi, TRUE);
dma_dir = DMA_DEV_TO_MEM;
dma_ch = spi->host->dma_rx;
} else {
ret = atcspi_dma_config(spi, FALSE);
dma_dir = DMA_MEM_TO_DEV;
dma_ch = spi->host->dma_tx;
}
if (ret)
return ret;
ret = spi_controller_dma_map_mem_op_data(spi->host, op, &sgt);
if (ret)
return ret;
desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents, dma_dir,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
ret = -ENOMEM;
goto exit_unmap;
}
reinit_completion(&spi->dma_completion);
desc->callback = atcspi_dma_callback;
desc->callback_param = &spi->dma_completion;
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret)
goto exit_unmap;
dma_async_issue_pending(dma_ch);
timeout = msecs_to_jiffies(ATCSPI_XFER_TIMEOUT(op->data.nbytes));
if (!wait_for_completion_timeout(&spi->dma_completion, timeout)) {
ret = -ETIMEDOUT;
dmaengine_terminate_all(dma_ch);
}
exit_unmap:
spi_controller_dma_unmap_mem_op_data(spi->host, op, &sgt);
return ret;
}
/*
 * spi-mem exec_op implementation.
 *
 * Serializes against concurrent callers with the device mutex, programs
 * the transfer registers, moves the data phase via DMA (large, aligned
 * transfers) or PIO, then waits for the controller to go idle.
 *
 * Failures are now logged at error level (they were dev_info()).
 *
 * Returns 0 on success or a negative errno.
 */
static int atcspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_device *spi_dev = mem->spi;
	struct atcspi_dev *spi;
	unsigned int val;
	int ret;

	spi = spi_controller_get_devdata(spi_dev->controller);

	mutex_lock(&spi->mutex_lock);

	atcspi_prepare_trans(spi, op);

	if (op->data.nbytes) {
		if (spi->use_dma && op->data.nbytes >= ATCSPI_DMA_THRESHOLD)
			ret = atcspi_dma_trans(spi, op);
		else
			ret = atcspi_xfer_data_poll(spi, op);
		if (ret) {
			dev_err(spi->dev, "SPI transmission failed\n");
			goto exec_mem_exit;
		}
	}

	/* Wait for the controller to finish shifting the frame out. */
	ret = regmap_read_poll_timeout(spi->regmap,
				       ATCSPI_STATUS,
				       val,
				       !(val & ATCSPI_ACTIVE),
				       0,
				       ATCSPI_RDY_TIMEOUT_US);
	if (ret)
		dev_err(spi->dev, "Timed out waiting for ATCSPI_ACTIVE\n");

exec_mem_exit:
	mutex_unlock(&spi->mutex_lock);

	return ret;
}
/* spi-mem ops: exec_op plus length clamping; default supports_op is used. */
static const struct spi_controller_mem_ops atcspi_mem_ops = {
	.exec_op = atcspi_exec_mem_op,
	.adjust_op_size = atcspi_adjust_op_size,
};
/*
 * Reset the controller, program the default transfer format, read the
 * FIFO geometry and set the SCLK divider for spi->sclk_rate.  Also
 * called from resume to reinitialize the hardware.
 *
 * Bug fixed here: the divider was computed into an unsigned char, so
 * values above 255 were silently truncated BEFORE the
 * "div > TIMING_SCLK_DIV_MAX" range check, defeating it and programming
 * a wrong (too fast) clock.  The divider is now computed in full width
 * and range-checked before use.
 *
 * Returns 0 on success or a negative errno.
 */
static int atcspi_setup(struct atcspi_dev *spi)
{
	unsigned int ctrl_val;
	unsigned int val;
	unsigned int div;
	int actual_spi_sclk_f;
	int ret;

	/* Reset the core and both FIFOs; the bits self-clear when done. */
	ctrl_val = CTRL_TX_FIFO_RST | CTRL_RX_FIFO_RST | CTRL_SPI_RST;
	regmap_write(spi->regmap, ATCSPI_CTRL, ctrl_val);
	ret = regmap_read_poll_timeout(spi->regmap,
				       ATCSPI_CTRL,
				       val,
				       !(val & ctrl_val),
				       0,
				       ATCSPI_RDY_TIMEOUT_US);
	if (ret)
		return dev_err_probe(spi->dev, ret,
				     "Timed out waiting for ATCSPI_CTRL\n");

	/* 8-bit data units, SPI mode 3 (CPOL = 1, CPHA = 1). */
	val = TRANS_FMT_DATA_LEN(ATCSPI_BITS_PER_UINT) |
	      TRANS_FMT_CPHA | TRANS_FMT_CPOL;
	regmap_write(spi->regmap, ATCSPI_TRANS_FMT, val);

	regmap_read(spi->regmap, ATCSPI_CONFIG, &val);
	spi->txfifo_size = BIT(TXFIFO_SIZE(val) + 1);
	spi->rxfifo_size = BIT(RXFIFO_SIZE(val) + 1);

	regmap_read(spi->regmap, ATCSPI_TIMING, &val);
	val &= ~TIMING_SCLK_DIV_MASK;
	/*
	 * The SCLK_DIV value 0xFF is special and indicates that the
	 * SCLK rate should be the same as the SPI clock rate.
	 */
	if (spi->sclk_rate >= spi->clk_rate) {
		div = TIMING_SCLK_DIV_MASK;
	} else {
		/*
		 * SCLK = clk_rate / ((div + 1) * 2): pick the exact divider
		 * when possible, otherwise the closest setting whose
		 * frequency is lower than the target.
		 */
		div = (spi->clk_rate + (spi->sclk_rate * 2 - 1)) /
		      (spi->sclk_rate * 2) - 1;
		if (div > TIMING_SCLK_DIV_MAX)
			return dev_err_probe(spi->dev, -EINVAL,
					     "Unsupported SPI clock %d\n",
					     spi->sclk_rate);

		/* Report when the divider cannot hit the target exactly. */
		actual_spi_sclk_f = spi->clk_rate / ((div + 1) * 2);
		if (actual_spi_sclk_f < spi->sclk_rate)
			dev_info(spi->dev,
				 "Clock adjusted %d to %d due to divider limitation\n",
				 spi->sclk_rate, actual_spi_sclk_f);
	}
	val |= div;
	regmap_write(spi->regmap, ATCSPI_TIMING, val);

	return ret;
}
static int atcspi_init_resources(struct platform_device *pdev,
struct atcspi_dev *spi,
struct resource **mem_res)
{
void __iomem *base;
const struct regmap_config atcspi_regmap_cfg = {
.name = "atcspi",
.reg_bits = 32,
.val_bits = 32,
.cache_type = REGCACHE_NONE,
.reg_stride = 4,
.pad_bits = 0,
.max_register = ATCSPI_CONFIG
};
base = devm_platform_get_and_ioremap_resource(pdev, 0, mem_res);
if (IS_ERR(base))
return dev_err_probe(spi->dev, PTR_ERR(base),
"Failed to get ioremap resource\n");
spi->regmap = devm_regmap_init_mmio(spi->dev, base,
&atcspi_regmap_cfg);
if (IS_ERR(spi->regmap))
return dev_err_probe(spi->dev, PTR_ERR(spi->regmap),
"Failed to init regmap\n");
spi->clk = devm_clk_get(spi->dev, NULL);
if (IS_ERR(spi->clk))
return dev_err_probe(spi->dev, PTR_ERR(spi->clk),
"Failed to get SPI clock\n");
spi->sclk_rate = ATCSPI_MAX_SPEED_HZ;
return 0;
}
/*
 * Request the RX and TX DMA channels and prepare the DMA completion.
 *
 * Bug fixed here: the channels come from devm_dma_request_chan(), so
 * they are released by devres at detach.  The old error path called
 * dma_release_channel() on the RX channel manually, which would have
 * released it a second time when devres ran.  Now the pointer is simply
 * cleared so the driver falls back to PIO; devres frees the channel.
 *
 * Returns 0 on success or a negative errno (caller falls back to PIO).
 */
static int atcspi_configure_dma(struct atcspi_dev *spi)
{
	struct dma_chan *dma_chan;

	dma_chan = devm_dma_request_chan(spi->dev, "rx");
	if (IS_ERR(dma_chan))
		return PTR_ERR(dma_chan);
	spi->host->dma_rx = dma_chan;

	dma_chan = devm_dma_request_chan(spi->dev, "tx");
	if (IS_ERR(dma_chan)) {
		/* Forget the RX channel; devm releases it at detach. */
		spi->host->dma_rx = NULL;
		return PTR_ERR(dma_chan);
	}
	spi->host->dma_tx = dma_chan;

	init_completion(&spi->dma_completion);

	return 0;
}
/*
 * Prepare/enable the functional clock and cache its rate.
 *
 * Bug fixed here: when clk_get_rate() returned 0 the function failed
 * AFTER enabling the clock, and the probe error path for this function
 * (free_controller) never disables it — the clock leaked enabled.  The
 * clock is now disabled before returning so every error return leaves
 * it off, matching what probe assumes.
 *
 * Returns 0 on success or a negative errno.
 */
static int atcspi_enable_clk(struct atcspi_dev *spi)
{
	int ret;

	ret = clk_prepare_enable(spi->clk);
	if (ret)
		return dev_err_probe(spi->dev, ret,
				     "Failed to enable clock\n");

	spi->clk_rate = clk_get_rate(spi->clk);
	if (!spi->clk_rate) {
		clk_disable_unprepare(spi->clk);
		return dev_err_probe(spi->dev, -EINVAL,
				     "Failed to get SPI clock rate\n");
	}

	return 0;
}
/*
 * Fill in the spi_controller description and record the physical
 * address of the data register, which the DMA engine targets.
 */
static void atcspi_init_controller(struct platform_device *pdev,
				   struct atcspi_dev *spi,
				   struct spi_controller *host,
				   struct resource *mem_res)
{
	/* DMA endpoint: physical address of the data register. */
	spi->dma_addr = (dma_addr_t)(mem_res->start + ATCSPI_DATA);

	/* Advertise the controller's capabilities to the SPI core. */
	host->bus_num = pdev->id;
	host->num_chipselect = ATCSPI_MAX_CS_NUM;
	host->max_speed_hz = spi->sclk_rate;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_QUAD | SPI_TX_QUAD;
	host->mem_ops = &atcspi_mem_ops;
}
/*
 * Probe: allocate the host, map resources, enable the clock, initialize
 * the hardware, set up the mutex and (optionally) DMA, then register
 * the controller.
 *
 * Bug fixed here: the controller was registered BEFORE mutex_init() and
 * the DMA setup ran.  devm_spi_register_controller() makes the
 * controller visible immediately, so a mem op could take an
 * uninitialized mutex or race the DMA configuration.  All state the mem
 * ops touch is now initialized before registration.
 */
static int atcspi_probe(struct platform_device *pdev)
{
	struct spi_controller *host;
	struct atcspi_dev *spi;
	struct resource *mem_res;
	int ret;

	host = spi_alloc_host(&pdev->dev, sizeof(*spi));
	if (!host)
		return -ENOMEM;

	spi = spi_controller_get_devdata(host);
	spi->host = host;
	spi->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	ret = atcspi_init_resources(pdev, spi, &mem_res);
	if (ret)
		goto free_controller;

	ret = atcspi_enable_clk(spi);
	if (ret)
		goto free_controller;

	atcspi_init_controller(pdev, spi, host, mem_res);

	ret = atcspi_setup(spi);
	if (ret)
		goto disable_clk;

	mutex_init(&spi->mutex_lock);

	/* DMA is best effort; fall back to PIO when unavailable. */
	spi->use_dma = false;
	if (ATCSPI_DMA_SUPPORT) {
		ret = atcspi_configure_dma(spi);
		if (ret)
			dev_info(spi->dev,
				 "Failed to init DMA, fallback to PIO mode\n");
		else
			spi->use_dma = true;
	}

	/* Register last: transfers may start as soon as this succeeds. */
	ret = devm_spi_register_controller(&pdev->dev, host);
	if (ret) {
		dev_err_probe(spi->dev, ret,
			      "Failed to register SPI controller\n");
		goto disable_clk;
	}

	return 0;

disable_clk:
	clk_disable_unprepare(spi->clk);
free_controller:
	spi_controller_put(host);
	return ret;
}
/*
 * System suspend: quiesce the SPI core queue, then gate the clock.
 *
 * Bug fixed here: the return value of spi_controller_suspend() was
 * ignored, so the clock could be gated while a message was still in
 * flight.  The failure is now propagated and the clock left running.
 */
static int atcspi_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct atcspi_dev *spi = spi_controller_get_devdata(host);
	int ret;

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	clk_disable_unprepare(spi->clk);

	return 0;
}
/*
 * System resume: ungate the clock, reinitialize the controller and
 * restart the SPI core queue.  On any failure the clock is gated again
 * before returning the error.
 */
static int atcspi_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct atcspi_dev *spi = spi_controller_get_devdata(host);
	int ret;

	ret = clk_prepare_enable(spi->clk);
	if (ret)
		return ret;

	ret = atcspi_setup(spi);
	if (!ret)
		ret = spi_controller_resume(host);

	if (ret)
		clk_disable_unprepare(spi->clk);

	return ret;
}
/* Sleep-state PM ops only; no runtime PM is implemented. */
static DEFINE_SIMPLE_DEV_PM_OPS(atcspi_pm_ops, atcspi_suspend, atcspi_resume);

static const struct of_device_id atcspi_of_match[] = {
	{ .compatible = "andestech,qilai-spi", },
	{ .compatible = "andestech,ae350-spi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atcspi_of_match);
/*
 * Platform driver glue.  Redundant ".owner = THIS_MODULE" removed:
 * module_platform_driver() registers via platform_driver_register(),
 * which sets .owner itself (checkpatch flags the explicit assignment).
 */
static struct platform_driver atcspi_driver = {
	.probe = atcspi_probe,
	.driver = {
		.name = "atcspi200",
		.of_match_table = atcspi_of_match,
		.pm = pm_sleep_ptr(&atcspi_pm_ops)
	}
};
module_platform_driver(atcspi_driver);

MODULE_AUTHOR("CL Wang <cl634@andestech.com>");
MODULE_DESCRIPTION("Andes ATCSPI200 SPI controller driver");
MODULE_LICENSE("GPL");

View file

@ -180,7 +180,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
}
sp = spi_controller_get_devdata(host);
host->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
host->use_gpio_descriptors = true;

View file

@ -1536,7 +1536,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
host->use_gpio_descriptors = true;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
host->dev.of_node = pdev->dev.of_node;
host->bus_num = pdev->id;
host->num_chipselect = 4;
host->setup = atmel_spi_setup;

View file

@ -23,6 +23,9 @@
#include <linux/spi/spi.h>
#include <trace/events/spi.h>
#define SPI_ENGINE_REG_DATA_WIDTH 0x0C
#define SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK GENMASK(23, 16)
#define SPI_ENGINE_REG_DATA_WIDTH_MASK GENMASK(15, 0)
#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
#define SPI_ENGINE_REG_RESET 0x40
@ -75,6 +78,8 @@
#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
#define SPI_ENGINE_CMD_REG_CONFIG 0x1
#define SPI_ENGINE_CMD_REG_XFER_BITS 0x2
#define SPI_ENGINE_CMD_REG_SDI_MASK 0x3
#define SPI_ENGINE_CMD_REG_SDO_MASK 0x4
#define SPI_ENGINE_MISC_SYNC 0x0
#define SPI_ENGINE_MISC_SLEEP 0x1
@ -105,6 +110,10 @@
#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
/* Extending SPI_MULTI_LANE_MODE values for optimizing messages. */
#define SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN -1
#define SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING -2
struct spi_engine_program {
unsigned int length;
uint16_t instructions[] __counted_by(length);
@ -142,6 +151,11 @@ struct spi_engine_offload {
unsigned long flags;
unsigned int offload_num;
unsigned int spi_mode_config;
unsigned int multi_lane_mode;
u8 rx_primary_lane_mask;
u8 tx_primary_lane_mask;
u8 rx_all_lanes_mask;
u8 tx_all_lanes_mask;
u8 bits_per_word;
};
@ -165,6 +179,25 @@ struct spi_engine {
bool offload_requires_sync;
};
/*
 * Build single-bit masks selecting only the primary (first-mapped) RX
 * and TX lane of @spi.  Used for single-lane transfers and to restore
 * the default lane selection after striped transfers.
 */
static void spi_engine_primary_lane_flag(struct spi_device *spi,
					 u8 *rx_lane_flags, u8 *tx_lane_flags)
{
	*rx_lane_flags = BIT(spi->rx_lane_map[0]);
	*tx_lane_flags = BIT(spi->tx_lane_map[0]);
}
/*
 * Build bitmasks covering every RX and TX lane mapped to @spi, for
 * striped multi-lane transfers.
 *
 * Bug fixed here: the outputs were only OR-ed into, so a caller passing
 * uninitialized storage (as the STRIPE path in
 * spi_engine_compile_message() does) folded uninitialized stack data
 * into the lane masks.  The outputs are now fully (re)initialized, so
 * callers that pre-zeroed them are unaffected.
 */
static void spi_engine_all_lanes_flags(struct spi_device *spi,
				       u8 *rx_lane_flags, u8 *tx_lane_flags)
{
	int i;

	*rx_lane_flags = 0;
	*tx_lane_flags = 0;

	for (i = 0; i < spi->num_rx_lanes; i++)
		*rx_lane_flags |= BIT(spi->rx_lane_map[i]);

	for (i = 0; i < spi->num_tx_lanes; i++)
		*tx_lane_flags |= BIT(spi->tx_lane_map[i]);
}
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
bool dry, uint16_t cmd)
{
@ -193,7 +226,7 @@ static unsigned int spi_engine_get_config(struct spi_device *spi)
}
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
struct spi_transfer *xfer)
struct spi_transfer *xfer, u32 num_lanes)
{
unsigned int len;
@ -204,6 +237,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
else
len = xfer->len / 4;
if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE)
len /= num_lanes;
while (len) {
unsigned int n = min(len, 256U);
unsigned int flags = 0;
@ -269,6 +305,7 @@ static int spi_engine_precompile_message(struct spi_message *msg)
{
unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
struct spi_transfer *xfer;
int multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN;
u8 min_bits_per_word = U8_MAX;
u8 max_bits_per_word = 0;
@ -284,6 +321,24 @@ static int spi_engine_precompile_message(struct spi_message *msg)
min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word);
max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word);
}
if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM ||
xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) {
switch (xfer->multi_lane_mode) {
case SPI_MULTI_LANE_MODE_SINGLE:
case SPI_MULTI_LANE_MODE_STRIPE:
break;
default:
/* Other modes, like mirror not supported */
return -EINVAL;
}
/* If all xfers have the same multi-lane mode, we can optimize. */
if (multi_lane_mode == SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN)
multi_lane_mode = xfer->multi_lane_mode;
else if (multi_lane_mode != xfer->multi_lane_mode)
multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING;
}
}
/*
@ -297,6 +352,14 @@ static int spi_engine_precompile_message(struct spi_message *msg)
priv->bits_per_word = min_bits_per_word;
else
priv->bits_per_word = 0;
priv->multi_lane_mode = multi_lane_mode;
spi_engine_primary_lane_flag(msg->spi,
&priv->rx_primary_lane_mask,
&priv->tx_primary_lane_mask);
spi_engine_all_lanes_flags(msg->spi,
&priv->rx_all_lanes_mask,
&priv->tx_all_lanes_mask);
}
return 0;
@ -310,6 +373,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
struct spi_engine_offload *priv;
struct spi_transfer *xfer;
int clk_div, new_clk_div, inst_ns;
int prev_multi_lane_mode = SPI_MULTI_LANE_MODE_SINGLE;
bool keep_cs = false;
u8 bits_per_word = 0;
@ -334,6 +398,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
* in the same way.
*/
bits_per_word = priv->bits_per_word;
prev_multi_lane_mode = priv->multi_lane_mode;
} else {
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
@ -344,6 +409,28 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
spi_engine_gen_cs(p, dry, spi, !xfer->cs_off);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM ||
xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) {
if (xfer->multi_lane_mode != prev_multi_lane_mode) {
u8 tx_lane_flags, rx_lane_flags;
if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE)
spi_engine_all_lanes_flags(spi, &rx_lane_flags,
&tx_lane_flags);
else
spi_engine_primary_lane_flag(spi, &rx_lane_flags,
&tx_lane_flags);
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
rx_lane_flags));
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
tx_lane_flags));
}
prev_multi_lane_mode = xfer->multi_lane_mode;
}
new_clk_div = host->max_speed_hz / xfer->effective_speed_hz;
if (new_clk_div != clk_div) {
clk_div = new_clk_div;
@ -360,7 +447,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
bits_per_word));
}
spi_engine_gen_xfer(p, dry, xfer);
spi_engine_gen_xfer(p, dry, xfer, spi->num_rx_lanes);
spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer),
inst_ns, xfer->effective_speed_hz);
@ -394,6 +481,19 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry,
if (clk_div != 1)
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0));
/* Restore single lane mode unless offload disable will restore it later. */
if (prev_multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE &&
(!msg->offload || priv->multi_lane_mode != SPI_MULTI_LANE_MODE_STRIPE)) {
u8 rx_lane_flags, tx_lane_flags;
spi_engine_primary_lane_flag(spi, &rx_lane_flags, &tx_lane_flags);
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, rx_lane_flags));
spi_engine_program_add_cmd(p, dry,
SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, tx_lane_flags));
}
}
static void spi_engine_xfer_next(struct spi_message *msg,
@ -799,6 +899,19 @@ static int spi_engine_setup(struct spi_device *device)
writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
if (host->num_data_lanes > 1) {
u8 rx_lane_flags, tx_lane_flags;
spi_engine_primary_lane_flag(device, &rx_lane_flags, &tx_lane_flags);
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
rx_lane_flags),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
tx_lane_flags),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
}
/*
* In addition to setting the flags, we have to do a CS assert command
* to make the new setting actually take effect.
@ -902,6 +1015,15 @@ static int spi_engine_trigger_enable(struct spi_offload *offload)
priv->bits_per_word),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) {
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
priv->rx_all_lanes_mask),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
priv->tx_all_lanes_mask),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
}
writel_relaxed(SPI_ENGINE_CMD_SYNC(1),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
@ -929,6 +1051,16 @@ static void spi_engine_trigger_disable(struct spi_offload *offload)
reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
writel_relaxed(reg, spi_engine->base +
SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
/* Restore single-lane mode. */
if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) {
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK,
priv->rx_primary_lane_mask),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK,
priv->tx_primary_lane_mask),
spi_engine->base + SPI_ENGINE_REG_CMD_FIFO);
}
}
static struct dma_chan
@ -973,7 +1105,7 @@ static int spi_engine_probe(struct platform_device *pdev)
{
struct spi_engine *spi_engine;
struct spi_controller *host;
unsigned int version;
unsigned int version, data_width_reg_val;
int irq, ret;
irq = platform_get_irq(pdev, 0);
@ -1042,7 +1174,7 @@ static int spi_engine_probe(struct platform_device *pdev)
return PTR_ERR(spi_engine->base);
version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
if (ADI_AXI_PCORE_VER_MAJOR(version) > 2) {
dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
ADI_AXI_PCORE_VER_MAJOR(version),
ADI_AXI_PCORE_VER_MINOR(version),
@ -1050,6 +1182,8 @@ static int spi_engine_probe(struct platform_device *pdev)
return -ENODEV;
}
data_width_reg_val = readl(spi_engine->base + SPI_ENGINE_REG_DATA_WIDTH);
if (adi_axi_pcore_ver_gteq(version, 1, 1)) {
unsigned int sizes = readl(spi_engine->base +
SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
@ -1080,7 +1214,6 @@ static int spi_engine_probe(struct platform_device *pdev)
if (ret)
return ret;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
@ -1097,6 +1230,9 @@ static int spi_engine_probe(struct platform_device *pdev)
}
if (adi_axi_pcore_ver_gteq(version, 1, 3))
host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH;
if (adi_axi_pcore_ver_gteq(version, 2, 0))
host->num_data_lanes = FIELD_GET(SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK,
data_width_reg_val);
if (host->max_speed_hz == 0)
return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0");

1007
drivers/spi/spi-axiado.c Normal file

File diff suppressed because it is too large Load diff

133
drivers/spi/spi-axiado.h Normal file
View file

@ -0,0 +1,133 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Axiado SPI controller driver (Host mode only)
*
* Copyright (C) 2022-2025 Axiado Corporation (or its affiliates).
*/
#ifndef SPI_AXIADO_H
#define SPI_AXIADO_H
/* Name of this driver */
#define AX_SPI_NAME "axiado-db-spi"
/* Axiado - SPI Digital Blocks IP design registers */
#define AX_SPI_TX_FAETR 0x18 // TX-FAETR
#define ALMOST_EMPTY_TRESHOLD 0x00 // Programmed threshold value
#define AX_SPI_RX_FAFTR 0x28 // RX-FAETR
#define ALMOST_FULL_TRESHOLD 0x0c // Programmed threshold value
#define FIFO_DEPTH 256 // 256 bytes
#define AX_SPI_CR1 0x00 // CR1
#define AX_SPI_CR1_CLR 0x00 // CR1 - Clear
#define AX_SPI_CR1_SCR 0x01 // CR1 - controller reset
#define AX_SPI_CR1_SCE 0x02 // CR1 - Controller Enable/Disable
#define AX_SPI_CR1_CPHA 0x08 // CR1 - CPH
#define AX_SPI_CR1_CPOL 0x10 // CR1 - CPO
#define AX_SPI_CR2 0x04 // CR2
#define AX_SPI_CR2_SWD 0x04 // CR2 - Write Enabel/Disable
#define AX_SPI_CR2_SRD 0x08 // CR2 - Read Enable/Disable
#define AX_SPI_CR2_SRI 0x10 // CR2 - Read First Byte Ignore
#define AX_SPI_CR2_HTE 0x40 // CR2 - Host Transmit Enable
#define AX_SPI_CR3 0x08 // CR3
#define AX_SPI_CR3_SDL 0x00 // CR3 - Data lines
#define AX_SPI_CR3_QUAD 0x02 // CR3 - Data lines
/* As per Digital Blocks datasheet clock frequency range
* Min - 244KHz
* Max - 62.5MHz
* SCK Clock Divider Register Values
*/
#define AX_SPI_RX_FBCAR 0x24 // RX_FBCAR
#define AX_SPI_TX_FBCAR 0x14 // TX_FBCAR
#define AX_SPI_SCDR 0x2c // SCDR
#define AX_SPI_SCD_MIN 0x1fe // Valid SCD (SCK Clock Divider Register)
#define AX_SPI_SCD_DEFAULT 0x06 // Default SCD (SCK Clock Divider Register)
#define AX_SPI_SCD_MAX 0x00 // Valid SCD (SCK Clock Divider Register)
#define AX_SPI_SCDR_SCS 0x0200 // SCDR - AMBA Bus Clock source
#define AX_SPI_IMR 0x34 // IMR
#define AX_SPI_IMR_CLR 0x00 // IMR - Clear
#define AX_SPI_IMR_TFOM 0x02 // IMR - TFO
#define AX_SPI_IMR_MTCM 0x40 // IMR - MTC
#define AX_SPI_IMR_TFEM 0x10 // IMR - TFE
#define AX_SPI_IMR_RFFM 0x20 // IMR - RFFM
#define AX_SPI_ISR 0x30 // ISR
#define AX_SPI_ISR_CLR 0xff // ISR - Clear
#define AX_SPI_ISR_MTC 0x40 // ISR - MTC
#define AX_SPI_ISR_TFE 0x10 // ISR - TFE
#define AX_SPI_ISR_RFF 0x20 // ISR - RFF
#define AX_SPI_IVR 0x38 // IVR
#define AX_SPI_IVR_TFOV 0x02 // IVR - TFOV
#define AX_SPI_IVR_MTCV 0x40 // IVR - MTCV
#define AX_SPI_IVR_TFEV 0x10 // IVR - TFEV
#define AX_SPI_IVR_RFFV 0x20 // IVR - RFFV
#define AX_SPI_TXFIFO 0x0c // TX_FIFO
#define AX_SPI_TX_RX_FBCR 0x10 // TX_RX_FBCR
#define AX_SPI_RXFIFO 0x1c // RX_FIFO
#define AX_SPI_TS0 0x00 // Target select 0
#define AX_SPI_TS1 0x01 // Target select 1
#define AX_SPI_TS2 0x10 // Target select 2
#define AX_SPI_TS3 0x11 // Target select 3
#define SPI_AUTOSUSPEND_TIMEOUT 3000
/* Default number of chip select lines also used as maximum number of chip select lines */
#define AX_SPI_DEFAULT_NUM_CS 4
/* Default number of command buffer size */
#define AX_SPI_COMMAND_BUFFER_SIZE 16 //Command + address bytes
/* Target select mask
* 00 TS0
* 01 TS1
* 10 TS2
* 11 TS3
*/
#define AX_SPI_DEFAULT_TS_MASK 0x03
#define AX_SPI_RX_FIFO_DRAIN_LIMIT 24
#define AX_SPI_TRX_FIFO_TIMEOUT 1000
/**
* struct ax_spi - This definition defines spi driver instance
* @regs: Virtual address of the SPI controller registers
* @ref_clk: Pointer to the peripheral clock
* @pclk: Pointer to the APB clock
* @speed_hz: Current SPI bus clock speed in Hz
* @txbuf: Pointer to the TX buffer
* @rxbuf: Pointer to the RX buffer
* @tx_bytes: Number of bytes left to transfer
* @rx_bytes: Number of bytes requested
* @tx_fifo_depth: Depth of the TX FIFO
* @current_rx_fifo_word: Buffers the 32-bit word read from RXFIFO
* @bytes_left_in_current_rx_word: Bytes to be extracted from current 32-bit word
* @current_rx_fifo_word_for_irq: Buffers the 32-bit word read from RXFIFO for IRQ
* @bytes_left_in_current_rx_word_for_irq: IRQ bytes to be extracted from current 32-bit word
* @rx_discard: Number of bytes to discard
* @rx_copy_remaining: Number of bytes to copy
*/
struct ax_spi {
void __iomem *regs;
struct clk *ref_clk;
struct clk *pclk;
unsigned int clk_rate;
u32 speed_hz;
const u8 *tx_buf;
u8 *rx_buf;
int tx_bytes;
int rx_bytes;
unsigned int tx_fifo_depth;
u32 current_rx_fifo_word;
int bytes_left_in_current_rx_word;
u32 current_rx_fifo_word_for_irq;
int bytes_left_in_current_rx_word_for_irq;
int rx_discard;
int rx_copy_remaining;
};
#endif /* SPI_AXIADO_H */

View file

@ -1529,7 +1529,6 @@ int bcm_qspi_probe(struct platform_device *pdev,
host->transfer_one = bcm_qspi_transfer_one;
host->mem_ops = &bcm_qspi_mem_ops;
host->cleanup = bcm_qspi_cleanup;
host->dev.of_node = dev->of_node;
host->num_chipselect = NUM_CHIPSELECT;
host->use_gpio_descriptors = true;

View file

@ -1368,7 +1368,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
ctlr->transfer_one = bcm2835_spi_transfer_one;
ctlr->handle_err = bcm2835_spi_handle_err;
ctlr->prepare_message = bcm2835_spi_prepare_message;
ctlr->dev.of_node = pdev->dev.of_node;
bs = spi_controller_get_devdata(ctlr);
bs->ctlr = ctlr;

View file

@ -502,7 +502,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
host->handle_err = bcm2835aux_spi_handle_err;
host->prepare_message = bcm2835aux_spi_prepare_message;
host->unprepare_message = bcm2835aux_spi_unprepare_message;
host->dev.of_node = pdev->dev.of_node;
host->use_gpio_descriptors = true;
bs = spi_controller_get_devdata(host);

View file

@ -142,6 +142,7 @@ struct bcm63xx_hsspi {
u32 wait_mode;
u32 xfer_mode;
u32 prepend_cnt;
u32 md_start;
u8 *prepend_buf;
};
@ -268,18 +269,20 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host,
{
struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host);
bool tx_only = false;
bool tx_only = false, multidata = false;
struct spi_transfer *t;
/*
* Multiple transfers within a message may be combined into one transfer
* to the controller using its prepend feature. A SPI message is prependable
* only if the following are all true:
* 1. One or more half duplex write transfer in single bit mode
* 2. Optional full duplex read/write at the end
* 3. No delay and cs_change between transfers
* 1. One or more half duplex write transfers at the start
* 2. Optional switch from single to dual bit within the write transfers
* 3. Optional full duplex read/write at the end if all single bit
* 4. No delay and cs_change between transfers
*/
bs->prepend_cnt = 0;
bs->md_start = 0;
list_for_each_entry(t, &msg->transfers, transfer_list) {
if ((spi_delay_to_ns(&t->delay, t) > 0) || t->cs_change) {
bcm63xx_prepend_printk_on_checkfail(bs,
@ -297,31 +300,44 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host,
return false;
}
if (t->tx_nbits > SPI_NBITS_SINGLE &&
!list_is_last(&t->transfer_list, &msg->transfers)) {
if (t->tx_nbits == SPI_NBITS_SINGLE &&
!list_is_last(&t->transfer_list, &msg->transfers) &&
multidata) {
bcm63xx_prepend_printk_on_checkfail(bs,
"multi-bit prepend buf not supported!\n");
"single-bit after multi-bit not supported!\n");
return false;
}
if (t->tx_nbits == SPI_NBITS_SINGLE) {
memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len);
bs->prepend_cnt += t->len;
}
if (t->tx_nbits > SPI_NBITS_SINGLE)
multidata = true;
memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len);
bs->prepend_cnt += t->len;
if (t->tx_nbits == SPI_NBITS_SINGLE)
bs->md_start += t->len;
} else {
if (!list_is_last(&t->transfer_list, &msg->transfers)) {
bcm63xx_prepend_printk_on_checkfail(bs,
"rx/tx_rx transfer not supported when it is not last one!\n");
return false;
}
if (t->rx_buf && t->rx_nbits == SPI_NBITS_SINGLE &&
multidata) {
bcm63xx_prepend_printk_on_checkfail(bs,
"single-bit after multi-bit not supported!\n");
return false;
}
}
if (list_is_last(&t->transfer_list, &msg->transfers)) {
memcpy(t_prepend, t, sizeof(struct spi_transfer));
if (tx_only && t->tx_nbits == SPI_NBITS_SINGLE) {
if (tx_only) {
/*
* if the last one is also a single bit tx only transfer, merge
* if the last one is also a tx only transfer, merge
* all of them into one single tx transfer
*/
t_prepend->len = bs->prepend_cnt;
@ -329,7 +345,7 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host,
bs->prepend_cnt = 0;
} else {
/*
* if the last one is not a tx only transfer or dual tx xfer, all
* if the last one is not a tx only transfer, all
* the previous transfers are sent through prepend bytes and
* make sure it does not exceed the max prepend len
*/
@ -339,6 +355,15 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host,
return false;
}
}
/*
* If switching from single-bit to multi-bit, make sure
* the start offset does not exceed the maximum
*/
if (multidata && bs->md_start > HSSPI_MAX_PREPEND_LEN) {
bcm63xx_prepend_printk_on_checkfail(bs,
"exceed max multi-bit offset, abort prepending transfers!\n");
return false;
}
}
}
@ -381,11 +406,11 @@ static int bcm63xx_hsspi_do_prepend_txrx(struct spi_device *spi,
if (t->rx_nbits == SPI_NBITS_DUAL) {
reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT;
reg |= bs->md_start << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT;
}
if (t->tx_nbits == SPI_NBITS_DUAL) {
reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT;
reg |= bs->md_start << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT;
}
}
@ -692,13 +717,6 @@ static bool bcm63xx_hsspi_mem_supports_op(struct spi_mem *mem,
if (!spi_mem_default_supports_op(mem, op))
return false;
/* Controller doesn't support spi mem dual io mode */
if ((op->cmd.opcode == SPINOR_OP_READ_1_2_2) ||
(op->cmd.opcode == SPINOR_OP_READ_1_2_2_4B) ||
(op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR) ||
(op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR_4B))
return false;
return true;
}
@ -804,7 +822,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
init_completion(&bs->done);
host->mem_ops = &bcm63xx_hsspi_mem_ops;
host->dev.of_node = dev->of_node;
if (!dev->of_node)
host->bus_num = HSSPI_BUS_NUM;

View file

@ -571,7 +571,6 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
goto out_err;
}
host->dev.of_node = dev->of_node;
host->bus_num = bus_num;
host->num_chipselect = num_cs;
host->transfer_one_message = bcm63xx_spi_transfer_one;

View file

@ -500,7 +500,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev)
mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
host->dev.of_node = dev->of_node;
if (!dev->of_node)
host->bus_num = HSSPI_BUS_NUM;

View file

@ -40,13 +40,15 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX);
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
#define CQSPI_RD_NO_IRQ BIT(6)
#define CQSPI_DMA_SET_MASK BIT(7)
#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
#define CQSPI_DISABLE_STIG_MODE BIT(9)
#define CQSPI_DISABLE_RUNTIME_PM BIT(10)
#define CQSPI_NO_INDIRECT_MODE BIT(11)
#define CQSPI_HAS_WR_PROTECT BIT(12)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@ -55,7 +57,8 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX);
#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
enum {
CLK_QSPI_APB = 0,
CLK_QSPI_REF = 0,
CLK_QSPI_APB,
CLK_QSPI_AHB,
CLK_QSPI_NUM,
};
@ -76,8 +79,7 @@ struct cqspi_flash_pdata {
struct cqspi_st {
struct platform_device *pdev;
struct spi_controller *host;
struct clk *clk;
struct clk *clks[CLK_QSPI_NUM];
struct clk_bulk_data clks[CLK_QSPI_NUM];
unsigned int sclk;
void __iomem *iobase;
@ -108,6 +110,7 @@ struct cqspi_st {
bool apb_ahb_hazard;
bool is_jh7110; /* Flag for StarFive JH7110 SoC */
bool is_rzn1; /* Flag for Renesas RZ/N1 SoC */
bool disable_stig_mode;
refcount_t refcount;
refcount_t inflight_ops;
@ -121,8 +124,6 @@ struct cqspi_driver_platdata {
int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
u_char *rxbuf, loff_t from_addr, size_t n_rx);
u32 (*get_dma_status)(struct cqspi_st *cqspi);
int (*jh7110_clk_init)(struct platform_device *pdev,
struct cqspi_st *cqspi);
};
/* Operation timeout value */
@ -219,6 +220,8 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44
#define CQSPI_REG_WR_PROT_CTRL 0x58
#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
@ -374,17 +377,12 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
/* Clear interrupt */
writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
if (ddata->get_dma_status(cqspi)) {
complete(&cqspi->transfer_complete);
return IRQ_HANDLED;
}
}
else if (!cqspi->slow_sram)
irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
else
if (cqspi->use_dma_read && ddata && ddata->get_dma_status)
irq_status = ddata->get_dma_status(cqspi);
else if (cqspi->slow_sram)
irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR;
else
irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
@ -1263,7 +1261,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
reg = readl(reg_base + CQSPI_REG_CONFIG);
reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
reg |= div << CQSPI_REG_CONFIG_BAUD_LSB;
writel(reg, reg_base + CQSPI_REG_CONFIG);
}
@ -1340,8 +1338,9 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
* mode. So, we can not use direct mode when in DTR mode for writing
* data.
*/
if (!op->cmd.dtr && cqspi->use_direct_mode &&
cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
if ((!op->cmd.dtr && cqspi->use_direct_mode &&
cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) ||
(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
}
@ -1430,7 +1429,8 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
if (ret)
return ret;
if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
if ((cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) ||
(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE))
return cqspi_direct_read_execute(f_pdata, buf, from, len);
if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
@ -1514,6 +1514,7 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
static bool cqspi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
bool all_true, all_false;
/*
@ -1536,6 +1537,13 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
return false;
if (op->data.nbytes && op->data.buswidth != 8)
return false;
/* A single opcode is supported, it will be repeated */
if ((op->cmd.opcode >> 8) != (op->cmd.opcode & 0xFF))
return false;
if (cqspi->is_rzn1)
return false;
} else if (!all_false) {
/* Mixed DTR modes are not supported. */
return false;
@ -1589,20 +1597,20 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
/* Zero signals FIFO depth should be runtime detected. */
cqspi->fifo_depth = 0;
}
if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) {
if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
/* Zero signals FIFO depth should be runtime detected. */
cqspi->fifo_depth = 0;
}
if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
dev_err(dev, "couldn't determine fifo-width\n");
return -ENXIO;
}
if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width))
cqspi->fifo_width = 4;
if (of_property_read_u32(np, "cdns,trigger-address",
&cqspi->trigger_address)) {
dev_err(dev, "couldn't determine trigger-address\n");
return -ENXIO;
if (of_property_read_u32(np, "cdns,trigger-address",
&cqspi->trigger_address)) {
dev_err(dev, "couldn't determine trigger-address\n");
return -ENXIO;
}
}
if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
@ -1627,19 +1635,24 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
/* Disable all interrupts. */
writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
/* Configure the SRAM split to 1:1 . */
writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) {
/* Configure the SRAM split to 1:1 . */
writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
/* Load indirect trigger address. */
writel(cqspi->trigger_address,
cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
/* Load indirect trigger address. */
writel(cqspi->trigger_address,
cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
/* Program read watermark -- 1/2 of the FIFO. */
writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
/* Program write watermark -- 1/8 of the FIFO. */
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
}
/* Program read watermark -- 1/2 of the FIFO. */
writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
/* Program write watermark -- 1/8 of the FIFO. */
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
/* Disable write protection at controller level */
if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_HAS_WR_PROTECT)
writel(0, cqspi->iobase + CQSPI_REG_WR_PROT_CTRL);
/* Disable direct access controller */
if (!cqspi->use_direct_mode) {
@ -1661,6 +1674,9 @@ static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi)
struct device *dev = &cqspi->pdev->dev;
u32 reg, fifo_depth;
if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)
return;
/*
* Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N
* the FIFO depth.
@ -1764,51 +1780,6 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi)
return 0;
}
static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
{
static struct clk_bulk_data qspiclk[] = {
{ .id = "apb" },
{ .id = "ahb" },
};
int ret = 0;
ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
if (ret) {
dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
return ret;
}
cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;
ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
if (ret) {
dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
return ret;
}
ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
if (ret) {
dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
goto disable_apb_clk;
}
cqspi->is_jh7110 = true;
return 0;
disable_apb_clk:
clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
return ret;
}
static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
{
clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
}
static int cqspi_probe(struct platform_device *pdev)
{
const struct cqspi_driver_platdata *ddata;
@ -1817,8 +1788,7 @@ static int cqspi_probe(struct platform_device *pdev)
struct spi_controller *host;
struct resource *res_ahb;
struct cqspi_st *cqspi;
int ret;
int irq;
int ret, irq;
host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
if (!host)
@ -1827,13 +1797,15 @@ static int cqspi_probe(struct platform_device *pdev)
host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
host->mem_ops = &cqspi_mem_ops;
host->mem_caps = &cqspi_mem_caps;
host->dev.of_node = pdev->dev.of_node;
cqspi = spi_controller_get_devdata(host);
if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi"))
cqspi->is_jh7110 = true;
if (of_device_is_compatible(pdev->dev.of_node, "renesas,rzn1-qspi"))
cqspi->is_rzn1 = true;
cqspi->pdev = pdev;
cqspi->host = host;
cqspi->is_jh7110 = false;
cqspi->ddata = ddata = of_device_get_match_data(dev);
platform_set_drvdata(pdev, cqspi);
@ -1844,14 +1816,22 @@ static int cqspi_probe(struct platform_device *pdev)
return -ENODEV;
}
/* Obtain QSPI clock. */
cqspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cqspi->clk)) {
dev_err(dev, "Cannot claim QSPI clock.\n");
ret = PTR_ERR(cqspi->clk);
ret = cqspi_setup_flash(cqspi);
if (ret) {
dev_err(dev, "failed to setup flash parameters %d\n", ret);
return ret;
}
/* Obtain QSPI clocks. */
ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks);
if (ret)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
if (!cqspi->clks[CLK_QSPI_REF].clk) {
dev_err(dev, "Cannot claim mandatory QSPI ref clock.\n");
return -ENODEV;
}
/* Obtain and remap controller address. */
cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cqspi->iobase)) {
@ -1881,11 +1861,10 @@ static int cqspi_probe(struct platform_device *pdev)
if (ret)
return ret;
ret = clk_prepare_enable(cqspi->clk);
ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks);
if (ret) {
dev_err(dev, "Cannot enable QSPI clock.\n");
goto probe_clk_failed;
dev_err(dev, "Cannot enable QSPI clocks.\n");
goto disable_rpm;
}
/* Obtain QSPI reset control */
@ -1893,22 +1872,22 @@ static int cqspi_probe(struct platform_device *pdev)
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
dev_err(dev, "Cannot get QSPI reset.\n");
goto probe_reset_failed;
goto disable_clks;
}
rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
if (IS_ERR(rstc_ocp)) {
ret = PTR_ERR(rstc_ocp);
dev_err(dev, "Cannot get QSPI OCP reset.\n");
goto probe_reset_failed;
goto disable_clks;
}
if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
if (cqspi->is_jh7110) {
rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
if (IS_ERR(rstc_ref)) {
ret = PTR_ERR(rstc_ref);
dev_err(dev, "Cannot get QSPI REF reset.\n");
goto probe_reset_failed;
goto disable_clks;
}
reset_control_assert(rstc_ref);
reset_control_deassert(rstc_ref);
@ -1920,8 +1899,13 @@ static int cqspi_probe(struct platform_device *pdev)
reset_control_assert(rstc_ocp);
reset_control_deassert(rstc_ocp);
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
host->max_speed_hz = cqspi->master_ref_clk_hz;
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clks[CLK_QSPI_REF].clk);
if (!cqspi->is_rzn1) {
host->max_speed_hz = cqspi->master_ref_clk_hz;
} else {
host->max_speed_hz = cqspi->master_ref_clk_hz / 2;
host->min_speed_hz = cqspi->master_ref_clk_hz / 32;
}
/* write completion is supported by default */
cqspi->wr_completion = true;
@ -1946,19 +1930,13 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->slow_sram = true;
if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
cqspi->apb_ahb_hazard = true;
if (ddata->jh7110_clk_init) {
ret = cqspi_jh7110_clk_init(pdev, cqspi);
if (ret)
goto probe_reset_failed;
}
if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
cqspi->disable_stig_mode = true;
if (ddata->quirks & CQSPI_DMA_SET_MASK) {
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto probe_reset_failed;
goto disable_clks;
}
}
@ -1969,7 +1947,7 @@ static int cqspi_probe(struct platform_device *pdev)
pdev->name, cqspi);
if (ret) {
dev_err(dev, "Cannot request IRQ.\n");
goto probe_reset_failed;
goto disable_clks;
}
cqspi_wait_idle(cqspi);
@ -1987,48 +1965,42 @@ static int cqspi_probe(struct platform_device *pdev)
pm_runtime_get_noresume(dev);
}
ret = cqspi_setup_flash(cqspi);
if (ret) {
dev_err(dev, "failed to setup flash parameters %d\n", ret);
goto probe_setup_failed;
}
host->num_chipselect = cqspi->num_chipselect;
if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
cqspi_device_reset(cqspi);
if (cqspi->use_direct_mode) {
if (cqspi->use_direct_mode && !cqspi->is_rzn1) {
ret = cqspi_request_mmap_dma(cqspi);
if (ret == -EPROBE_DEFER) {
dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n");
goto probe_setup_failed;
goto disable_controller;
}
}
ret = spi_register_controller(host);
if (ret) {
dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
goto probe_setup_failed;
goto release_dma_chan;
}
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
pm_runtime_mark_last_busy(dev);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
pm_runtime_put_autosuspend(dev);
}
return 0;
probe_setup_failed:
release_dma_chan:
if (cqspi->rx_chan)
dma_release_channel(cqspi->rx_chan);
disable_controller:
cqspi_controller_enable(cqspi, 0);
disable_clks:
if (pm_runtime_get_sync(&pdev->dev) >= 0)
clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
disable_rpm:
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
pm_runtime_disable(dev);
cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
if (pm_runtime_get_sync(&pdev->dev) >= 0)
clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
return ret;
}
@ -2037,6 +2009,7 @@ static void cqspi_remove(struct platform_device *pdev)
const struct cqspi_driver_platdata *ddata;
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
int ret = 0;
ddata = of_device_get_match_data(dev);
@ -2046,17 +2019,18 @@ static void cqspi_remove(struct platform_device *pdev)
cqspi_wait_idle(cqspi);
spi_unregister_controller(cqspi->host);
cqspi_controller_enable(cqspi, 0);
if (cqspi->rx_chan)
dma_release_channel(cqspi->rx_chan);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
if (pm_runtime_get_sync(&pdev->dev) >= 0)
clk_disable(cqspi->clk);
cqspi_controller_enable(cqspi, 0);
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
ret = pm_runtime_get_sync(&pdev->dev);
if (ret >= 0)
clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
pm_runtime_put_sync(&pdev->dev);
@ -2069,15 +2043,19 @@ static int cqspi_runtime_suspend(struct device *dev)
struct cqspi_st *cqspi = dev_get_drvdata(dev);
cqspi_controller_enable(cqspi, 0);
clk_disable_unprepare(cqspi->clk);
clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
return 0;
}
static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
int ret;
ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks);
if (ret)
return ret;
clk_prepare_enable(cqspi->clk);
cqspi_wait_idle(cqspi);
cqspi_controller_enable(cqspi, 0);
cqspi_controller_init(cqspi);
@ -2137,33 +2115,29 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE
| CQSPI_NO_SUPPORT_WR_COMPLETION
| CQSPI_SLOW_SRAM
| CQSPI_DISABLE_STIG_MODE
| CQSPI_DISABLE_RUNTIME_PM,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION |
CQSPI_SLOW_SRAM | CQSPI_DISABLE_STIG_MODE |
CQSPI_DISABLE_RUNTIME_PM,
};
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
| CQSPI_DMA_SET_MASK,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA |
CQSPI_DMA_SET_MASK,
.indirect_read_dma = cqspi_versal_indirect_read_dma,
.get_dma_status = cqspi_get_versal_dma_status,
};
static const struct cqspi_driver_platdata versal2_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
| CQSPI_DMA_SET_MASK
| CQSPI_SUPPORT_DEVICE_RESET,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA |
CQSPI_DMA_SET_MASK | CQSPI_SUPPORT_DEVICE_RESET,
.indirect_read_dma = cqspi_versal_indirect_read_dma,
.get_dma_status = cqspi_get_versal_dma_status,
};
static const struct cqspi_driver_platdata jh7110_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
.jh7110_clk_init = cqspi_jh7110_clk_init,
};
static const struct cqspi_driver_platdata pensando_cdns_qspi = {
@ -2173,7 +2147,13 @@ static const struct cqspi_driver_platdata pensando_cdns_qspi = {
static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION |
CQSPI_RD_NO_IRQ,
CQSPI_RD_NO_IRQ,
};
static const struct cqspi_driver_platdata renesas_rzn1_qspi = {
.hwcaps_mask = CQSPI_SUPPORTS_QUAD,
.quirks = CQSPI_NO_SUPPORT_WR_COMPLETION | CQSPI_RD_NO_IRQ |
CQSPI_HAS_WR_PROTECT | CQSPI_NO_INDIRECT_MODE,
};
static const struct of_device_id cqspi_dt_ids[] = {
@ -2217,6 +2197,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "amd,versal2-ospi",
.data = &versal2_ospi,
},
{
.compatible = "renesas,rzn1-qspi",
.data = &renesas_rzn1_qspi,
},
{ /* end of table */ }
};

View file

@ -2,7 +2,6 @@
// Cadence XSPI flash controller driver
// Copyright (C) 2020-21 Cadence
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
@ -12,15 +11,16 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/bitfield.h>
#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/bitrev.h>
#include <linux/util_macros.h>
#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
#define CDNS_XSPI_MAX_BANKS 8
@ -350,6 +350,7 @@ static const int cdns_mrvl_xspi_clk_div_list[] = {
struct cdns_xspi_dev {
struct platform_device *pdev;
struct spi_controller *host;
struct device *dev;
void __iomem *iobase;
@ -774,19 +775,15 @@ static int marvell_xspi_mem_op_execute(struct spi_mem *mem,
return ret;
}
#ifdef CONFIG_ACPI
static bool cdns_xspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_device *spi = mem->spi;
const union acpi_object *obj;
struct acpi_device *adev;
struct device *dev = &spi->dev;
u32 value;
adev = ACPI_COMPANION(&spi->dev);
if (!acpi_dev_get_property(adev, "spi-tx-bus-width", ACPI_TYPE_INTEGER,
&obj)) {
switch (obj->integer.value) {
if (!device_property_read_u32(dev, "spi-tx-bus-width", &value)) {
switch (value) {
case 1:
break;
case 2:
@ -799,16 +796,13 @@ static bool cdns_xspi_supports_op(struct spi_mem *mem,
spi->mode |= SPI_TX_OCTAL;
break;
default:
dev_warn(&spi->dev,
"spi-tx-bus-width %lld not supported\n",
obj->integer.value);
dev_warn(dev, "spi-tx-bus-width %u not supported\n", value);
break;
}
}
if (!acpi_dev_get_property(adev, "spi-rx-bus-width", ACPI_TYPE_INTEGER,
&obj)) {
switch (obj->integer.value) {
if (!device_property_read_u32(dev, "spi-rx-bus-width", &value)) {
switch (value) {
case 1:
break;
case 2:
@ -821,9 +815,7 @@ static bool cdns_xspi_supports_op(struct spi_mem *mem,
spi->mode |= SPI_RX_OCTAL;
break;
default:
dev_warn(&spi->dev,
"spi-rx-bus-width %lld not supported\n",
obj->integer.value);
dev_warn(dev, "spi-rx-bus-width %u not supported\n", value);
break;
}
}
@ -833,7 +825,6 @@ static bool cdns_xspi_supports_op(struct spi_mem *mem,
return true;
}
#endif
static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
@ -846,17 +837,13 @@ static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *
}
static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
#ifdef CONFIG_ACPI
.supports_op = cdns_xspi_supports_op,
#endif
.supports_op = PTR_IF(IS_ENABLED(CONFIG_ACPI), cdns_xspi_supports_op),
.exec_op = cdns_xspi_mem_op_execute,
.adjust_op_size = cdns_xspi_adjust_mem_op_size,
};
static const struct spi_controller_mem_ops marvell_xspi_mem_ops = {
#ifdef CONFIG_ACPI
.supports_op = cdns_xspi_supports_op,
#endif
.supports_op = PTR_IF(IS_ENABLED(CONFIG_ACPI), cdns_xspi_supports_op),
.exec_op = marvell_xspi_mem_op_execute,
.adjust_op_size = cdns_xspi_adjust_mem_op_size,
};
@ -1157,12 +1144,9 @@ static int cdns_xspi_probe(struct platform_device *pdev)
SPI_MODE_0 | SPI_MODE_3;
cdns_xspi = spi_controller_get_devdata(host);
cdns_xspi->driver_data = of_device_get_match_data(dev);
if (!cdns_xspi->driver_data) {
cdns_xspi->driver_data = acpi_device_get_match_data(dev);
if (!cdns_xspi->driver_data)
return -ENODEV;
}
cdns_xspi->driver_data = device_get_match_data(dev);
if (!cdns_xspi->driver_data)
return -ENODEV;
if (cdns_xspi->driver_data->mrvl_hw_overlay) {
host->mem_ops = &marvell_xspi_mem_ops;
@ -1174,12 +1158,12 @@ static int cdns_xspi_probe(struct platform_device *pdev)
cdns_xspi->sdma_handler = &cdns_xspi_sdma_handle;
cdns_xspi->set_interrupts_handler = &cdns_xspi_set_interrupts;
}
host->dev.of_node = pdev->dev.of_node;
host->bus_num = -1;
platform_set_drvdata(pdev, host);
platform_set_drvdata(pdev, cdns_xspi);
cdns_xspi->pdev = pdev;
cdns_xspi->host = host;
cdns_xspi->dev = &pdev->dev;
cdns_xspi->cur_cs = 0;
@ -1268,6 +1252,30 @@ static int cdns_xspi_probe(struct platform_device *pdev)
return 0;
}
static int cdns_xspi_suspend(struct device *dev)
{
struct cdns_xspi_dev *cdns_xspi = dev_get_drvdata(dev);
return spi_controller_suspend(cdns_xspi->host);
}
static int cdns_xspi_resume(struct device *dev)
{
struct cdns_xspi_dev *cdns_xspi = dev_get_drvdata(dev);
if (cdns_xspi->driver_data->mrvl_hw_overlay) {
cdns_mrvl_xspi_setup_clock(cdns_xspi, MRVL_DEFAULT_CLK);
cdns_xspi_configure_phy(cdns_xspi);
}
cdns_xspi->set_interrupts_handler(cdns_xspi, false);
return spi_controller_resume(cdns_xspi->host);
}
static DEFINE_SIMPLE_DEV_PM_OPS(cdns_xspi_pm_ops,
cdns_xspi_suspend, cdns_xspi_resume);
static const struct of_device_id cdns_xspi_of_match[] = {
{
.compatible = "cdns,xspi-nor",
@ -1286,6 +1294,7 @@ static struct platform_driver cdns_xspi_platform_driver = {
.driver = {
.name = CDNS_XSPI_NAME,
.of_match_table = cdns_xspi_of_match,
.pm = pm_sleep_ptr(&cdns_xspi_pm_ops),
},
};

View file

@ -651,7 +651,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
return -ENOMEM;
xspi = spi_controller_get_devdata(ctlr);
ctlr->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, ctlr);
xspi->regs = devm_platform_ioremap_resource(pdev, 0);

View file

@ -54,7 +54,6 @@ static int octeon_spi_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
host->dev.of_node = pdev->dev.of_node;
err = devm_spi_register_controller(&pdev->dev, host);
if (err) {
dev_err(&pdev->dev, "register host failed: %d\n", err);

View file

@ -67,7 +67,6 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
host->transfer_one_message = octeon_spi_transfer_one_message;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
host->dev.of_node = pdev->dev.of_node;
pci_set_drvdata(pdev, host);

View file

@ -107,7 +107,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
host->bus_num = -1;
host->mode_bits = SPI_CPHA | SPI_CS_HIGH;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
host->dev.of_node = pdev->dev.of_node;
host->prepare_message = spi_clps711x_prepare_message;
host->transfer_one = spi_clps711x_transfer_one;

View file

@ -371,6 +371,14 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
fwnode_property_read_u32(xu_fwnode, "01fa-sidecar-instances", &nsidecars);
/*
* Depending on the value of nsidecars we either create a software node
* or assign an fwnode. We don't want software node to be attached to
* the default one. That's why we need to clear the SPI controller fwnode
* first.
*/
device_set_node(&priv->ctlr->dev, NULL);
if (nsidecars) {
struct software_node_ref_args args[] = {
SOFTWARE_NODE_REFERENCE(fwnode, 0, GPIO_ACTIVE_LOW),

View file

@ -988,7 +988,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
}
host->use_gpio_descriptors = true;
host->dev.of_node = pdev->dev.of_node;
host->bus_num = pdev->id;
host->num_chipselect = pdata->num_chipselect;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);

View file

@ -682,15 +682,12 @@ static int dln2_spi_probe(struct platform_device *pdev)
struct spi_controller *host;
struct dln2_spi *dln2;
struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
int ret;
host = spi_alloc_host(&pdev->dev, sizeof(*dln2));
if (!host)
return -ENOMEM;
device_set_node(&host->dev, dev_fwnode(dev));
platform_set_drvdata(pdev, host);
dln2 = spi_controller_get_devdata(host);

View file

@ -1,331 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
//
// Authors:
// Ramil Zaripov <Ramil.Zaripov@baikalelectronics.ru>
// Serge Semin <Sergey.Semin@baikalelectronics.ru>
//
// Baikal-T1 DW APB SPI and System Boot SPI driver
//
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spi/spi-mem.h>
#include <linux/spi/spi.h>
#include "spi-dw.h"
#define BT1_BOOT_DIRMAP 0
#define BT1_BOOT_REGS 1
/*
 * Driver-private state wrapping the generic DW APB SSI descriptor.
 *
 * @dws:     generic DesignWare SPI controller state
 * @clk:     reference clock feeding the SSI block
 * @mux:     mux switching between register access and directly mapped
 *           flash access (System Boot controller only)
 * @map:     ioremapped directly-mapped flash window, NULL if unavailable
 * @map_len: size of the directly-mapped window
 */
struct dw_spi_bt1 {
	struct dw_spi dws;
	struct clk *clk;
	struct mux_control *mux;

#ifdef CONFIG_SPI_DW_BT1_DIRMAP
	void __iomem *map;
	resource_size_t map_len;
#endif
};

/* Recover the wrapper from the dw_spi set as the controller's devdata. */
#define to_dw_spi_bt1(_ctlr) \
	container_of(spi_controller_get_devdata(_ctlr), struct dw_spi_bt1, dws)

/* Per-compatible init hook, selected via OF match data in probe. */
typedef int (*dw_spi_bt1_init_cb)(struct platform_device *pdev,
				  struct dw_spi_bt1 *dwsbt1);
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
/*
 * Validate a dirmap descriptor for the System Boot controller.
 *
 * Only operations that the generic DW SPI-mem path also supports can be
 * served from the directly mapped window, and only if that window was
 * successfully ioremapped at init time.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported op templates and
 * -EINVAL when the requested region exceeds the mapped flash bounds.
 */
static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc)
{
	struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);

	/* No MMIO window, or an op the regular mem-ops path can't execute. */
	if (!dwsbt1->map ||
	    !dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl))
		return -EOPNOTSUPP;

	/* Only read accesses can go through the directly mapped region. */
	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EOPNOTSUPP;

	/*
	 * Make sure the requested region doesn't go out of the physically
	 * mapped flash memory bounds.
	 */
	if (desc->info.offset + desc->info.length > dwsbt1->map_len)
		return -EINVAL;

	return 0;
}
/*
 * Directly mapped SPI memory region is only accessible in the dword chunks.
 * That's why we have to create a dedicated read-method to copy data from there
 * to the passed buffer.
 */
static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t len)
{
	size_t shift, chunk;
	u32 data;

	/*
	 * We split the copying up into the next three stages: unaligned head,
	 * aligned body, unaligned tail.
	 */
	shift = (size_t)from & 0x3;
	if (shift) {
		/*
		 * Head: read the dword containing the (unaligned) start
		 * address and copy out only the bytes past the misalignment.
		 */
		chunk = min_t(size_t, 4 - shift, len);
		data = readl_relaxed(from - shift);
		memcpy(to, (char *)&data + shift, chunk);
		from += chunk;
		to += chunk;
		len -= chunk;
	}

	/* Body: whole aligned dwords. */
	while (len >= 4) {
		data = readl_relaxed(from);
		memcpy(to, &data, 4);
		from += 4;
		to += 4;
		len -= 4;
	}

	/* Tail: one more dword read, keeping only the remaining bytes. */
	if (len) {
		data = readl_relaxed(from);
		memcpy(to, &data, len);
	}
}
/*
 * Perform a dirmap read from the directly mapped flash window.
 *
 * The controller is first configured for an EEPROM-read-like transfer, the
 * boot mux is then switched to the transparent (dirmap) mode, the data is
 * copied out dword-wise and finally the mux and chip-select are restored.
 *
 * Returns the number of bytes read (possibly truncated to the window size),
 * 0 for an out-of-window offset or zero length, or a negative errno.
 */
static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller);
	struct dw_spi *dws = &dwsbt1->dws;
	struct spi_mem *mem = desc->mem;
	struct dw_spi_cfg cfg;
	int ret;

	/*
	 * Make sure the requested operation length is valid. Truncate the
	 * length if it's greater than the length of the MMIO region.
	 */
	if (offs >= dwsbt1->map_len || !len)
		return 0;

	len = min_t(size_t, len, dwsbt1->map_len - offs);

	/* Collect the controller configuration required by the operation */
	cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD;
	cfg.dfs = 8;
	cfg.ndf = 4;
	cfg.freq = mem->spi->max_speed_hz;

	/* Make sure the corresponding CS is de-asserted on transmission */
	dw_spi_set_cs(mem->spi, false);

	dw_spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	dw_spi_umask_intr(dws, DW_SPI_INT_RXFI);

	dw_spi_enable_chip(dws, 1);

	/*
	 * Enable the transparent mode of the System Boot Controller.
	 * The SPI core IO should have been locked before calling this method
	 * so no one would be touching the controller's registers during the
	 * dirmap operation.
	 */
	ret = mux_control_select(dwsbt1->mux, BT1_BOOT_DIRMAP);
	if (ret)
		return ret;

	dw_spi_bt1_dirmap_copy_from_map(buf, dwsbt1->map + offs, len);

	mux_control_deselect(dwsbt1->mux);

	dw_spi_set_cs(mem->spi, true);

	ret = dw_spi_check_status(dws, true);

	return ret ?: len;
}
#endif /* CONFIG_SPI_DW_BT1_DIRMAP */
/*
 * Init hook for the regular Baikal-T1 DW APB SSI controllers.
 *
 * Fetches the controller IRQ, fixes the chip-select count, caps the SPI
 * memory operation frequency and wires up the generic DMA setup.
 *
 * Returns 0 on success or a negative errno from platform_get_irq().
 */
static int dw_spi_bt1_std_init(struct platform_device *pdev,
			       struct dw_spi_bt1 *dwsbt1)
{
	struct dw_spi *dws = &dwsbt1->dws;

	dws->irq = platform_get_irq(pdev, 0);
	if (dws->irq < 0)
		return dws->irq;

	/*
	 * The normal controllers can't always keep up with the full SPI bus
	 * rate, especially when several initiators contend for the APB bus.
	 * Cap the frequency used for the SPI memory operations, which are
	 * supposed to move data as fast as possible.
	 */
	dws->max_mem_freq = 20000000U;

	dws->num_cs = 4;

	dw_spi_dma_setup_generic(dws);

	return 0;
}
/*
 * Init hook for the Baikal-T1 System Boot SPI controller.
 *
 * Acquires the boot mux, optionally maps the directly mapped flash window
 * and installs the dirmap callbacks, then configures the controller for a
 * single chip-select, IRQ-less operation with a CPU-count-dependent cap on
 * the SPI memory operation frequency.
 *
 * Returns 0 on success or a negative errno from the mux lookup.
 */
static int dw_spi_bt1_sys_init(struct platform_device *pdev,
			       struct dw_spi_bt1 *dwsbt1)
{
	struct resource *res __maybe_unused;
	struct dw_spi *dws = &dwsbt1->dws;

	/*
	 * The System Boot Controller is equipped with a mux toggling between
	 * the directly mapped SPI flash access mode and IO access to the
	 * DW APB SSI registers. The mux controller has to be set up so the
	 * registers stay accessible by default (on idle-state).
	 */
	dwsbt1->mux = devm_mux_control_get(&pdev->dev, NULL);
	if (IS_ERR(dwsbt1->mux))
		return PTR_ERR(dwsbt1->mux);

	/*
	 * The directly mapped SPI flash memory is a 16MB MMIO region which
	 * lets a peripheral memory device be accessed by plain reads/writes.
	 * Note the system APB bus stalls during each IO from/to the dirmap
	 * region until the operation is finished, so don't use it
	 * concurrently with time-critical tasks (like the SPI memory
	 * operations implemented in the DW APB SSI driver).
	 */
#ifdef CONFIG_SPI_DW_BT1_DIRMAP
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		dwsbt1->map = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(dwsbt1->map)) {
			/* Dirmap is optional: fall back to regular IO. */
			dwsbt1->map = NULL;
		} else {
			dwsbt1->map_len = resource_size(res);
			dws->mem_ops.dirmap_create = dw_spi_bt1_dirmap_create;
			dws->mem_ops.dirmap_read = dw_spi_bt1_dirmap_read;
		}
	}
#endif /* CONFIG_SPI_DW_BT1_DIRMAP */

	/*
	 * There is no IRQ, no DMA and just one CS available on the System
	 * Boot SPI controller.
	 */
	dws->irq = IRQ_NOTCONNECTED;
	dws->num_cs = 1;

	/*
	 * The System Boot controller doesn't keep up with the full SPI bus
	 * speed due to the relatively slow APB bus, races for its resources
	 * from different CPUs and small FIFOs (just 8 words deep). It does
	 * somewhat better in single-CPU mode, hence the two limits.
	 */
	dws->max_mem_freq = num_possible_cpus() > 1 ? 10000000U : 20000000U;

	return 0;
}
/*
 * Probe a Baikal-T1 DW APB SSI / System Boot SPI controller.
 *
 * Maps the controller registers, enables the reference clock, derives the
 * maximum bus frequency from the clock rate, runs the per-compatible init
 * callback and finally registers the generic DW SPI controller.
 *
 * Returns 0 on success or a negative errno.
 */
static int dw_spi_bt1_probe(struct platform_device *pdev)
{
	dw_spi_bt1_init_cb init_func;
	struct dw_spi_bt1 *dwsbt1;
	struct resource *mem;
	struct dw_spi *dws;
	int ret;

	dwsbt1 = devm_kzalloc(&pdev->dev, sizeof(*dwsbt1), GFP_KERNEL);
	if (!dwsbt1)
		return -ENOMEM;

	dws = &dwsbt1->dws;

	dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(dws->regs))
		return PTR_ERR(dws->regs);

	/* Physical base is needed by the DW core for DMA addressing. */
	dws->paddr = mem->start;

	dwsbt1->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(dwsbt1->clk))
		return PTR_ERR(dwsbt1->clk);

	dws->bus_num = pdev->id;
	dws->reg_io_width = 4;
	dws->max_freq = clk_get_rate(dwsbt1->clk);
	if (!dws->max_freq)
		return -EINVAL;

	/*
	 * Guard against a match entry with no init callback attached:
	 * device_get_match_data() returns NULL in that case and calling
	 * through it would dereference a NULL function pointer.
	 */
	init_func = device_get_match_data(&pdev->dev);
	if (!init_func)
		return -EINVAL;

	ret = init_func(pdev, dwsbt1);
	if (ret)
		return ret;

	pm_runtime_enable(&pdev->dev);

	ret = dw_spi_add_controller(&pdev->dev, dws);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	platform_set_drvdata(pdev, dwsbt1);

	return 0;
}
/*
 * Tear down the controller: unregister it from the SPI core first, then
 * disable runtime PM (the reverse of the probe order).
 */
static void dw_spi_bt1_remove(struct platform_device *pdev)
{
	struct dw_spi_bt1 *dwsbt1 = platform_get_drvdata(pdev);

	dw_spi_remove_controller(&dwsbt1->dws);

	pm_runtime_disable(&pdev->dev);
}
/* The match data carries the per-flavour init hook invoked from probe. */
static const struct of_device_id dw_spi_bt1_of_match[] = {
	{ .compatible = "baikal,bt1-ssi", .data = dw_spi_bt1_std_init},
	{ .compatible = "baikal,bt1-sys-ssi", .data = dw_spi_bt1_sys_init},
	{ }
};
MODULE_DEVICE_TABLE(of, dw_spi_bt1_of_match);
static struct platform_driver dw_spi_bt1_driver = {
	.probe	= dw_spi_bt1_probe,
	.remove	= dw_spi_bt1_remove,
	.driver	= {
		.name		= "bt1-sys-ssi",
		.of_match_table	= dw_spi_bt1_of_match,
	},
};
module_platform_driver(dw_spi_bt1_driver);

MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver");
MODULE_LICENSE("GPL v2");
/* Uses symbols from the DW SPI core namespace (spi-dw-core.c). */
MODULE_IMPORT_NS("SPI_DW_CORE");

View file

@ -936,8 +936,6 @@ int dw_spi_add_controller(struct device *dev, struct dw_spi *dws)
if (!ctlr)
return -ENOMEM;
device_set_node(&ctlr->dev, dev_fwnode(dev));
dws->ctlr = ctlr;
dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

View file

@ -104,10 +104,8 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
return -ENOMEM;
dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwsmscc->spi_mst)) {
dev_err(&pdev->dev, "SPI_MST region map failed\n");
if (IS_ERR(dwsmscc->spi_mst))
return PTR_ERR(dwsmscc->spi_mst);
}
dwsmscc->syscon = syscon_regmap_lookup_by_compatible(cpu_syscon);
if (IS_ERR(dwsmscc->syscon))
@ -392,6 +390,38 @@ out_reset:
return ret;
}
static int dw_spi_mmio_suspend(struct device *dev)
{
struct dw_spi_mmio *dwsmmio = dev_get_drvdata(dev);
int ret;
ret = dw_spi_suspend_controller(&dwsmmio->dws);
if (ret)
return ret;
reset_control_assert(dwsmmio->rstc);
clk_disable_unprepare(dwsmmio->pclk);
clk_disable_unprepare(dwsmmio->clk);
return 0;
}
static int dw_spi_mmio_resume(struct device *dev)
{
struct dw_spi_mmio *dwsmmio = dev_get_drvdata(dev);
clk_prepare_enable(dwsmmio->clk);
clk_prepare_enable(dwsmmio->pclk);
reset_control_deassert(dwsmmio->rstc);
return dw_spi_resume_controller(&dwsmmio->dws);
}
static DEFINE_SIMPLE_DEV_PM_OPS(dw_spi_mmio_pm_ops,
dw_spi_mmio_suspend, dw_spi_mmio_resume);
static void dw_spi_mmio_remove(struct platform_device *pdev)
{
struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
@ -435,6 +465,7 @@ static struct platform_driver dw_spi_mmio_driver = {
.name = DRIVER_NAME,
.of_match_table = dw_spi_mmio_of_match,
.acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match),
.pm = pm_sleep_ptr(&dw_spi_mmio_pm_ops),
},
};
module_platform_driver(dw_spi_mmio_driver);

View file

@ -689,7 +689,6 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
/* make sure that the hardware is disabled */
writel(0, espi->mmio + SSPCR1);
device_set_node(&host->dev, dev_fwnode(&pdev->dev));
error = devm_spi_register_controller(&pdev->dev, host);
if (error) {
dev_err(&pdev->dev, "failed to register SPI host\n");

View file

@ -405,7 +405,6 @@ static int falcon_sflash_probe(struct platform_device *pdev)
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->setup = falcon_sflash_setup;
host->transfer_one_message = falcon_sflash_xfer_one;
host->dev.of_node = pdev->dev.of_node;
ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)

View file

@ -531,7 +531,6 @@ static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
static int fsi_spi_probe(struct device *dev)
{
int rc;
struct device_node *np;
int num_controllers_registered = 0;
struct fsi2spi *bridge;
struct fsi_device *fsi = to_fsi_dev(dev);
@ -547,7 +546,7 @@ static int fsi_spi_probe(struct device *dev)
bridge->fsi = fsi;
mutex_init(&bridge->lock);
for_each_available_child_of_node(dev->of_node, np) {
for_each_available_child_of_node_scoped(dev->of_node, np) {
u32 base;
struct fsi_spi *ctx;
struct spi_controller *ctlr;
@ -556,10 +555,8 @@ static int fsi_spi_probe(struct device *dev)
continue;
ctlr = spi_alloc_host(dev, sizeof(*ctx));
if (!ctlr) {
of_node_put(np);
if (!ctlr)
break;
}
ctlr->dev.of_node = np;
ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;

View file

@ -1555,7 +1555,6 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->setup = dspi_setup;
ctlr->transfer_one_message = dspi_transfer_one_message;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
ctlr->target_abort = dspi_target_abort;

View file

@ -675,7 +675,6 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem,
host->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_LSB_FIRST | SPI_LOOP;
host->dev.of_node = dev->of_node;
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
host->setup = fsl_espi_setup;
host->cleanup = fsl_espi_cleanup;

View file

@ -91,7 +91,6 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
| SPI_LSB_FIRST | SPI_LOOP;
ctlr->dev.of_node = dev->of_node;
mpc8xxx_spi = spi_controller_get_devdata(ctlr);
mpc8xxx_spi->dev = dev;

View file

@ -281,7 +281,8 @@ static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi->rx(fsl_lpspi);
}
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
struct spi_device *spi)
{
u32 temp = 0;
@ -303,6 +304,13 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi)
temp |= TCR_CONTC;
}
}
if (spi->mode & SPI_CPOL)
temp |= TCR_CPOL;
if (spi->mode & SPI_CPHA)
temp |= TCR_CPHA;
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
@ -486,22 +494,47 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller,
fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
}
/*
* t->len is 'unsigned' and txfifosize and watermrk is 'u8', force
* type cast is inevitable. When len > 255, len will be truncated in min_t(),
* it caused wrong watermark set. 'unsigned int' is as the designated type
* for min_t() to avoid truncation.
*/
fsl_lpspi->watermark = min_t(unsigned int,
fsl_lpspi->txfifosize,
t->len);
fsl_lpspi->watermark = min(fsl_lpspi->txfifosize, t->len);
return fsl_lpspi_config(fsl_lpspi);
}
static int fsl_lpspi_prepare_message(struct spi_controller *controller,
struct spi_message *msg)
{
struct fsl_lpspi_data *fsl_lpspi =
spi_controller_get_devdata(controller);
struct spi_device *spi = msg->spi;
struct spi_transfer *t;
int ret;
t = list_first_entry_or_null(&msg->transfers, struct spi_transfer,
transfer_list);
if (!t)
return 0;
fsl_lpspi->is_first_byte = true;
fsl_lpspi->usedma = false;
ret = fsl_lpspi_setup_transfer(controller, spi, t);
if (fsl_lpspi_can_dma(controller, spi, t))
fsl_lpspi->usedma = true;
else
fsl_lpspi->usedma = false;
return fsl_lpspi_config(fsl_lpspi);
if (ret < 0)
return ret;
fsl_lpspi_set_cmd(fsl_lpspi, spi);
/* No IRQs */
writel(0, fsl_lpspi->base + IMX7ULP_IER);
/* Controller disable, clear FIFOs, clear status */
writel(CR_RRF | CR_RTF, fsl_lpspi->base + IMX7ULP_CR);
writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR);
return 0;
}
static int fsl_lpspi_target_abort(struct spi_controller *controller)
@ -761,14 +794,18 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller,
spi_controller_get_devdata(controller);
int ret;
fsl_lpspi->is_first_byte = true;
if (fsl_lpspi_can_dma(controller, spi, t))
fsl_lpspi->usedma = true;
else
fsl_lpspi->usedma = false;
ret = fsl_lpspi_setup_transfer(controller, spi, t);
if (ret < 0)
return ret;
t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz;
fsl_lpspi_set_cmd(fsl_lpspi);
fsl_lpspi_set_cmd(fsl_lpspi, spi);
fsl_lpspi->is_first_byte = false;
if (fsl_lpspi->usedma)
@ -952,12 +989,12 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
}
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
controller->prepare_message = fsl_lpspi_prepare_message;
controller->transfer_one = fsl_lpspi_transfer_one;
controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
controller->dev.of_node = pdev->dev.of_node;
controller->bus_num = pdev->id;
controller->num_chipselect = num_cs;
controller->target_abort = fsl_lpspi_target_abort;

View file

@ -82,6 +82,7 @@ struct spi_geni_master {
u32 fifo_width_bits;
u32 tx_wm;
u32 last_mode;
u8 last_cs;
unsigned long cur_speed_hz;
unsigned long cur_sclk_hz;
unsigned int cur_bits_per_word;
@ -145,8 +146,7 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
static void handle_se_timeout(struct spi_controller *spi,
struct spi_message *msg)
static void handle_se_timeout(struct spi_controller *spi)
{
struct spi_geni_master *mas = spi_controller_get_devdata(spi);
unsigned long time_left;
@ -160,24 +160,20 @@ static void handle_se_timeout(struct spi_controller *spi,
xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
if (spi->target) {
/*
* skip CMD Cancel sequnece since spi target
* doesn`t support CMD Cancel sequnece
*/
/* The controller doesn't support the Cancel commnand in target mode */
if (!spi->target) {
reinit_completion(&mas->cancel_done);
geni_se_cancel_m_cmd(se);
spin_unlock_irq(&mas->lock);
goto reset_if_dma;
time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
if (time_left)
goto reset_if_dma;
spin_lock_irq(&mas->lock);
}
reinit_completion(&mas->cancel_done);
geni_se_cancel_m_cmd(se);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
if (time_left)
goto reset_if_dma;
spin_lock_irq(&mas->lock);
reinit_completion(&mas->abort_done);
geni_se_abort_m_cmd(se);
spin_unlock_irq(&mas->lock);
@ -225,7 +221,7 @@ reset_if_dma:
}
}
static void handle_gpi_timeout(struct spi_controller *spi, struct spi_message *msg)
static void handle_gpi_timeout(struct spi_controller *spi)
{
struct spi_geni_master *mas = spi_controller_get_devdata(spi);
@ -240,10 +236,10 @@ static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message *
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
case GENI_SE_DMA:
handle_se_timeout(spi, msg);
handle_se_timeout(spi);
break;
case GENI_GPI_DMA:
handle_gpi_timeout(spi, msg);
handle_gpi_timeout(spi);
break;
default:
dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode);
@ -284,55 +280,6 @@ static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller);
struct spi_controller *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned long time_left;
if (!(slv->mode & SPI_CS_HIGH))
set_flag = !set_flag;
if (set_flag == mas->cs_flag)
return;
pm_runtime_get_sync(mas->dev);
if (spi_geni_is_abort_still_pending(mas)) {
dev_err(mas->dev, "Can't set chip select\n");
goto exit;
}
spin_lock_irq(&mas->lock);
if (mas->cur_xfer) {
dev_err(mas->dev, "Can't set CS when prev xfer running\n");
spin_unlock_irq(&mas->lock);
goto exit;
}
mas->cs_flag = set_flag;
/* set xfer_mode to FIFO to complete cs_done in isr */
mas->cur_xfer_mode = GENI_SE_FIFO;
geni_se_select_mode(se, mas->cur_xfer_mode);
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
else
geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
if (!time_left) {
dev_warn(mas->dev, "Timeout setting chip select\n");
handle_se_timeout(spi, NULL);
}
exit:
pm_runtime_put(mas->dev);
}
static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
unsigned int bits_per_word)
{
@ -399,36 +346,27 @@ static int setup_fifo_params(struct spi_device *spi_slv,
{
struct spi_geni_master *mas = spi_controller_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0;
u32 demux_sel;
u8 chipselect = spi_get_chipselect(spi_slv, 0);
bool cs_changed = (mas->last_cs != chipselect);
u32 mode_changed = mas->last_mode ^ spi_slv->mode;
if (mas->last_mode != spi_slv->mode) {
if (spi_slv->mode & SPI_LOOP)
loopback_cfg = LOOPBACK_ENABLE;
mas->last_cs = chipselect;
mas->last_mode = spi_slv->mode;
if (spi_slv->mode & SPI_CPOL)
cpol = CPOL;
if (mode_changed & SPI_LSB_FIRST)
mas->cur_bits_per_word = 0; /* force next setup_se_xfer to call spi_setup_word_len */
if (mode_changed & SPI_LOOP)
writel((spi_slv->mode & SPI_LOOP) ? LOOPBACK_ENABLE : 0, se->base + SE_SPI_LOOPBACK);
if (cs_changed)
writel(chipselect, se->base + SE_SPI_DEMUX_SEL);
if (mode_changed & SE_SPI_CPHA)
writel((spi_slv->mode & SPI_CPHA) ? CPHA : 0, se->base + SE_SPI_CPHA);
if (mode_changed & SE_SPI_CPOL)
writel((spi_slv->mode & SPI_CPOL) ? CPOL : 0, se->base + SE_SPI_CPOL);
if ((mode_changed & SPI_CS_HIGH) || (cs_changed && (spi_slv->mode & SPI_CS_HIGH)))
writel((spi_slv->mode & SPI_CS_HIGH) ? BIT(chipselect) : 0, se->base + SE_SPI_DEMUX_OUTPUT_INV);
if (spi_slv->mode & SPI_CPHA)
cpha = CPHA;
if (spi_slv->mode & SPI_CS_HIGH)
demux_output_inv = BIT(spi_get_chipselect(spi_slv, 0));
demux_sel = spi_get_chipselect(spi_slv, 0);
mas->cur_bits_per_word = spi_slv->bits_per_word;
spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
writel(cpha, se->base + SE_SPI_CPHA);
writel(cpol, se->base + SE_SPI_CPOL);
writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
mas->last_mode = spi_slv->mode;
}
return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
return 0;
}
static void
@ -548,10 +486,10 @@ static u32 get_xfer_len_in_words(struct spi_transfer *xfer,
{
u32 len;
if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
if (!(xfer->bits_per_word % MIN_WORD_LEN))
len = xfer->len * BITS_PER_BYTE / xfer->bits_per_word;
else
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
len = xfer->len / (xfer->bits_per_word / BITS_PER_BYTE + 1);
len &= TRANS_LEN_MSK;
return len;
@ -571,7 +509,7 @@ static bool geni_can_dma(struct spi_controller *ctlr,
return true;
len = get_xfer_len_in_words(xfer, mas);
fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / xfer->bits_per_word;
if (len > fifo_size)
return true;
@ -724,11 +662,17 @@ static int spi_geni_init(struct spi_geni_master *mas)
case 0:
mas->cur_xfer_mode = GENI_SE_FIFO;
geni_se_select_mode(se, GENI_SE_FIFO);
/* setup_fifo_params assumes that these registers start with a zero value */
writel(0, se->base + SE_SPI_LOOPBACK);
writel(0, se->base + SE_SPI_DEMUX_SEL);
writel(0, se->base + SE_SPI_CPHA);
writel(0, se->base + SE_SPI_CPOL);
writel(0, se->base + SE_SPI_DEMUX_OUTPUT_INV);
ret = 0;
break;
}
/* We always control CS manually */
/* We never control CS manually */
if (!spi->target) {
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
spi_tx_cfg &= ~CS_TOGGLE;
@ -841,6 +785,7 @@ static int setup_se_xfer(struct spi_transfer *xfer,
u16 mode, struct spi_controller *spi)
{
u32 m_cmd = 0;
u32 m_params = 0;
u32 len;
struct geni_se *se = &mas->se;
int ret;
@ -904,12 +849,17 @@ static int setup_se_xfer(struct spi_transfer *xfer,
mas->cur_xfer_mode = GENI_SE_DMA;
geni_se_select_mode(se, mas->cur_xfer_mode);
if (!xfer->cs_change) {
if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
m_params = FRAGMENTATION;
}
/*
* Lock around right before we start the transfer since our
* interrupt could come in at any time now.
*/
spin_lock_irq(&mas->lock);
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
geni_se_setup_m_cmd(se, m_cmd, m_params);
if (mas->cur_xfer_mode == GENI_SE_DMA) {
if (m_cmd & SPI_RX_ONLY)
@ -1053,6 +1003,17 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
return IRQ_HANDLED;
}
static int spi_geni_target_abort(struct spi_controller *spi)
{
if (!spi->cur_msg)
return 0;
handle_se_timeout(spi);
spi_finalize_current_transfer(spi);
return 0;
}
static int spi_geni_probe(struct platform_device *pdev)
{
int ret, irq;
@ -1078,7 +1039,11 @@ static int spi_geni_probe(struct platform_device *pdev)
if (IS_ERR(clk))
return PTR_ERR(clk);
spi = devm_spi_alloc_host(dev, sizeof(*mas));
if (device_property_read_bool(dev, "spi-slave"))
spi = devm_spi_alloc_target(dev, sizeof(*mas));
else
spi = devm_spi_alloc_host(dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
@ -1102,7 +1067,6 @@ static int spi_geni_probe(struct platform_device *pdev)
}
spi->bus_num = -1;
spi->dev.of_node = dev->of_node;
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
@ -1123,6 +1087,9 @@ static int spi_geni_probe(struct platform_device *pdev)
init_completion(&mas->rx_reset_done);
spin_lock_init(&mas->lock);
if (spi->target)
spi->target_abort = spi_geni_target_abort;
ret = geni_icc_get(&mas->se, NULL);
if (ret)
return ret;
@ -1133,9 +1100,6 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
return ret;
if (device_property_read_bool(&pdev->dev, "spi-slave"))
spi->target = true;
/* Set the bus quota to a reasonable value for register access */
mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
@ -1148,14 +1112,6 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
return ret;
/*
* check the mode supported and set_cs for fifo mode only
* for dma (gsi) mode, the gsi will set cs based on params passed in
* TRE
*/
if (!spi->target && mas->cur_xfer_mode == GENI_SE_FIFO)
spi->set_cs = spi_geni_set_cs;
/*
* TX is required per GSI spec, see setup_gsi_xfer().
*/

View file

@ -351,7 +351,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
if (fwnode) {
device_set_node(&host->dev, fwnode);
host->use_gpio_descriptors = true;
} else {
status = spi_gpio_probe_pdata(pdev, host);

View file

@ -284,7 +284,6 @@ static int gxp_spifi_probe(struct platform_device *pdev)
ctlr->mem_ops = &gxp_spi_mem_ops;
ctlr->setup = gxp_spi_setup;
ctlr->num_chipselect = data->max_cs;
ctlr->dev.of_node = dev->of_node;
ret = devm_spi_register_controller(dev, ctlr);
if (ret) {

View file

@ -495,7 +495,6 @@ static int hisi_spi_probe(struct platform_device *pdev)
host->cleanup = hisi_spi_cleanup;
host->transfer_one = hisi_spi_transfer_one;
host->handle_err = hisi_spi_handle_err;
host->dev.fwnode = dev->fwnode;
host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX);
hisi_spi_hw_init(hs);

View file

@ -587,7 +587,6 @@ static int img_spfi_probe(struct platform_device *pdev)
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
host->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
host->dev.of_node = pdev->dev.of_node;
host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
host->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
host->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;

View file

@ -60,6 +60,7 @@ MODULE_PARM_DESC(polling_limit_us,
#define MX51_ECSPI_CTRL_MAX_BURST 512
/* The maximum bytes that IMX53_ECSPI can transfer in target mode.*/
#define MX53_MAX_TRANSFER_BYTES 512
#define BYTES_PER_32BITS_WORD 4
enum spi_imx_devtype {
IMX1_CSPI,
@ -95,6 +96,16 @@ struct spi_imx_devtype_data {
enum spi_imx_devtype devtype;
};
struct dma_data_package {
u32 cmd_word;
void *dma_rx_buf;
void *dma_tx_buf;
dma_addr_t dma_tx_addr;
dma_addr_t dma_rx_addr;
int dma_len;
int data_len;
};
struct spi_imx_data {
struct spi_controller *controller;
struct device *dev;
@ -130,6 +141,9 @@ struct spi_imx_data {
u32 wml;
struct completion dma_rx_completion;
struct completion dma_tx_completion;
size_t dma_package_num;
struct dma_data_package *dma_data;
int rx_offset;
const struct spi_imx_devtype_data *devtype_data;
};
@ -189,6 +203,9 @@ MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)
/* Align to cache line to avoid swiotlo bounce */
#define DMA_CACHE_ALIGNED_LEN(x) ALIGN((x), dma_get_cache_alignment())
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
* (which is currently not the case in this driver)
*/
@ -247,12 +264,26 @@ static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device
if (!controller->dma_rx)
return false;
if (spi_imx->target_mode)
/*
* Due to Freescale errata ERR003775 "eCSPI: Burst completion by Chip
* Select (SS) signal in Slave mode is not functional" burst size must
* be set exactly to the size of the transfer. This limit SPI transaction
* with maximum 2^12 bits.
*/
if (transfer->len > MX53_MAX_TRANSFER_BYTES && spi_imx->target_mode)
return false;
if (transfer->len < spi_imx->devtype_data->fifo_size)
return false;
/* DMA only can transmit data in bytes */
if (spi_imx->bits_per_word != 8 && spi_imx->bits_per_word != 16 &&
spi_imx->bits_per_word != 32)
return false;
if (transfer->len >= MAX_SDMA_BD_BYTES)
return false;
spi_imx->dynamic_burst = 0;
return true;
@ -1282,50 +1313,6 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
static int spi_imx_dma_configure(struct spi_controller *controller)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
case 2:
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 1:
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
default:
return -EINVAL;
}
tx.direction = DMA_MEM_TO_DEV;
tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
tx.dst_addr_width = buswidth;
tx.dst_maxburst = spi_imx->wml;
ret = dmaengine_slave_config(controller->dma_tx, &tx);
if (ret) {
dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
return ret;
}
rx.direction = DMA_DEV_TO_MEM;
rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
rx.src_addr_width = buswidth;
rx.src_maxburst = spi_imx->wml;
ret = dmaengine_slave_config(controller->dma_rx, &rx);
if (ret) {
dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
return ret;
}
return 0;
}
static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
@ -1442,8 +1429,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
init_completion(&spi_imx->dma_rx_completion);
init_completion(&spi_imx->dma_tx_completion);
controller->can_dma = spi_imx_can_dma;
controller->max_dma_len = MAX_SDMA_BD_BYTES;
spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX |
SPI_CONTROLLER_MUST_TX;
@ -1481,31 +1466,445 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
return secs_to_jiffies(2 * timeout);
}
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer)
static void spi_imx_dma_unmap(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data)
{
struct device *tx_dev = spi_imx->controller->dma_tx->device->dev;
struct device *rx_dev = spi_imx->controller->dma_rx->device->dev;
dma_unmap_single(tx_dev, dma_data->dma_tx_addr,
DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
DMA_TO_DEVICE);
dma_unmap_single(rx_dev, dma_data->dma_rx_addr,
DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
DMA_FROM_DEVICE);
}
static void spi_imx_dma_rx_data_handle(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data, void *rx_buf,
bool word_delay)
{
void *copy_ptr;
int unaligned;
/*
* On little-endian CPUs, adjust byte order:
* - Swap bytes when bpw = 8
* - Swap half-words when bpw = 16
* This ensures correct data ordering for DMA transfers.
*/
#ifdef __LITTLE_ENDIAN
if (!word_delay) {
unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
u32 *temp = dma_data->dma_rx_buf;
for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) {
if (bytes_per_word == 1)
swab32s(temp + i);
else if (bytes_per_word == 2)
swahw32s(temp + i);
}
}
#endif
/*
* When dynamic burst enabled, DMA RX always receives 32-bit words from RXFIFO with
* buswidth = 4, but when data_len is not 4-bytes alignment, the RM shows when
* burst length = 32*n + m bits, a SPI burst contains the m LSB in first word and all
* 32 bits in other n words. So if garbage bytes in the first word, trim first word then
* copy the actual data to rx_buf.
*/
if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) {
unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD;
copy_ptr = (u8 *)dma_data->dma_rx_buf + BYTES_PER_32BITS_WORD - unaligned;
} else {
copy_ptr = dma_data->dma_rx_buf;
}
memcpy(rx_buf, copy_ptr, dma_data->data_len);
}
static int spi_imx_dma_map(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data)
{
struct spi_controller *controller = spi_imx->controller;
struct device *tx_dev = controller->dma_tx->device->dev;
struct device *rx_dev = controller->dma_rx->device->dev;
int ret;
dma_data->dma_tx_addr = dma_map_single(tx_dev, dma_data->dma_tx_buf,
DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
DMA_TO_DEVICE);
ret = dma_mapping_error(tx_dev, dma_data->dma_tx_addr);
if (ret < 0) {
dev_err(spi_imx->dev, "DMA TX map failed %d\n", ret);
return ret;
}
dma_data->dma_rx_addr = dma_map_single(rx_dev, dma_data->dma_rx_buf,
DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
DMA_FROM_DEVICE);
ret = dma_mapping_error(rx_dev, dma_data->dma_rx_addr);
if (ret < 0) {
dev_err(spi_imx->dev, "DMA RX map failed %d\n", ret);
dma_unmap_single(tx_dev, dma_data->dma_tx_addr,
DMA_CACHE_ALIGNED_LEN(dma_data->dma_len),
DMA_TO_DEVICE);
return ret;
}
return 0;
}
static int spi_imx_dma_tx_data_handle(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data,
const void *tx_buf,
bool word_delay)
{
void *copy_ptr;
int unaligned;
if (word_delay) {
dma_data->dma_len = dma_data->data_len;
} else {
/*
* As per the reference manual, when burst length = 32*n + m bits, ECSPI
* sends m LSB bits in the first word, followed by n full 32-bit words.
* Since actual data may not be 4-byte aligned, allocate DMA TX/RX buffers
* to ensure alignment. For TX, DMA pushes 4-byte aligned words to TXFIFO,
* while ECSPI uses BURST_LENGTH settings to maintain correct bit count.
* For RX, DMA always receives 32-bit words from RXFIFO, when data len is
* not 4-byte aligned, trim the first word to drop garbage bytes, then group
* all transfer DMA bounse buffer and copy all valid data to rx_buf.
*/
dma_data->dma_len = ALIGN(dma_data->data_len, BYTES_PER_32BITS_WORD);
}
dma_data->dma_tx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL);
if (!dma_data->dma_tx_buf)
return -ENOMEM;
dma_data->dma_rx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL);
if (!dma_data->dma_rx_buf) {
kfree(dma_data->dma_tx_buf);
return -ENOMEM;
}
if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) {
unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD;
copy_ptr = (u8 *)dma_data->dma_tx_buf + BYTES_PER_32BITS_WORD - unaligned;
} else {
copy_ptr = dma_data->dma_tx_buf;
}
memcpy(copy_ptr, tx_buf, dma_data->data_len);
/*
* When word_delay is enabled, DMA transfers an entire word in one minor loop.
* In this case, no data requires additional handling.
*/
if (word_delay)
return 0;
#ifdef __LITTLE_ENDIAN
/*
* On little-endian CPUs, adjust byte order:
* - Swap bytes when bpw = 8
* - Swap half-words when bpw = 16
* This ensures correct data ordering for DMA transfers.
*/
unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
u32 *temp = dma_data->dma_tx_buf;
for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) {
if (bytes_per_word == 1)
swab32s(temp + i);
else if (bytes_per_word == 2)
swahw32s(temp + i);
}
#endif
return 0;
}
static int spi_imx_dma_data_prepare(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer,
bool word_delay)
{
u32 pre_bl, tail_bl;
u32 ctrl;
int ret;
/*
* ECSPI supports a maximum burst of 512 bytes. When xfer->len exceeds 512
* and is not a multiple of 512, a tail transfer is required. BURST_LEGTH
* is used for SPI HW to maintain correct bit count. BURST_LENGTH should
* update with data length. After DMA request submit, SPI can not update the
* BURST_LENGTH, in this case, we must split two package, update the register
* then setup second DMA transfer.
*/
ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
if (word_delay) {
/*
* When SPI IMX need to support word delay, according to "Sample Period Control
* Register" shows, The Sample Period Control Register (ECSPI_PERIODREG)
* provides software a way to insert delays (wait states) between consecutive
* SPI transfers. As a result, ECSPI can only transfer one word per frame, and
* the delay occurs between frames.
*/
spi_imx->dma_package_num = 1;
pre_bl = spi_imx->bits_per_word - 1;
} else if (transfer->len <= MX51_ECSPI_CTRL_MAX_BURST) {
spi_imx->dma_package_num = 1;
pre_bl = transfer->len * BITS_PER_BYTE - 1;
} else if (!(transfer->len % MX51_ECSPI_CTRL_MAX_BURST)) {
spi_imx->dma_package_num = 1;
pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1;
} else {
spi_imx->dma_package_num = 2;
pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1;
tail_bl = (transfer->len % MX51_ECSPI_CTRL_MAX_BURST) * BITS_PER_BYTE - 1;
}
spi_imx->dma_data = kmalloc_array(spi_imx->dma_package_num,
sizeof(struct dma_data_package),
GFP_KERNEL | __GFP_ZERO);
if (!spi_imx->dma_data) {
dev_err(spi_imx->dev, "Failed to allocate DMA package buffer!\n");
return -ENOMEM;
}
if (spi_imx->dma_package_num == 1) {
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET;
spi_imx->dma_data[0].cmd_word = ctrl;
spi_imx->dma_data[0].data_len = transfer->len;
ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0], transfer->tx_buf,
word_delay);
if (ret) {
kfree(spi_imx->dma_data);
return ret;
}
} else {
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET;
spi_imx->dma_data[0].cmd_word = ctrl;
spi_imx->dma_data[0].data_len = round_down(transfer->len,
MX51_ECSPI_CTRL_MAX_BURST);
ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0], transfer->tx_buf,
false);
if (ret) {
kfree(spi_imx->dma_data);
return ret;
}
ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
ctrl |= tail_bl << MX51_ECSPI_CTRL_BL_OFFSET;
spi_imx->dma_data[1].cmd_word = ctrl;
spi_imx->dma_data[1].data_len = transfer->len % MX51_ECSPI_CTRL_MAX_BURST;
ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[1],
transfer->tx_buf + spi_imx->dma_data[0].data_len,
false);
if (ret) {
kfree(spi_imx->dma_data[0].dma_tx_buf);
kfree(spi_imx->dma_data[0].dma_rx_buf);
kfree(spi_imx->dma_data);
}
}
return 0;
}
static int spi_imx_dma_submit(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data,
struct spi_transfer *transfer)
{
struct spi_controller *controller = spi_imx->controller;
struct dma_async_tx_descriptor *desc_tx, *desc_rx;
unsigned long transfer_timeout;
unsigned long time_left;
struct spi_controller *controller = spi_imx->controller;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
unsigned int bytes_per_word, i;
int ret;
dma_cookie_t cookie;
/*
* The TX DMA setup starts the transfer, so make sure RX is configured
* before TX.
*/
desc_rx = dmaengine_prep_slave_single(controller->dma_rx, dma_data->dma_rx_addr,
dma_data->dma_len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
transfer->error |= SPI_TRANS_FAIL_NO_START;
return -EINVAL;
}
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
cookie = dmaengine_submit(desc_rx);
if (dma_submit_error(cookie)) {
dev_err(spi_imx->dev, "submitting DMA RX failed\n");
transfer->error |= SPI_TRANS_FAIL_NO_START;
goto dmaengine_terminate_rx;
}
reinit_completion(&spi_imx->dma_rx_completion);
dma_async_issue_pending(controller->dma_rx);
desc_tx = dmaengine_prep_slave_single(controller->dma_tx, dma_data->dma_tx_addr,
dma_data->dma_len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx)
goto dmaengine_terminate_rx;
desc_tx->callback = spi_imx_dma_tx_callback;
desc_tx->callback_param = (void *)spi_imx;
cookie = dmaengine_submit(desc_tx);
if (dma_submit_error(cookie)) {
dev_err(spi_imx->dev, "submitting DMA TX failed\n");
goto dmaengine_terminate_tx;
}
reinit_completion(&spi_imx->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
spi_imx->devtype_data->trigger(spi_imx);
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
if (!spi_imx->target_mode) {
/* Wait SDMA to finish the data transfer.*/
time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
transfer_timeout);
if (!time_left) {
dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
transfer_timeout);
if (!time_left) {
dev_err(&controller->dev, "I/O Error in DMA RX\n");
spi_imx->devtype_data->reset(spi_imx);
dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
} else {
spi_imx->target_aborted = false;
if (wait_for_completion_interruptible(&spi_imx->dma_tx_completion) ||
READ_ONCE(spi_imx->target_aborted)) {
dev_dbg(spi_imx->dev, "I/O Error in DMA TX interrupted\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
return -EINTR;
}
if (wait_for_completion_interruptible(&spi_imx->dma_rx_completion) ||
READ_ONCE(spi_imx->target_aborted)) {
dev_dbg(spi_imx->dev, "I/O Error in DMA RX interrupted\n");
dmaengine_terminate_all(controller->dma_rx);
return -EINTR;
}
/*
* ECSPI has a HW issue when works in Target mode, after 64 words
* writtern to TXFIFO, even TXFIFO becomes empty, ECSPI_TXDATA keeps
* shift out the last word data, so we have to disable ECSPI when in
* target mode after the transfer completes.
*/
if (spi_imx->devtype_data->disable)
spi_imx->devtype_data->disable(spi_imx);
}
return 0;
dmaengine_terminate_tx:
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_rx:
dmaengine_terminate_all(controller->dma_rx);
return -EINVAL;
}
static void spi_imx_dma_max_wml_find(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data,
bool word_delay)
{
unsigned int bytes_per_word = word_delay ?
spi_imx_bytes_per_word(spi_imx->bits_per_word) :
BYTES_PER_32BITS_WORD;
unsigned int i;
/* Get the right burst length from the last sg to ensure no tail data */
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
if (!dma_data->dma_len % (i * bytes_per_word))
break;
}
/* Use 1 as wml in case no available burst length got */
if (i == 0)
i = 1;
spi_imx->wml = i;
spi_imx->wml = i;
}
ret = spi_imx_dma_configure(controller);
static int spi_imx_dma_configure(struct spi_controller *controller, bool word_delay)
{
int ret;
enum dma_slave_buswidth buswidth;
struct dma_slave_config rx = {}, tx = {};
struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller);
if (word_delay) {
switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
case 4:
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
case 2:
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 1:
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
default:
return -EINVAL;
}
} else {
buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
}
tx.direction = DMA_MEM_TO_DEV;
tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
tx.dst_addr_width = buswidth;
tx.dst_maxburst = spi_imx->wml;
ret = dmaengine_slave_config(controller->dma_tx, &tx);
if (ret) {
dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
return ret;
}
rx.direction = DMA_DEV_TO_MEM;
rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
rx.src_addr_width = buswidth;
rx.src_maxburst = spi_imx->wml;
ret = dmaengine_slave_config(controller->dma_rx, &rx);
if (ret) {
dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
return ret;
}
return 0;
}
static int spi_imx_dma_package_transfer(struct spi_imx_data *spi_imx,
struct dma_data_package *dma_data,
struct spi_transfer *transfer,
bool word_delay)
{
struct spi_controller *controller = spi_imx->controller;
int ret;
spi_imx_dma_max_wml_find(spi_imx, dma_data, word_delay);
ret = spi_imx_dma_configure(controller, word_delay);
if (ret)
goto dma_failure_no_start;
@ -1516,61 +1915,16 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
}
spi_imx->devtype_data->setup_wml(spi_imx);
/*
* The TX DMA setup starts the transfer, so make sure RX is configured
* before TX.
*/
desc_rx = dmaengine_prep_slave_sg(controller->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EINVAL;
goto dma_failure_no_start;
}
ret = spi_imx_dma_submit(spi_imx, dma_data, transfer);
if (ret)
return ret;
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_rx);
reinit_completion(&spi_imx->dma_rx_completion);
dma_async_issue_pending(controller->dma_rx);
desc_tx = dmaengine_prep_slave_sg(controller->dma_tx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
return -EINVAL;
}
desc_tx->callback = spi_imx_dma_tx_callback;
desc_tx->callback_param = (void *)spi_imx;
dmaengine_submit(desc_tx);
reinit_completion(&spi_imx->dma_tx_completion);
dma_async_issue_pending(controller->dma_tx);
spi_imx->devtype_data->trigger(spi_imx);
transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
/* Wait SDMA to finish the data transfer.*/
time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
transfer_timeout);
if (!time_left) {
dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
dmaengine_terminate_all(controller->dma_tx);
dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
transfer_timeout);
if (!time_left) {
dev_err(&controller->dev, "I/O Error in DMA RX\n");
spi_imx->devtype_data->reset(spi_imx);
dmaengine_terminate_all(controller->dma_rx);
return -ETIMEDOUT;
}
/* Trim the DMA RX buffer and copy the actual data to rx_buf */
dma_sync_single_for_cpu(controller->dma_rx->device->dev, dma_data->dma_rx_addr,
dma_data->dma_len, DMA_FROM_DEVICE);
spi_imx_dma_rx_data_handle(spi_imx, dma_data, transfer->rx_buf + spi_imx->rx_offset,
word_delay);
spi_imx->rx_offset += dma_data->data_len;
return 0;
/* fallback to pio */
@ -1579,6 +1933,57 @@ dma_failure_no_start:
return ret;
}
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
struct spi_transfer *transfer)
{
bool word_delay = transfer->word_delay.value != 0 && !spi_imx->target_mode;
int ret;
int i;
ret = spi_imx_dma_data_prepare(spi_imx, transfer, word_delay);
if (ret < 0) {
transfer->error |= SPI_TRANS_FAIL_NO_START;
dev_err(spi_imx->dev, "DMA data prepare fail\n");
goto fallback_pio;
}
spi_imx->rx_offset = 0;
/* Each dma_package performs a separate DMA transfer once */
for (i = 0; i < spi_imx->dma_package_num; i++) {
ret = spi_imx_dma_map(spi_imx, &spi_imx->dma_data[i]);
if (ret < 0) {
if (i == 0)
transfer->error |= SPI_TRANS_FAIL_NO_START;
dev_err(spi_imx->dev, "DMA map fail\n");
break;
}
/* Update the CTRL register BL field */
writel(spi_imx->dma_data[i].cmd_word, spi_imx->base + MX51_ECSPI_CTRL);
ret = spi_imx_dma_package_transfer(spi_imx, &spi_imx->dma_data[i],
transfer, word_delay);
/* Whether the dma transmission is successful or not, dma unmap is necessary */
spi_imx_dma_unmap(spi_imx, &spi_imx->dma_data[i]);
if (ret < 0) {
dev_dbg(spi_imx->dev, "DMA %d transfer not really finish\n", i);
break;
}
}
for (int j = 0; j < spi_imx->dma_package_num; j++) {
kfree(spi_imx->dma_data[j].dma_tx_buf);
kfree(spi_imx->dma_data[j].dma_rx_buf);
}
kfree(spi_imx->dma_data);
fallback_pio:
return ret;
}
static int spi_imx_pio_transfer(struct spi_device *spi,
struct spi_transfer *transfer)
{
@ -1737,7 +2142,7 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
while (spi_imx->devtype_data->rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
if (spi_imx->target_mode)
if (spi_imx->target_mode && !spi_imx->usedma)
return spi_imx_pio_transfer_target(spi, transfer);
/*
@ -1745,9 +2150,17 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
* transfer, the SPI transfer has already been mapped, so we
* have to do the DMA transfer here.
*/
if (spi_imx->usedma)
return spi_imx_dma_transfer(spi_imx, transfer);
if (spi_imx->usedma) {
ret = spi_imx_dma_transfer(spi_imx, transfer);
if (transfer->error & SPI_TRANS_FAIL_NO_START) {
spi_imx->usedma = false;
if (spi_imx->target_mode)
return spi_imx_pio_transfer_target(spi, transfer);
else
return spi_imx_pio_transfer(spi, transfer);
}
return ret;
}
/* run in polling mode for short transfers */
if (transfer->len == 1 || (polling_limit_us &&
spi_imx_transfer_estimate_time_us(transfer) < polling_limit_us))
@ -1955,7 +2368,6 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
controller->dev.of_node = pdev->dev.of_node;
ret = spi_register_controller(controller);
if (ret) {
dev_err_probe(&pdev->dev, ret, "register controller failed\n");

View file

@ -442,7 +442,6 @@ static int spi_ingenic_probe(struct platform_device *pdev)
ctlr->use_gpio_descriptors = true;
ctlr->max_native_cs = pdata->max_native_cs;
ctlr->num_chipselect = num_cs;
ctlr->dev.of_node = pdev->dev.of_node;
if (spi_ingenic_request_dma(ctlr, dev))
dev_warn(dev, "DMA not available.\n");

View file

@ -962,7 +962,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
spi->bits_per_word = 8;
spi->speed_hz = 0;
host->dev.of_node = pdev->dev.of_node;
host->num_chipselect = num_cs;
host->use_gpio_descriptors = true;
host->setup = lantiq_ssc_setup;

View file

@ -238,7 +238,6 @@ static int ljca_spi_probe(struct auxiliary_device *auxdev,
controller->auto_runtime_pm = false;
controller->max_speed_hz = LJCA_SPI_BUS_MAX_HZ;
device_set_node(&ljca_spi->controller->dev, dev_fwnode(&auxdev->dev));
auxiliary_set_drvdata(auxdev, controller);
ret = spi_register_controller(controller);

View file

@ -210,7 +210,6 @@ int loongson_spi_init_controller(struct device *dev, void __iomem *regs)
controller->unprepare_message = loongson_spi_unprepare_message;
controller->set_cs = loongson_spi_set_cs;
controller->num_chipselect = 4;
device_set_node(&controller->dev, dev_fwnode(dev));
dev_set_drvdata(dev, controller);
spi = spi_controller_get_devdata(controller);

View file

@ -200,7 +200,6 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
host->transfer_one = spi_lp8841_rtc_transfer_one;
host->bits_per_word_mask = SPI_BPW_MASK(8);
#ifdef CONFIG_OF
host->dev.of_node = pdev->dev.of_node;
#endif
data = spi_controller_get_devdata(host);

View file

@ -178,8 +178,19 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16))
return false;
if (op->cmd.nbytes != 2)
return false;
/* Extra 8D-8D-8D limitations */
if (op->cmd.dtr && op->cmd.buswidth == 8) {
if (op->cmd.nbytes != 2)
return false;
if ((op->addr.nbytes % 2) ||
(op->dummy.nbytes % 2) ||
(op->data.nbytes % 2)) {
dev_err(&ctlr->dev,
"Even byte numbers not allowed in octal DTR operations\n");
return false;
}
}
} else {
if (op->cmd.nbytes != 1)
return false;
@ -708,9 +719,18 @@ spi_mem_dirmap_create(struct spi_mem *mem,
desc->mem = mem;
desc->info = *info;
if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create) {
ret = spi_mem_access_start(mem);
if (ret) {
kfree(desc);
return ERR_PTR(ret);
}
ret = ctlr->mem_ops->dirmap_create(desc);
spi_mem_access_end(mem);
}
if (ret) {
desc->nodirmap = true;
if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))

View file

@ -1054,7 +1054,6 @@ static int meson_spicc_probe(struct platform_device *pdev)
device_reset_optional(&pdev->dev);
host->num_chipselect = 4;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP;
host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX);
host->min_speed_hz = spicc->data->min_speed_hz;

View file

@ -322,7 +322,6 @@ static int meson_spifc_probe(struct platform_device *pdev)
rate = clk_get_rate(spifc->clk);
host->num_chipselect = 1;
host->dev.of_node = pdev->dev.of_node;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->auto_runtime_pm = true;
host->transfer_one = meson_spifc_transfer_one;

View file

@ -161,7 +161,7 @@ static int mchp_corespi_setup(struct spi_device *spi)
return -EOPNOTSUPP;
}
if (spi->mode & SPI_MODE_X_MASK & ~spi->controller->mode_bits) {
if ((spi->mode ^ spi->controller->mode_bits) & SPI_MODE_X_MASK) {
dev_err(&spi->dev, "incompatible CPOL/CPHA, must match controller's Motorola mode\n");
return -EINVAL;
}
@ -360,7 +360,6 @@ static int mchp_corespi_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
host->transfer_one = mchp_corespi_transfer_one;
host->set_cs = mchp_corespi_set_cs;
host->dev.of_node = dev->of_node;
ret = of_property_read_u32(dev->of_node, "fifo-depth", &spi->fifo_depth);
if (ret)

View file

@ -480,8 +480,6 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *pdev)
host->use_gpio_descriptors = true;
host->cleanup = mpc512x_psc_spi_cleanup;
device_set_node(&host->dev, dev_fwnode(dev));
tempp = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(tempp))
return dev_err_probe(dev, PTR_ERR(tempp), "could not ioremap I/O port range\n");

View file

@ -319,8 +319,6 @@ static int mpc52xx_psc_spi_of_probe(struct platform_device *pdev)
host->transfer_one_message = mpc52xx_psc_spi_transfer_one_message;
host->cleanup = mpc52xx_psc_spi_cleanup;
device_set_node(&host->dev, dev_fwnode(dev));
mps->psc = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mps->psc))
return dev_err_probe(dev, PTR_ERR(mps->psc), "could not ioremap I/O port range\n");

View file

@ -430,7 +430,6 @@ static int mpc52xx_spi_probe(struct platform_device *op)
host->transfer = mpc52xx_spi_transfer;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->dev.of_node = op->dev.of_node;
platform_set_drvdata(op, host);

View file

@ -550,7 +550,6 @@ static int mpfs_spi_probe(struct platform_device *pdev)
host->transfer_one = mpfs_spi_transfer_one;
host->prepare_message = mpfs_spi_prepare_message;
host->set_cs = mpfs_spi_set_cs;
host->dev.of_node = pdev->dev.of_node;
spi = spi_controller_get_devdata(host);

View file

@ -1184,7 +1184,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
return -ENOMEM;
host->auto_runtime_pm = true;
host->dev.of_node = dev->of_node;
host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
host->set_cs = mtk_spi_set_cs;

View file

@ -348,7 +348,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
host->set_cs = mt7621_spi_set_native_cs;
host->transfer_one = mt7621_spi_transfer_one;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->dev.of_node = pdev->dev.of_node;
host->max_native_cs = MT7621_NATIVE_CS_COUNT;
host->num_chipselect = MT7621_NATIVE_CS_COUNT;
host->use_gpio_descriptors = true;

View file

@ -851,7 +851,6 @@ static int mtk_nor_probe(struct platform_device *pdev)
}
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->max_message_size = mtk_max_msg_size;
ctlr->mem_ops = &mtk_nor_mem_ops;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;

View file

@ -1448,7 +1448,6 @@ static int mtk_snand_probe(struct platform_device *pdev)
ctlr->mem_caps = &mtk_snand_mem_caps;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
ctlr->dev.of_node = pdev->dev.of_node;
ret = spi_register_controller(ctlr);
if (ret) {
dev_err(&pdev->dev, "spi_register_controller failed.\n");

View file

@ -161,7 +161,6 @@ static int spi_mux_probe(struct spi_device *spi)
ctlr->setup = spi_mux_setup;
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
ctlr->must_async = true;
ctlr->defer_optimize_message = true;

View file

@ -768,7 +768,6 @@ static int mxic_spi_probe(struct platform_device *pdev)
mxic = spi_controller_get_devdata(host);
mxic->dev = &pdev->dev;
host->dev.of_node = pdev->dev.of_node;
mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk");
if (IS_ERR(mxic->ps_clk))

View file

@ -746,7 +746,6 @@ static int npcm_fiu_probe(struct platform_device *pdev)
ctrl->bus_num = -1;
ctrl->mem_ops = &npcm_fiu_mem_ops;
ctrl->num_chipselect = fiu->info->max_cs;
ctrl->dev.of_node = dev->of_node;
return devm_spi_register_controller(dev, ctrl);
}

View file

@ -401,7 +401,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
host->max_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MIN_CLK_DIVIDER);
host->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
host->mode_bits = SPI_CPHA | SPI_CPOL;
host->dev.of_node = pdev->dev.of_node;
host->bus_num = -1;
host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
host->transfer_one = npcm_pspi_transfer_one;

View file

@ -1383,8 +1383,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
else
ctlr->mem_caps = &nxp_fspi_mem_caps;
device_set_node(&ctlr->dev, fwnode);
ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f);
if (ret)
return ret;

1384
drivers/spi/spi-nxp-xspi.c Normal file

File diff suppressed because it is too large Load diff

View file

@ -192,7 +192,6 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
if (!np)
return 0;
hw->bitbang.ctlr->dev.of_node = pdev->dev.of_node;
if (!of_property_read_u32(np, "clock-frequency", &val))
hw->freq = val;
if (!of_property_read_u32(np, "baud-width", &val))

View file

@ -780,7 +780,6 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status < 0)
goto out_rel_pm;
host->dev.of_node = pdev->dev.of_node;
status = spi_register_controller(host);
if (status < 0)
goto out_rel_pm;

View file

@ -1893,7 +1893,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
host->handle_err = pl022_handle_err;
host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
host->rt = platform_info->rt;
host->dev.of_node = dev->of_node;
host->use_gpio_descriptors = true;
/*

View file

@ -1290,8 +1290,6 @@ int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp,
drv_data->controller_info = platform_info;
drv_data->ssp = ssp;
device_set_node(&controller->dev, dev_fwnode(dev));
/* The spi->mode bits understood by this driver: */
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

View file

@ -763,7 +763,6 @@ static int qcom_qspi_probe(struct platform_device *pdev)
host->dma_alignment = QSPI_ALIGN_REQ;
host->num_chipselect = QSPI_NUM_CS;
host->bus_num = -1;
host->dev.of_node = pdev->dev.of_node;
host->mode_bits = SPI_MODE_0 |
SPI_TX_DUAL | SPI_RX_DUAL |
SPI_TX_QUAD | SPI_RX_QUAD;

View file

@ -850,8 +850,6 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
qcom_clear_bam_transaction(snandc);
qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
@ -941,8 +939,6 @@ static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
snandc->regs->exec = cpu_to_le32(1);
qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
@ -1587,7 +1583,6 @@ static int qcom_spi_probe(struct platform_device *pdev)
ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
ctlr->mem_ops = &qcom_spi_mem_ops;
ctlr->mem_caps = &qcom_spi_mem_caps;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
SPI_TX_QUAD | SPI_RX_QUAD;

View file

@ -1091,7 +1091,6 @@ static int spi_qup_probe(struct platform_device *pdev)
host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
host->max_speed_hz = max_freq;
host->transfer_one = spi_qup_transfer_one;
host->dev.of_node = pdev->dev.of_node;
host->auto_runtime_pm = true;
host->dma_alignment = dma_get_cache_alignment();
host->max_dma_len = SPI_MAX_XFER;

View file

@ -160,7 +160,6 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
if (IS_ERR(ahb_clk))
return PTR_ERR(ahb_clk);
host->dev.of_node = pdev->dev.of_node;
host->bus_num = 0;
host->num_chipselect = 3;
host->mode_bits = SPI_TX_DUAL;

Some files were not shown because too many files have changed in this diff Show more