diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml index 0ce2ea13583d..c35d4f2ab9a4 100644 --- a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml +++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml @@ -34,8 +34,9 @@ properties: spi-cpol: true spi-rx-bus-width: - minimum: 0 - maximum: 1 + items: + minimum: 0 + maximum: 1 dc-gpios: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml index 54e7349317b7..e22d518135f2 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4030.yaml @@ -37,7 +37,15 @@ properties: maximum: 102040816 spi-rx-bus-width: - enum: [1, 2, 4] + maxItems: 2 + # all lanes must have the same width + oneOf: + - contains: + const: 1 + - contains: + const: 2 + - contains: + const: 4 vdd-5v-supply: true vdd-1v8-supply: true @@ -88,6 +96,18 @@ oneOf: unevaluatedProperties: false +allOf: + - if: + properties: + compatible: + enum: + - adi,ad4030-24 + - adi,ad4032-24 + then: + properties: + spi-rx-bus-width: + maxItems: 1 + examples: - | #include @@ -108,3 +128,23 @@ examples: reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>; }; }; + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + adc@0 { + compatible = "adi,ad4630-24"; + reg = <0>; + spi-max-frequency = <80000000>; + spi-rx-bus-width = <4>, <4>; + vdd-5v-supply = <&supply_5V>; + vdd-1v8-supply = <&supply_1_8V>; + vio-supply = <&supply_1_8V>; + ref-supply = <&supply_5V>; + cnv-gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml index cbde7a0505d2..ae8d0b5f328b 100644 --- 
a/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad4695.yaml @@ -38,8 +38,9 @@ properties: spi-cpha: true spi-rx-bus-width: - minimum: 1 - maximum: 4 + items: + minimum: 1 + maximum: 4 avdd-supply: description: Analog power supply. diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml index 4b3828eda6cb..0f2448371f17 100644 --- a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml +++ b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.yaml @@ -70,6 +70,21 @@ required: unevaluatedProperties: false +patternProperties: + "^.*@[0-9a-f]+": + type: object + + properties: + spi-rx-bus-width: + maxItems: 8 + items: + enum: [0, 1] + + spi-tx-bus-width: + maxItems: 8 + items: + enum: [0, 1] + examples: - | spi@44a00000 { diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml index e1ab3f523ad6..a34e6471dbe8 100644 --- a/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml +++ b/Documentation/devicetree/bindings/spi/allwinner,sun4i-a10-spi.yaml @@ -55,10 +55,12 @@ patternProperties: maximum: 4 spi-rx-bus-width: - const: 1 + items: + - const: 1 spi-tx-bus-width: - const: 1 + items: + - const: 1 required: - compatible diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml index 1b91d1566c95..a6067030c5ed 100644 --- a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml +++ b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml @@ -81,10 +81,12 @@ patternProperties: maximum: 4 spi-rx-bus-width: - const: 1 + items: + - const: 1 spi-tx-bus-width: - const: 1 + items: + - const: 1 required: - compatible diff --git a/Documentation/devicetree/bindings/spi/andestech,ae350-spi.yaml 
b/Documentation/devicetree/bindings/spi/andestech,ae350-spi.yaml new file mode 100644 index 000000000000..8e441742cee6 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/andestech,ae350-spi.yaml @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/spi/andestech,ae350-spi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Andes ATCSPI200 SPI controller + +maintainers: + - CL Wang + +properties: + compatible: + oneOf: + - items: + - enum: + - andestech,qilai-spi + - const: andestech,ae350-spi + - const: andestech,ae350-spi + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + num-cs: + description: Number of chip selects supported + maxItems: 1 + + dmas: + items: + - description: Transmit FIFO DMA channel + - description: Receive FIFO DMA channel + + dma-names: + items: + - const: tx + - const: rx + +patternProperties: + "@[0-9a-f]+$": + type: object + additionalProperties: true + + properties: + spi-rx-bus-width: + items: + - enum: [1, 4] + + spi-tx-bus-width: + items: + - enum: [1, 4] + +allOf: + - $ref: spi-controller.yaml# + +required: + - compatible + - reg + - clocks + - dmas + - dma-names + +unevaluatedProperties: false + +examples: + - | + spi@f0b00000 { + compatible = "andestech,ae350-spi"; + reg = <0xf0b00000 0x100>; + clocks = <&clk_spi>; + dmas = <&dma0 0>, <&dma0 1>; + dma-names = "tx", "rx"; + + #address-cells = <1>; + #size-cells = <0>; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + spi-cpol; + spi-cpha; + }; + }; diff --git a/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml b/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml index 11885d0cc209..a8539b68a2f3 100644 --- a/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml +++ b/Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml @@ -19,6 +19,7 @@ properties: - const: 
atmel,at91rm9200-spi - items: - enum: + - microchip,lan9691-spi - microchip,sam9x60-spi - microchip,sam9x7-spi - microchip,sama7d65-spi diff --git a/Documentation/devicetree/bindings/spi/axiado,ax3000-spi.yaml b/Documentation/devicetree/bindings/spi/axiado,ax3000-spi.yaml new file mode 100644 index 000000000000..cd2aac66fca2 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/axiado,ax3000-spi.yaml @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/spi/axiado,ax3000-spi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Axiado AX3000 SoC SPI controller + +maintainers: + - Vladimir Moravcevic + - Tzu-Hao Wei + - Swark Yang + - Prasad Bolisetty + +allOf: + - $ref: spi-controller.yaml# + +properties: + compatible: + enum: + - axiado,ax3000-spi + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clock-names: + items: + - const: ref + - const: pclk + + clocks: + maxItems: 2 + + num-cs: + description: | + Number of chip selects used. + $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 1 + maximum: 4 + default: 4 + +required: + - compatible + - reg + - interrupts + - clock-names + - clocks + +unevaluatedProperties: false + +examples: + - | + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + spi@80510000 { + compatible = "axiado,ax3000-spi"; + reg = <0x00 0x80510000 0x00 0x1000>; + clock-names = "ref", "pclk"; + clocks = <&spi_clk>, <&apb_pclk>; + interrupt-parent = <&gic500>; + interrupts = ; + num-cs = <4>; + }; + }; +... 
diff --git a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml index 53a52fb8b819..891f578b5ac4 100644 --- a/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml +++ b/Documentation/devicetree/bindings/spi/cdns,qspi-nor.yaml @@ -61,6 +61,20 @@ allOf: cdns,fifo-depth: enum: [ 128, 256 ] default: 128 + - if: + properties: + compatible: + contains: + const: renesas,rzn1-qspi + then: + properties: + cdns,trigger-address: false + cdns,fifo-depth: false + cdns,fifo-width: false + else: + required: + - cdns,trigger-address + - cdns,fifo-depth properties: compatible: @@ -80,6 +94,9 @@ properties: # controllers are meant to be used with flashes of all kinds, # ie. also NAND flashes, not only NOR flashes. - const: cdns,qspi-nor + - items: + - const: renesas,r9a06g032-qspi + - const: renesas,rzn1-qspi - const: cdns,qspi-nor deprecated: true @@ -163,8 +180,6 @@ required: - reg - interrupts - clocks - - cdns,fifo-width - - cdns,trigger-address - '#address-cells' - '#size-cells' @@ -172,7 +187,7 @@ unevaluatedProperties: false examples: - | - qspi: spi@ff705000 { + spi@ff705000 { compatible = "intel,socfpga-qspi", "cdns,qspi-nor"; #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/spi/faraday,ftssp010.yaml b/Documentation/devicetree/bindings/spi/faraday,ftssp010.yaml new file mode 100644 index 000000000000..678598de3400 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/faraday,ftssp010.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/spi/faraday,ftssp010.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Faraday FTSSP010 SPI Controller + +maintainers: + - Linus Walleij + +properties: + compatible: + const: faraday,ftssp010 + + interrupts: + maxItems: 1 + + reg: + maxItems: 1 + + cs-gpios: true + +required: + - compatible + - interrupts + - reg + +allOf: + - $ref: 
spi-controller.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + spi@4a000000 { + compatible = "faraday,ftssp010"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x4a000000 0x1000>; + interrupts = <0>; + }; diff --git a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml index 8b3640280559..909c204b8adf 100644 --- a/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml +++ b/Documentation/devicetree/bindings/spi/nvidia,tegra210-quad.yaml @@ -54,10 +54,12 @@ patternProperties: properties: spi-rx-bus-width: - enum: [1, 2, 4] + items: + - enum: [1, 2, 4] spi-tx-bus-width: - enum: [1, 2, 4] + items: + - enum: [1, 2, 4] required: - compatible diff --git a/Documentation/devicetree/bindings/spi/nxp,imx94-xspi.yaml b/Documentation/devicetree/bindings/spi/nxp,imx94-xspi.yaml new file mode 100644 index 000000000000..16a0598c6d03 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/nxp,imx94-xspi.yaml @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/spi/nxp,imx94-xspi.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP External Serial Peripheral Interface (xSPI) + +maintainers: + - Haibo Chen + - Han Xu + +properties: + compatible: + oneOf: + - enum: + - nxp,imx94-xspi + - items: + - enum: + - nxp,imx952-xspi + - const: nxp,imx94-xspi + + reg: + items: + - description: registers address space + - description: memory mapped address space + + reg-names: + items: + - const: base + - const: mmap + + interrupts: + items: + - description: interrupt for EENV0 + - description: interrupt for EENV1 + - description: interrupt for EENV2 + - description: interrupt for EENV3 + - description: interrupt for EENV4 + + clocks: + items: + - description: SPI serial clock + + clock-names: + items: + - const: per + +required: + - compatible + - reg + - reg-names + - interrupts + 
- clocks + - clock-names + +allOf: + - $ref: spi-controller.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + spi@42b90000 { + compatible = "nxp,imx94-xspi"; + reg = <0x0 0x42b90000 0x0 0x50000>, <0x0 0x28000000 0x0 0x08000000>; + reg-names = "base", "mmap"; + interrupts = , + , + , + , + ; + #address-cells = <1>; + #size-cells = <0>; + clocks = <&scmi_1>; + clock-names = "per"; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <200000000>; + spi-rx-bus-width = <8>; + spi-tx-bus-width = <8>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/spi/nxp,lpc3220-spi.yaml b/Documentation/devicetree/bindings/spi/nxp,lpc3220-spi.yaml index d5f780912f21..789e26e40927 100644 --- a/Documentation/devicetree/bindings/spi/nxp,lpc3220-spi.yaml +++ b/Documentation/devicetree/bindings/spi/nxp,lpc3220-spi.yaml @@ -20,6 +20,12 @@ properties: clocks: maxItems: 1 + dmas: + maxItems: 1 + + dma-names: + const: rx-tx + allOf: - $ref: spi-controller.yaml# @@ -38,6 +44,8 @@ examples: compatible = "nxp,lpc3220-spi"; reg = <0x20088000 0x1000>; clocks = <&clk LPC32XX_CLK_SPI1>; + dmas = <&dmamux 11 1 0>; + dma-names = "rx-tx"; #address-cells = <1>; #size-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/spi/renesas,rzv2h-rspi.yaml b/Documentation/devicetree/bindings/spi/renesas,rzv2h-rspi.yaml index 069557a587b5..a588b112e11e 100644 --- a/Documentation/devicetree/bindings/spi/renesas,rzv2h-rspi.yaml +++ b/Documentation/devicetree/bindings/spi/renesas,rzv2h-rspi.yaml @@ -57,6 +57,14 @@ properties: - const: presetn - const: tresetn + dmas: + maxItems: 2 + + dma-names: + items: + - const: rx + - const: tx + power-domains: maxItems: 1 diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml index 8b6e8fc009db..880a9f624566 100644 --- 
a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml +++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml @@ -64,9 +64,23 @@ properties: description: Bus width to the SPI bus used for read transfers. If 0 is provided, then no RX will be possible on this device. - $ref: /schemas/types.yaml#/definitions/uint32 - enum: [0, 1, 2, 4, 8] - default: 1 + + Some SPI peripherals and controllers may have multiple data lanes for + receiving two or more words at the same time. If this is the case, each + index in the array represents the lane on both the SPI peripheral and + controller. Additional mapping properties may be needed if a lane is + skipped on either side. + $ref: /schemas/types.yaml#/definitions/uint32-array + items: + enum: [0, 1, 2, 4, 8] + default: [1] + + spi-rx-lane-map: + description: Mapping of peripheral SDO lanes to controller SDI lanes. + Each index in the array represents a peripheral SDO lane, and the value + at that index represents the corresponding controller SDI lane. + $ref: /schemas/types.yaml#/definitions/uint32-array + default: [0, 1, 2, 3, 4, 5, 6, 7] spi-rx-delay-us: description: @@ -81,9 +95,23 @@ properties: description: Bus width to the SPI bus used for write transfers. If 0 is provided, then no TX will be possible on this device. - $ref: /schemas/types.yaml#/definitions/uint32 - enum: [0, 1, 2, 4, 8] - default: 1 + + Some SPI peripherals and controllers may have multiple data lanes for + transmitting two or more words at the same time. If this is the case, each + index in the array represents the lane on both the SPI peripheral and + controller. Additional mapping properties may be needed if a lane is + skipped on either side. + $ref: /schemas/types.yaml#/definitions/uint32-array + items: + enum: [0, 1, 2, 4, 8] + default: [1] + + spi-tx-lane-map: + description: Mapping of peripheral SDI lanes to controller SDO lanes. 
+ Each index in the array represents a peripheral SDI lane, and the value + at that index represents the corresponding controller SDO lane. + $ref: /schemas/types.yaml#/definitions/uint32-array + default: [0, 1, 2, 3, 4, 5, 6, 7] spi-tx-delay-us: description: diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.yaml b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml index 4beb3af0416d..24e62530d432 100644 --- a/Documentation/devicetree/bindings/spi/spi-xilinx.yaml +++ b/Documentation/devicetree/bindings/spi/spi-xilinx.yaml @@ -38,7 +38,6 @@ properties: required: - compatible - reg - - interrupts unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml index ca880a226afa..472e92974714 100644 --- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml +++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml @@ -96,6 +96,9 @@ properties: The region should be defined as child node of the AHB SRAM node as per the generic bindings in Documentation/devicetree/bindings/sram/sram.yaml + power-domains: + maxItems: 1 + access-controllers: minItems: 1 maxItems: 2 diff --git a/Documentation/spi/index.rst b/Documentation/spi/index.rst index 20d4a4185ab9..ac0c2233ce48 100644 --- a/Documentation/spi/index.rst +++ b/Documentation/spi/index.rst @@ -9,6 +9,7 @@ Serial Peripheral Interface (SPI) spi-summary spidev + multiple-data-lanes butterfly spi-lm70llp spi-sc18is602 diff --git a/Documentation/spi/multiple-data-lanes.rst b/Documentation/spi/multiple-data-lanes.rst new file mode 100644 index 000000000000..69cb532d052f --- /dev/null +++ b/Documentation/spi/multiple-data-lanes.rst @@ -0,0 +1,217 @@ +==================================== +SPI devices with multiple data lanes +==================================== + +Some specialized SPI controllers and peripherals support multiple data lanes +that allow reading more than one word at a time in parallel. 
This is different +from dual/quad/octal SPI where multiple bits of a single word are transferred +simultaneously. + +For example, controllers that support parallel flash memories have this feature +as do some simultaneous-sampling ADCs where each channel has its own data lane. + +--------------------- +Describing the wiring +--------------------- + +The ``spi-tx-bus-width`` and ``spi-rx-bus-width`` properties in the devicetree +are used to describe how many data lanes are connected between the controller +and how wide each lane is. The number of items in the array indicates how many +lanes there are, and the value of each item indicates how many bits wide that +lane is. + +For example, a dual-simultaneous-sampling ADC with two 4-bit lanes might be +wired up like this:: + + +--------------+ +----------+ + | SPI | | AD4630 | + | Controller | | ADC | + | | | | + | CS0 |--->| CS | + | SCK |--->| SCK | + | SDO |--->| SDI | + | | | | + | SDIA0 |<---| SDOA0 | + | SDIA1 |<---| SDOA1 | + | SDIA2 |<---| SDOA2 | + | SDIA3 |<---| SDOA3 | + | | | | + | SDIB0 |<---| SDOB0 | + | SDIB1 |<---| SDOB1 | + | SDIB2 |<---| SDOB2 | + | SDIB3 |<---| SDOB3 | + | | | | + +--------------+ +----------+ + +It is described in a devicetree like this:: + + spi { + compatible = "my,spi-controller"; + + ... + + adc@0 { + compatible = "adi,ad4630"; + reg = <0>; + ... + spi-rx-bus-width = <4>, <4>; /* 2 lanes of 4 bits each */ + ... + }; + }; + +In most cases, lanes will be wired up symmetrically (A to A, B to B, etc). If +this isn't the case, extra ``spi-rx-lane-map`` and ``spi-tx-lane-map`` +properties are needed to provide a mapping between controller lanes and the +physical lane wires. 
+ +Here is an example where a multi-lane SPI controller has each lane wired to +separate single-lane peripherals:: + + +--------------+ +----------+ + | SPI | | Thing 1 | + | Controller | | | + | | | | + | CS0 |--->| CS | + | SDO0 |--->| SDI | + | SDI0 |<---| SDO | + | SCLK0 |--->| SCLK | + | | | | + | | +----------+ + | | + | | +----------+ + | | | Thing 2 | + | | | | + | CS1 |--->| CS | + | SDO1 |--->| SDI | + | SDI1 |<---| SDO | + | SCLK1 |--->| SCLK | + | | | | + +--------------+ +----------+ + +This is described in a devicetree like this:: + + spi { + compatible = "my,spi-controller"; + + ... + + thing1@0 { + compatible = "my,thing1"; + reg = <0>; + ... + }; + + thing2@1 { + compatible = "my,thing2"; + reg = <1>; + ... + spi-tx-lane-map = <1>; /* lane 0 is not used, lane 1 is used for tx wire */ + spi-rx-lane-map = <1>; /* lane 0 is not used, lane 1 is used for rx wire */ + ... + }; + }; + + +The default values of ``spi-rx-bus-width`` and ``spi-tx-bus-width`` are ``<1>``, +so these properties can still be omitted even when ``spi-rx-lane-map`` and +``spi-tx-lane-map`` are used. + +---------------------------- +Usage in a peripheral driver +---------------------------- + +These types of SPI controllers generally do not support arbitrary use of the +multiple lanes. Instead, they operate in one of a few defined modes. Peripheral +drivers should set the :c:type:`struct spi_transfer.multi_lane_mode ` +field to indicate which mode they want to use for a given transfer. + +The possible values for this field have the following semantics: + +- :c:macro:`SPI_MULTI_BUS_MODE_SINGLE`: Only use the first lane. Other lanes are + ignored. This means that it is operating just like a conventional SPI + peripheral. This is the default, so it does not need to be explicitly set. 
+ + Example:: + + tx_buf[0] = 0x88; + + struct spi_transfer xfer = { + .tx_buf = tx_buf, + .len = 1, + }; + + spi_sync_transfer(spi, &xfer, 1); + + Assuming the controller is sending the MSB first, the sequence of bits + sent over the tx wire would be (right-most bit is sent first):: + + controller > data bits > peripheral + ---------- ---------------- ---------- + SDO 0 0-0-0-1-0-0-0-1 SDI 0 + +- :c:macro:`SPI_MULTI_BUS_MODE_MIRROR`: Send a single data word over all of the + lanes at the same time. This only makes sense for writes and not + for reads. + + Example:: + + tx_buf[0] = 0x88; + + struct spi_transfer xfer = { + .tx_buf = tx_buf, + .len = 1, + .multi_lane_mode = SPI_MULTI_BUS_MODE_MIRROR, + }; + + spi_sync_transfer(spi, &xfer, 1); + + The data is mirrored on each tx wire:: + + controller > data bits > peripheral + ---------- ---------------- ---------- + SDO 0 0-0-0-1-0-0-0-1 SDI 0 + SDO 1 0-0-0-1-0-0-0-1 SDI 1 + +- :c:macro:`SPI_MULTI_BUS_MODE_STRIPE`: Send or receive two different data words + at the same time, one on each lane. This means that the buffer needs to be + sized to hold data for all lanes. Data is interleaved in the buffer, with + the first word corresponding to lane 0, the second to lane 1, and so on. + Once the last lane is used, the next word in the buffer corresponds to lane + 0 again. Accordingly, the buffer size must be a multiple of the number of + lanes. This mode works for both reads and writes. + + Example:: + + struct spi_transfer xfer = { + .rx_buf = rx_buf, + .len = 2, + .multi_lane_mode = SPI_MULTI_BUS_MODE_STRIPE, + }; + + spi_sync_transfer(spi, &xfer, 1); + + Each rx wire has a different data word sent simultaneously:: + + controller < data bits < peripheral + ---------- ---------------- ---------- + SDI 0 0-0-0-1-0-0-0-1 SDO 0 + SDI 1 1-0-0-0-1-0-0-0 SDO 1 + + After the transfer, ``rx_buf[0] == 0x11`` (word from SDO 0) and + ``rx_buf[1] == 0x88`` (word from SDO 1). 
+ + +----------------------------- +SPI controller driver support +----------------------------- + +To support multiple data lanes, SPI controller drivers need to set +:c:type:`struct spi_controller.num_data_lanes ` to a value +greater than 1. + +Then the part of the driver that handles SPI transfers needs to check the +:c:type:`struct spi_transfer.multi_lane_mode ` field and implement +the appropriate behavior for each supported mode and return an error for +unsupported modes. + +The core SPI code should handle the rest. diff --git a/MAINTAINERS b/MAINTAINERS index c7d4308dbca2..1198dd72073e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1821,6 +1821,12 @@ S: Supported F: drivers/clk/analogbits/* F: include/linux/clk/analogbits* +ANDES ATCSPI200 SPI DRIVER +M: CL Wang +S: Supported +F: Documentation/devicetree/bindings/spi/andestech,ae350-spi.yaml +F: drivers/spi/spi-atcspi200.c + ANDROID DRIVERS M: Greg Kroah-Hartman M: Arve Hjønnevåg @@ -4277,6 +4283,17 @@ W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml F: drivers/pwm/pwm-axi-pwmgen.c +AXIADO SPI DB DRIVER +M: Vladimir Moravcevic +M: Tzu-Hao Wei +M: Swark Yang +M: Prasad Bolisetty +L: linux-spi@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/spi/axiado,ax3000-spi.yaml +F: drivers/spi/spi-axiado.c +F: drivers/spi/spi-axiado.h + AYANEO PLATFORM EC DRIVER M: Antheas Kapenekakis L: platform-driver-x86@vger.kernel.org @@ -18991,6 +19008,15 @@ S: Maintained F: Documentation/devicetree/bindings/sound/trivial-codec.yaml F: sound/soc/codecs/tfa9879* +NXP XSPI DRIVER +M: Han Xu +M: Haibo Chen +L: linux-spi@vger.kernel.org +L: imx@lists.linux.dev +S: Maintained +F: Documentation/devicetree/bindings/spi/nxp,imx94-xspi.yaml +F: drivers/spi/spi-nxp-xspi.c + NXP-NCI NFC DRIVER S: Orphan F: Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 5520403896fc..c3b2f02f5912 100644 --- 
a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -136,6 +136,15 @@ config SPI_AR934X This enables support for the SPI controller present on the Qualcomm Atheros AR934X/QCA95XX SoCs. +config SPI_ATCSPI200 + tristate "Andes ATCSPI200 SPI controller" + depends on ARCH_ANDES + help + SPI driver for Andes ATCSPI200 SPI controller. + ATCSPI200 controller supports DMA and PIO modes. When DMA + is not available, the driver automatically falls back to + PIO mode. + config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" depends on ATH79 || COMPILE_TEST @@ -204,6 +213,17 @@ config SPI_AXI_SPI_ENGINE It is part of the SPI Engine framework that is used in some Analog Devices reference designs for FPGAs. +config SPI_AXIADO + tristate "Axiado DB-H SPI controller" + depends on SPI_MEM + depends on ARCH_AXIADO || COMPILE_TEST + help + Enable support for the SPI controller present on Axiado AX3000 SoCs. + + The implementation supports host-only mode and does not provide target + functionality. It is intended for use cases where the SoC acts as the SPI + host, communicating with peripheral devices such as flash memory. + config SPI_BCM2835 tristate "BCM2835 SPI controller" depends on GPIOLIB @@ -365,33 +385,6 @@ config SPI_DW_MMIO tristate "Memory-mapped io interface driver for DW SPI core" depends on HAS_IOMEM -config SPI_DW_BT1 - tristate "Baikal-T1 SPI driver for DW SPI core" - depends on MIPS_BAIKAL_T1 || COMPILE_TEST - select MULTIPLEXER - help - Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI - controllers. Two of them are pretty much normal: with IRQ, DMA, - FIFOs of 64 words depth, 4x CSs, but the third one as being a - part of the Baikal-T1 System Boot Controller has got a very - limited resources: no IRQ, no DMA, only a single native - chip-select and Tx/Rx FIFO with just 8 words depth available. - The later one is normally connected to an external SPI-nor flash - of 128Mb (in general can be of bigger size). 
- -config SPI_DW_BT1_DIRMAP - bool "Directly mapped Baikal-T1 Boot SPI flash support" - depends on SPI_DW_BT1 - help - Directly mapped SPI flash memory is an interface specific to the - Baikal-T1 System Boot Controller. It is a 16MB MMIO region, which - can be used to access a peripheral memory device just by - reading/writing data from/to it. Note that the system APB bus - will stall during each IO from/to the dirmap region until the - operation is finished. So try not to use it concurrently with - time-critical tasks (like the SPI memory operations implemented - in this driver). - endif config SPI_DLN2 @@ -481,6 +474,16 @@ config SPI_NXP_FLEXSPI This controller does not support generic SPI messages and only supports the high-level SPI memory interface. +config SPI_NXP_XSPI + tristate "NXP xSPI controller" + depends on ARCH_MXC || COMPILE_TEST + depends on HAS_IOMEM + help + This enables support for the xSPI controller. Up to two devices + can be connected to one host. + This controller does not support generic SPI messages and only + supports the high-level SPI memory interface. 
+ config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GPIOLIB || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 863b628ff1ec..9d36190a9884 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -26,12 +26,14 @@ obj-$(CONFIG_SPI_APPLE) += spi-apple.o obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o +obj-$(CONFIG_SPI_ATCSPI200) += spi-atcspi200.o obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o obj-$(CONFIG_SPI_ATH79) += spi-ath79.o obj-$(CONFIG_SPI_AU1550) += spi-au1550.o obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o +obj-$(CONFIG_SPI_AXIADO) += spi-axiado.o obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o @@ -52,7 +54,6 @@ obj-$(CONFIG_SPI_DLN2) += spi-dln2.o obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o spi-dw-y := spi-dw-core.o spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o -obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o @@ -102,6 +103,7 @@ obj-$(CONFIG_SPI_WPCM_FIU) += spi-wpcm-fiu.o obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o +obj-$(CONFIG_SPI_NXP_XSPI) += spi-nxp-xspi.o obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index d7a3d85d00c2..aaf7f4c46b22 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -1382,7 +1382,6 @@ static int atmel_qspi_probe(struct platform_device *pdev) ctrl->bus_num = -1; ctrl->mem_ops = &atmel_qspi_mem_ops; 
ctrl->num_chipselect = 1; - ctrl->dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, ctrl); /* Map the registers */ diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c index 70327aebc26b..7b6c09f91fef 100644 --- a/drivers/spi/spi-airoha-snfi.c +++ b/drivers/spi/spi-airoha-snfi.c @@ -1124,7 +1124,6 @@ static int airoha_snand_probe(struct platform_device *pdev) ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ctrl->mode_bits = SPI_RX_DUAL; ctrl->setup = airoha_snand_setup; - device_set_node(&ctrl->dev, dev_fwnode(dev)); err = airoha_snand_nfi_init(as_ctrl); if (err) diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c index e163774fd65b..fc81de2610ef 100644 --- a/drivers/spi/spi-altera-platform.c +++ b/drivers/spi/spi-altera-platform.c @@ -67,8 +67,6 @@ static int altera_spi_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); } - host->dev.of_node = pdev->dev.of_node; - hw = spi_controller_get_devdata(host); hw->dev = &pdev->dev; diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c index eb503790017b..7ee4c92e6e09 100644 --- a/drivers/spi/spi-amlogic-spifc-a1.c +++ b/drivers/spi/spi-amlogic-spifc-a1.c @@ -358,7 +358,6 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev) return ret; ctrl->num_chipselect = 1; - ctrl->dev.of_node = pdev->dev.of_node; ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ctrl->auto_runtime_pm = true; ctrl->mem_ops = &amlogic_spifc_a1_mem_ops; diff --git a/drivers/spi/spi-amlogic-spisg.c b/drivers/spi/spi-amlogic-spisg.c index bcd7ec291ad0..1509df2b17ae 100644 --- a/drivers/spi/spi-amlogic-spisg.c +++ b/drivers/spi/spi-amlogic-spisg.c @@ -781,7 +781,6 @@ static int aml_spisg_probe(struct platform_device *pdev) pm_runtime_resume_and_get(&spisg->pdev->dev); ctlr->num_chipselect = 4; - ctlr->dev.of_node = pdev->dev.of_node; ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST | SPI_3WIRE | SPI_TX_QUAD | 
SPI_RX_QUAD; ctlr->max_speed_hz = 1000 * 1000 * 100; diff --git a/drivers/spi/spi-apple.c b/drivers/spi/spi-apple.c index 2fee7057ecc9..61eefb08d2a7 100644 --- a/drivers/spi/spi-apple.c +++ b/drivers/spi/spi-apple.c @@ -485,7 +485,6 @@ static int apple_spi_probe(struct platform_device *pdev) if (ret) return dev_err_probe(&pdev->dev, ret, "Unable to bind to interrupt\n"); - ctlr->dev.of_node = pdev->dev.of_node; ctlr->bus_num = pdev->id; ctlr->num_chipselect = 1; ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST; diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c index 86c54fff9d6e..2210186feab8 100644 --- a/drivers/spi/spi-ar934x.c +++ b/drivers/spi/spi-ar934x.c @@ -195,7 +195,6 @@ static int ar934x_spi_probe(struct platform_device *pdev) ctlr->transfer_one_message = ar934x_spi_transfer_one_message; ctlr->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); - ctlr->dev.of_node = pdev->dev.of_node; ctlr->num_chipselect = 3; dev_set_drvdata(&pdev->dev, ctlr); diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 02c1e625742d..78248729d3e9 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -813,7 +813,6 @@ MODULE_DEVICE_TABLE(of, a3700_spi_dt_ids); static int a3700_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *of_node = dev->of_node; struct spi_controller *host; struct a3700_spi *spi; u32 num_cs = 0; @@ -826,14 +825,13 @@ static int a3700_spi_probe(struct platform_device *pdev) goto out; } - if (of_property_read_u32(of_node, "num-cs", &num_cs)) { + if (of_property_read_u32(dev->of_node, "num-cs", &num_cs)) { dev_err(dev, "could not find num-cs\n"); ret = -ENXIO; goto error; } host->bus_num = pdev->id; - host->dev.of_node = of_node; host->mode_bits = SPI_MODE_3; host->num_chipselect = num_cs; host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32); diff --git a/drivers/spi/spi-aspeed-smc.c 
b/drivers/spi/spi-aspeed-smc.c index db3e096f2eb0..9c286e534bf0 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -48,6 +48,8 @@ /* CEx Address Decoding Range Register */ #define CE0_SEGMENT_ADDR_REG 0x30 +#define FULL_DUPLEX_RX_DATA 0x1e4 + /* CEx Read timing compensation register */ #define CE0_TIMING_COMPENSATION_REG 0x94 @@ -81,6 +83,7 @@ struct aspeed_spi_data { u32 hclk_mask; u32 hdiv_max; u32 min_window_size; + bool full_duplex; phys_addr_t (*segment_start)(struct aspeed_spi *aspi, u32 reg); phys_addr_t (*segment_end)(struct aspeed_spi *aspi, u32 reg); @@ -105,6 +108,7 @@ struct aspeed_spi { struct clk *clk; u32 clk_freq; + u8 cs_change; struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS]; }; @@ -280,7 +284,8 @@ stop_user: } /* support for 1-1-1, 1-1-2 or 1-1-4 */ -static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) +static bool aspeed_spi_supports_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) { if (op->cmd.buswidth > 1) return false; @@ -305,7 +310,8 @@ static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op static const struct aspeed_spi_data ast2400_spi_data; -static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +static int do_aspeed_spi_exec_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) { struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller); struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)]; @@ -367,11 +373,12 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o return ret; } -static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +static int aspeed_spi_exec_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) { int ret; - ret = do_aspeed_spi_exec_op(mem, op); + ret = do_aspeed_spi_exec_mem_op(mem, op); if (ret) dev_err(&mem->spi->dev, "operation failed: %d\n", ret); return ret; @@ -773,8 +780,8 @@ static 
ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc, } static const struct spi_controller_mem_ops aspeed_spi_mem_ops = { - .supports_op = aspeed_spi_supports_op, - .exec_op = aspeed_spi_exec_op, + .supports_op = aspeed_spi_supports_mem_op, + .exec_op = aspeed_spi_exec_mem_op, .get_name = aspeed_spi_get_name, .dirmap_create = aspeed_spi_dirmap_create, .dirmap_read = aspeed_spi_dirmap_read, @@ -843,6 +850,110 @@ static void aspeed_spi_enable(struct aspeed_spi *aspi, bool enable) aspeed_spi_chip_enable(aspi, cs, enable); } +static int aspeed_spi_user_prepare_msg(struct spi_controller *ctlr, + struct spi_message *msg) +{ + struct aspeed_spi *aspi = + (struct aspeed_spi *)spi_controller_get_devdata(ctlr); + const struct aspeed_spi_data *data = aspi->data; + struct spi_device *spi = msg->spi; + u32 cs = spi_get_chipselect(spi, 0); + struct aspeed_spi_chip *chip = &aspi->chips[cs]; + u32 ctrl_val; + u32 clk_div = data->get_clk_div(chip, spi->max_speed_hz); + + ctrl_val = chip->ctl_val[ASPEED_SPI_BASE]; + ctrl_val &= ~CTRL_IO_MODE_MASK & data->hclk_mask; + ctrl_val |= clk_div; + chip->ctl_val[ASPEED_SPI_BASE] = ctrl_val; + + if (aspi->cs_change == 0) + aspeed_spi_start_user(chip); + + return 0; +} + +static int aspeed_spi_user_unprepare_msg(struct spi_controller *ctlr, + struct spi_message *msg) +{ + struct aspeed_spi *aspi = + (struct aspeed_spi *)spi_controller_get_devdata(ctlr); + struct spi_device *spi = msg->spi; + u32 cs = spi_get_chipselect(spi, 0); + struct aspeed_spi_chip *chip = &aspi->chips[cs]; + + if (aspi->cs_change == 0) + aspeed_spi_stop_user(chip); + + return 0; +} + +static void aspeed_spi_user_transfer_tx(struct aspeed_spi *aspi, + struct spi_device *spi, + const u8 *tx_buf, u8 *rx_buf, + void *dst, u32 len) +{ + const struct aspeed_spi_data *data = aspi->data; + bool full_duplex_transfer = data->full_duplex && tx_buf == rx_buf; + u32 i; + + if (full_duplex_transfer && + !!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | + SPI_RX_DUAL | 
SPI_RX_QUAD))) { + dev_err(aspi->dev, + "full duplex is only supported for single IO mode\n"); + return; + } + + for (i = 0; i < len; i++) { + writeb(tx_buf[i], dst); + if (full_duplex_transfer) + rx_buf[i] = readb(aspi->regs + FULL_DUPLEX_RX_DATA); + } +} + +static int aspeed_spi_user_transfer(struct spi_controller *ctlr, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct aspeed_spi *aspi = + (struct aspeed_spi *)spi_controller_get_devdata(ctlr); + u32 cs = spi_get_chipselect(spi, 0); + struct aspeed_spi_chip *chip = &aspi->chips[cs]; + void __iomem *ahb_base = aspi->chips[cs].ahb_base; + const u8 *tx_buf = xfer->tx_buf; + u8 *rx_buf = xfer->rx_buf; + + dev_dbg(aspi->dev, + "[cs%d] xfer: width %d, len %u, tx %p, rx %p\n", + cs, xfer->bits_per_word, xfer->len, + tx_buf, rx_buf); + + if (tx_buf) { + if (spi->mode & SPI_TX_DUAL) + aspeed_spi_set_io_mode(chip, CTRL_IO_DUAL_DATA); + else if (spi->mode & SPI_TX_QUAD) + aspeed_spi_set_io_mode(chip, CTRL_IO_QUAD_DATA); + + aspeed_spi_user_transfer_tx(aspi, spi, tx_buf, rx_buf, + (void *)ahb_base, xfer->len); + } + + if (rx_buf && rx_buf != tx_buf) { + if (spi->mode & SPI_RX_DUAL) + aspeed_spi_set_io_mode(chip, CTRL_IO_DUAL_DATA); + else if (spi->mode & SPI_RX_QUAD) + aspeed_spi_set_io_mode(chip, CTRL_IO_QUAD_DATA); + + ioread8_rep(ahb_base, rx_buf, xfer->len); + } + + xfer->error = 0; + aspi->cs_change = xfer->cs_change; + + return 0; +} + static int aspeed_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -898,7 +1009,9 @@ static int aspeed_spi_probe(struct platform_device *pdev) ctlr->setup = aspeed_spi_setup; ctlr->cleanup = aspeed_spi_cleanup; ctlr->num_chipselect = of_get_available_child_count(dev->of_node); - ctlr->dev.of_node = dev->of_node; + ctlr->prepare_message = aspeed_spi_user_prepare_msg; + ctlr->unprepare_message = aspeed_spi_user_unprepare_msg; + ctlr->transfer_one = aspeed_spi_user_transfer; aspi->num_cs = ctlr->num_chipselect; @@ -1455,6 +1568,7 @@ static const 
struct aspeed_spi_data ast2400_fmc_data = { .hclk_mask = 0xfffff0ff, .hdiv_max = 1, .min_window_size = 0x800000, + .full_duplex = false, .calibrate = aspeed_spi_calibrate, .get_clk_div = aspeed_get_clk_div_ast2400, .segment_start = aspeed_spi_segment_start, @@ -1471,6 +1585,7 @@ static const struct aspeed_spi_data ast2400_spi_data = { .timing = 0x14, .hclk_mask = 0xfffff0ff, .hdiv_max = 1, + .full_duplex = false, .get_clk_div = aspeed_get_clk_div_ast2400, .calibrate = aspeed_spi_calibrate, /* No segment registers */ @@ -1485,6 +1600,7 @@ static const struct aspeed_spi_data ast2500_fmc_data = { .hclk_mask = 0xffffd0ff, .hdiv_max = 1, .min_window_size = 0x800000, + .full_duplex = false, .get_clk_div = aspeed_get_clk_div_ast2500, .calibrate = aspeed_spi_calibrate, .segment_start = aspeed_spi_segment_start, @@ -1502,6 +1618,7 @@ static const struct aspeed_spi_data ast2500_spi_data = { .hclk_mask = 0xffffd0ff, .hdiv_max = 1, .min_window_size = 0x800000, + .full_duplex = false, .get_clk_div = aspeed_get_clk_div_ast2500, .calibrate = aspeed_spi_calibrate, .segment_start = aspeed_spi_segment_start, @@ -1520,6 +1637,7 @@ static const struct aspeed_spi_data ast2600_fmc_data = { .hclk_mask = 0xf0fff0ff, .hdiv_max = 2, .min_window_size = 0x200000, + .full_duplex = false, .get_clk_div = aspeed_get_clk_div_ast2600, .calibrate = aspeed_spi_ast2600_calibrate, .segment_start = aspeed_spi_segment_ast2600_start, @@ -1538,6 +1656,7 @@ static const struct aspeed_spi_data ast2600_spi_data = { .hclk_mask = 0xf0fff0ff, .hdiv_max = 2, .min_window_size = 0x200000, + .full_duplex = false, .get_clk_div = aspeed_get_clk_div_ast2600, .calibrate = aspeed_spi_ast2600_calibrate, .segment_start = aspeed_spi_segment_ast2600_start, @@ -1556,6 +1675,7 @@ static const struct aspeed_spi_data ast2700_fmc_data = { .hclk_mask = 0xf0fff0ff, .hdiv_max = 2, .min_window_size = 0x10000, + .full_duplex = true, .get_clk_div = aspeed_get_clk_div_ast2600, .calibrate = aspeed_spi_ast2600_calibrate, .segment_start = 
aspeed_spi_segment_ast2700_start, @@ -1573,6 +1693,7 @@ static const struct aspeed_spi_data ast2700_spi_data = { .hclk_mask = 0xf0fff0ff, .hdiv_max = 2, .min_window_size = 0x10000, + .full_duplex = true, .get_clk_div = aspeed_get_clk_div_ast2600, .calibrate = aspeed_spi_ast2600_calibrate, .segment_start = aspeed_spi_segment_ast2700_start, diff --git a/drivers/spi/spi-atcspi200.c b/drivers/spi/spi-atcspi200.c new file mode 100644 index 000000000000..60a37ff5c6f5 --- /dev/null +++ b/drivers/spi/spi-atcspi200.c @@ -0,0 +1,679 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for Andes ATCSPI200 SPI Controller + * + * Copyright (C) 2025 Andes Technology Corporation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Register definitions */ +#define ATCSPI_TRANS_FMT 0x10 /* SPI transfer format register */ +#define ATCSPI_TRANS_CTRL 0x20 /* SPI transfer control register */ +#define ATCSPI_CMD 0x24 /* SPI command register */ +#define ATCSPI_ADDR 0x28 /* SPI address register */ +#define ATCSPI_DATA 0x2C /* SPI data register */ +#define ATCSPI_CTRL 0x30 /* SPI control register */ +#define ATCSPI_STATUS 0x34 /* SPI status register */ +#define ATCSPI_TIMING 0x40 /* SPI interface timing register */ +#define ATCSPI_CONFIG 0x7C /* SPI configuration register */ + +/* Transfer format register */ +#define TRANS_FMT_CPHA BIT(0) +#define TRANS_FMT_CPOL BIT(1) +#define TRANS_FMT_DATA_MERGE_EN BIT(7) +#define TRANS_FMT_DATA_LEN_MASK GENMASK(12, 8) +#define TRANS_FMT_ADDR_LEN_MASK GENMASK(17, 16) +#define TRANS_FMT_DATA_LEN(x) FIELD_PREP(TRANS_FMT_DATA_LEN_MASK, (x) - 1) +#define TRANS_FMT_ADDR_LEN(x) FIELD_PREP(TRANS_FMT_ADDR_LEN_MASK, (x) - 1) + +/* Transfer control register */ +#define TRANS_MODE_MASK GENMASK(27, 24) +#define TRANS_MODE_W_ONLY FIELD_PREP(TRANS_MODE_MASK, 1) +#define TRANS_MODE_R_ONLY FIELD_PREP(TRANS_MODE_MASK, 2) +#define 
TRANS_MODE_NONE_DATA FIELD_PREP(TRANS_MODE_MASK, 7) +#define TRANS_MODE_DMY_READ FIELD_PREP(TRANS_MODE_MASK, 9) +#define TRANS_FIELD_DECNZ(m, x) ((x) ? FIELD_PREP(m, (x) - 1) : 0) +#define TRANS_RD_TRANS_CNT(x) TRANS_FIELD_DECNZ(GENMASK(8, 0), x) +#define TRANS_DUMMY_CNT(x) TRANS_FIELD_DECNZ(GENMASK(10, 9), x) +#define TRANS_WR_TRANS_CNT(x) TRANS_FIELD_DECNZ(GENMASK(20, 12), x) +#define TRANS_DUAL_QUAD(x) FIELD_PREP(GENMASK(23, 22), (x)) +#define TRANS_ADDR_FMT BIT(28) +#define TRANS_ADDR_EN BIT(29) +#define TRANS_CMD_EN BIT(30) + +/* Control register */ +#define CTRL_SPI_RST BIT(0) +#define CTRL_RX_FIFO_RST BIT(1) +#define CTRL_TX_FIFO_RST BIT(2) +#define CTRL_RX_DMA_EN BIT(3) +#define CTRL_TX_DMA_EN BIT(4) + +/* Status register */ +#define ATCSPI_ACTIVE BIT(0) +#define ATCSPI_RX_EMPTY BIT(14) +#define ATCSPI_TX_FULL BIT(23) + +/* Interface timing setting */ +#define TIMING_SCLK_DIV_MASK GENMASK(7, 0) +#define TIMING_SCLK_DIV_MAX 0xFE + +/* Configuration register */ +#define RXFIFO_SIZE(x) FIELD_GET(GENMASK(3, 0), (x)) +#define TXFIFO_SIZE(x) FIELD_GET(GENMASK(7, 4), (x)) + +/* driver configurations */ +#define ATCSPI_MAX_TRANS_LEN 512 +#define ATCSPI_MAX_SPEED_HZ 50000000 +#define ATCSPI_RDY_TIMEOUT_US 1000000 +#define ATCSPI_XFER_TIMEOUT(n) ((n) * 10) +#define ATCSPI_MAX_CS_NUM 1 +#define ATCSPI_DMA_THRESHOLD 256 +#define ATCSPI_BITS_PER_UINT 8 +#define ATCSPI_DATA_MERGE_EN 1 +#define ATCSPI_DMA_SUPPORT 1 + +/** + * struct atcspi_dev - Andes ATCSPI200 SPI controller private data + * @host: Pointer to the SPI controller structure. + * @mutex_lock: A mutex to protect concurrent access to the controller. + * @dma_completion: A completion to signal the end of a DMA transfer. + * @dev: Pointer to the device structure. + * @regmap: Register map for accessing controller registers. + * @clk: Pointer to the controller's functional clock. + * @dma_addr: The physical address of the SPI data register for DMA. + * @clk_rate: The cached frequency of the functional clock. 
+ * @sclk_rate: The target frequency for the SPI clock (SCLK). + * @txfifo_size: The size of the transmit FIFO in bytes. + * @rxfifo_size: The size of the receive FIFO in bytes. + * @data_merge: A flag indicating if the data merge mode is enabled for + * the current transfer. + * @use_dma: Enable DMA mode if ATCSPI_DMA_SUPPORT is set and DMA is + * successfully configured. + */ +struct atcspi_dev { + struct spi_controller *host; + struct mutex mutex_lock; + struct completion dma_completion; + struct device *dev; + struct regmap *regmap; + struct clk *clk; + dma_addr_t dma_addr; + unsigned int clk_rate; + unsigned int sclk_rate; + unsigned int txfifo_size; + unsigned int rxfifo_size; + bool data_merge; + bool use_dma; +}; + +static int atcspi_wait_fifo_ready(struct atcspi_dev *spi, + enum spi_mem_data_dir dir) +{ + unsigned int val; + unsigned int mask; + int ret; + + mask = (dir == SPI_MEM_DATA_OUT) ? ATCSPI_TX_FULL : ATCSPI_RX_EMPTY; + ret = regmap_read_poll_timeout(spi->regmap, + ATCSPI_STATUS, + val, + !(val & mask), + 0, + ATCSPI_RDY_TIMEOUT_US); + if (ret) + dev_info(spi->dev, "Timed out waiting for FIFO ready\n"); + + return ret; +} + +static int atcspi_xfer_data_poll(struct atcspi_dev *spi, + const struct spi_mem_op *op) +{ + void *rx_buf = op->data.buf.in; + const void *tx_buf = op->data.buf.out; + unsigned int val; + int trans_bytes = op->data.nbytes; + int num_byte; + int ret = 0; + + num_byte = spi->data_merge ? 
4 : 1; + while (trans_bytes) { + if (op->data.dir == SPI_MEM_DATA_OUT) { + ret = atcspi_wait_fifo_ready(spi, SPI_MEM_DATA_OUT); + if (ret) + return ret; + + if (spi->data_merge) + val = *(unsigned int *)tx_buf; + else + val = *(unsigned char *)tx_buf; + regmap_write(spi->regmap, ATCSPI_DATA, val); + tx_buf = (unsigned char *)tx_buf + num_byte; + } else { + ret = atcspi_wait_fifo_ready(spi, SPI_MEM_DATA_IN); + if (ret) + return ret; + + regmap_read(spi->regmap, ATCSPI_DATA, &val); + if (spi->data_merge) + *(unsigned int *)rx_buf = val; + else + *(unsigned char *)rx_buf = (unsigned char)val; + rx_buf = (unsigned char *)rx_buf + num_byte; + } + trans_bytes -= num_byte; + } + + return ret; +} + +static void atcspi_set_trans_ctl(struct atcspi_dev *spi, + const struct spi_mem_op *op) +{ + unsigned int tc = 0; + + if (op->cmd.nbytes) + tc |= TRANS_CMD_EN; + if (op->addr.nbytes) + tc |= TRANS_ADDR_EN; + if (op->addr.buswidth > 1) + tc |= TRANS_ADDR_FMT; + if (op->data.nbytes) { + tc |= TRANS_DUAL_QUAD(ffs(op->data.buswidth) - 1); + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->dummy.nbytes) + tc |= TRANS_MODE_DMY_READ | + TRANS_DUMMY_CNT(op->dummy.nbytes); + else + tc |= TRANS_MODE_R_ONLY; + tc |= TRANS_RD_TRANS_CNT(op->data.nbytes); + } else { + tc |= TRANS_MODE_W_ONLY | + TRANS_WR_TRANS_CNT(op->data.nbytes); + } + } else { + tc |= TRANS_MODE_NONE_DATA; + } + regmap_write(spi->regmap, ATCSPI_TRANS_CTRL, tc); +} + +static void atcspi_set_trans_fmt(struct atcspi_dev *spi, + const struct spi_mem_op *op) +{ + unsigned int val; + + regmap_read(spi->regmap, ATCSPI_TRANS_FMT, &val); + if (op->data.nbytes) { + if (ATCSPI_DATA_MERGE_EN && ATCSPI_BITS_PER_UINT == 8 && + !(op->data.nbytes % 4)) { + val |= TRANS_FMT_DATA_MERGE_EN; + spi->data_merge = true; + } else { + val &= ~TRANS_FMT_DATA_MERGE_EN; + spi->data_merge = false; + } + } + + val = (val & ~TRANS_FMT_ADDR_LEN_MASK) | + TRANS_FMT_ADDR_LEN(op->addr.nbytes); + regmap_write(spi->regmap, ATCSPI_TRANS_FMT, val); +} + 
+static void atcspi_prepare_trans(struct atcspi_dev *spi, + const struct spi_mem_op *op) +{ + atcspi_set_trans_fmt(spi, op); + atcspi_set_trans_ctl(spi, op); + if (op->addr.nbytes) + regmap_write(spi->regmap, ATCSPI_ADDR, op->addr.val); + regmap_write(spi->regmap, ATCSPI_CMD, op->cmd.opcode); +} + +static int atcspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct atcspi_dev *spi; + + spi = spi_controller_get_devdata(mem->spi->controller); + op->data.nbytes = min(op->data.nbytes, ATCSPI_MAX_TRANS_LEN); + + /* DMA needs to be aligned to 4 byte */ + if (spi->use_dma && op->data.nbytes >= ATCSPI_DMA_THRESHOLD) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 4); + + return 0; +} + +static int atcspi_dma_config(struct atcspi_dev *spi, bool is_rx) +{ + struct dma_slave_config conf = { 0 }; + struct dma_chan *chan; + + if (is_rx) { + chan = spi->host->dma_rx; + conf.direction = DMA_DEV_TO_MEM; + conf.src_addr = spi->dma_addr; + } else { + chan = spi->host->dma_tx; + conf.direction = DMA_MEM_TO_DEV; + conf.dst_addr = spi->dma_addr; + } + conf.dst_maxburst = spi->rxfifo_size / 2; + conf.src_maxburst = spi->txfifo_size / 2; + + if (spi->data_merge) { + conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + } else { + conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + } + + return dmaengine_slave_config(chan, &conf); +} + +static void atcspi_dma_callback(void *arg) +{ + struct completion *dma_completion = arg; + + complete(dma_completion); +} + +static int atcspi_dma_trans(struct atcspi_dev *spi, + const struct spi_mem_op *op) +{ + struct dma_async_tx_descriptor *desc; + struct dma_chan *dma_ch; + struct sg_table sgt; + enum dma_transfer_direction dma_dir; + dma_cookie_t cookie; + unsigned int ctrl; + int timeout; + int ret; + + regmap_read(spi->regmap, ATCSPI_CTRL, &ctrl); + ctrl |= CTRL_TX_DMA_EN | CTRL_RX_DMA_EN; + regmap_write(spi->regmap, ATCSPI_CTRL, 
ctrl); + if (op->data.dir == SPI_MEM_DATA_IN) { + ret = atcspi_dma_config(spi, TRUE); + dma_dir = DMA_DEV_TO_MEM; + dma_ch = spi->host->dma_rx; + } else { + ret = atcspi_dma_config(spi, FALSE); + dma_dir = DMA_MEM_TO_DEV; + dma_ch = spi->host->dma_tx; + } + if (ret) + return ret; + + ret = spi_controller_dma_map_mem_op_data(spi->host, op, &sgt); + if (ret) + return ret; + + desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents, dma_dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + ret = -ENOMEM; + goto exit_unmap; + } + + reinit_completion(&spi->dma_completion); + desc->callback = atcspi_dma_callback; + desc->callback_param = &spi->dma_completion; + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) + goto exit_unmap; + + dma_async_issue_pending(dma_ch); + timeout = msecs_to_jiffies(ATCSPI_XFER_TIMEOUT(op->data.nbytes)); + if (!wait_for_completion_timeout(&spi->dma_completion, timeout)) { + ret = -ETIMEDOUT; + dmaengine_terminate_all(dma_ch); + } + +exit_unmap: + spi_controller_dma_unmap_mem_op_data(spi->host, op, &sgt); + + return ret; +} + +static int atcspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct spi_device *spi_dev = mem->spi; + struct atcspi_dev *spi; + unsigned int val; + int ret; + + spi = spi_controller_get_devdata(spi_dev->controller); + mutex_lock(&spi->mutex_lock); + atcspi_prepare_trans(spi, op); + if (op->data.nbytes) { + if (spi->use_dma && op->data.nbytes >= ATCSPI_DMA_THRESHOLD) + ret = atcspi_dma_trans(spi, op); + else + ret = atcspi_xfer_data_poll(spi, op); + if (ret) { + dev_info(spi->dev, "SPI transmission failed\n"); + goto exec_mem_exit; + } + } + + ret = regmap_read_poll_timeout(spi->regmap, + ATCSPI_STATUS, + val, + !(val & ATCSPI_ACTIVE), + 0, + ATCSPI_RDY_TIMEOUT_US); + if (ret) + dev_info(spi->dev, "Timed out waiting for ATCSPI_ACTIVE\n"); + +exec_mem_exit: + mutex_unlock(&spi->mutex_lock); + + return ret; +} + +static const struct spi_controller_mem_ops 
atcspi_mem_ops = { + .exec_op = atcspi_exec_mem_op, + .adjust_op_size = atcspi_adjust_op_size, +}; + +static int atcspi_setup(struct atcspi_dev *spi) +{ + unsigned int ctrl_val; + unsigned int val; + int actual_spi_sclk_f; + int ret; + unsigned char div; + + ctrl_val = CTRL_TX_FIFO_RST | CTRL_RX_FIFO_RST | CTRL_SPI_RST; + regmap_write(spi->regmap, ATCSPI_CTRL, ctrl_val); + ret = regmap_read_poll_timeout(spi->regmap, + ATCSPI_CTRL, + val, + !(val & ctrl_val), + 0, + ATCSPI_RDY_TIMEOUT_US); + if (ret) + return dev_err_probe(spi->dev, ret, + "Timed out waiting for ATCSPI_CTRL\n"); + + val = TRANS_FMT_DATA_LEN(ATCSPI_BITS_PER_UINT) | + TRANS_FMT_CPHA | TRANS_FMT_CPOL; + regmap_write(spi->regmap, ATCSPI_TRANS_FMT, val); + + regmap_read(spi->regmap, ATCSPI_CONFIG, &val); + spi->txfifo_size = BIT(TXFIFO_SIZE(val) + 1); + spi->rxfifo_size = BIT(RXFIFO_SIZE(val) + 1); + + regmap_read(spi->regmap, ATCSPI_TIMING, &val); + val &= ~TIMING_SCLK_DIV_MASK; + + /* + * The SCLK_DIV value 0xFF is special and indicates that the + * SCLK rate should be the same as the SPI clock rate. + */ + if (spi->sclk_rate >= spi->clk_rate) { + div = TIMING_SCLK_DIV_MASK; + } else { + /* + * The divider value is determined as follows: + * 1. If the divider can generate the exact target frequency, + * use that setting. + * 2. If an exact match is not possible, select the closest + * available setting that is lower than the target frequency. 
+ */ + div = (spi->clk_rate + (spi->sclk_rate * 2 - 1)) / + (spi->sclk_rate * 2) - 1; + + /* Check if the actual SPI clock is lower than the target */ + actual_spi_sclk_f = spi->clk_rate / ((div + 1) * 2); + if (actual_spi_sclk_f < spi->sclk_rate) + dev_info(spi->dev, + "Clock adjusted %d to %d due to divider limitation", + spi->sclk_rate, actual_spi_sclk_f); + + if (div > TIMING_SCLK_DIV_MAX) + return dev_err_probe(spi->dev, -EINVAL, + "Unsupported SPI clock %d\n", + spi->sclk_rate); + } + val |= div; + regmap_write(spi->regmap, ATCSPI_TIMING, val); + + return ret; +} + +static int atcspi_init_resources(struct platform_device *pdev, + struct atcspi_dev *spi, + struct resource **mem_res) +{ + void __iomem *base; + const struct regmap_config atcspi_regmap_cfg = { + .name = "atcspi", + .reg_bits = 32, + .val_bits = 32, + .cache_type = REGCACHE_NONE, + .reg_stride = 4, + .pad_bits = 0, + .max_register = ATCSPI_CONFIG + }; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, mem_res); + if (IS_ERR(base)) + return dev_err_probe(spi->dev, PTR_ERR(base), + "Failed to get ioremap resource\n"); + + spi->regmap = devm_regmap_init_mmio(spi->dev, base, + &atcspi_regmap_cfg); + if (IS_ERR(spi->regmap)) + return dev_err_probe(spi->dev, PTR_ERR(spi->regmap), + "Failed to init regmap\n"); + + spi->clk = devm_clk_get(spi->dev, NULL); + if (IS_ERR(spi->clk)) + return dev_err_probe(spi->dev, PTR_ERR(spi->clk), + "Failed to get SPI clock\n"); + + spi->sclk_rate = ATCSPI_MAX_SPEED_HZ; + return 0; +} + +static int atcspi_configure_dma(struct atcspi_dev *spi) +{ + struct dma_chan *dma_chan; + int ret = 0; + + dma_chan = devm_dma_request_chan(spi->dev, "rx"); + if (IS_ERR(dma_chan)) { + ret = PTR_ERR(dma_chan); + goto err_exit; + } + spi->host->dma_rx = dma_chan; + + dma_chan = devm_dma_request_chan(spi->dev, "tx"); + if (IS_ERR(dma_chan)) { + ret = PTR_ERR(dma_chan); + goto free_rx; + } + spi->host->dma_tx = dma_chan; + init_completion(&spi->dma_completion); + + return ret; + 
+free_rx: + dma_release_channel(spi->host->dma_rx); + spi->host->dma_rx = NULL; +err_exit: + return ret; +} + +static int atcspi_enable_clk(struct atcspi_dev *spi) +{ + int ret; + + ret = clk_prepare_enable(spi->clk); + if (ret) + return dev_err_probe(spi->dev, ret, + "Failed to enable clock\n"); + + spi->clk_rate = clk_get_rate(spi->clk); + if (!spi->clk_rate) + return dev_err_probe(spi->dev, -EINVAL, + "Failed to get SPI clock rate\n"); + + return 0; +} + +static void atcspi_init_controller(struct platform_device *pdev, + struct atcspi_dev *spi, + struct spi_controller *host, + struct resource *mem_res) +{ + /* Get the physical address of the data register for DMA transfers. */ + spi->dma_addr = (dma_addr_t)(mem_res->start + ATCSPI_DATA); + + /* Initialize controller properties */ + host->bus_num = pdev->id; + host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_QUAD | SPI_TX_QUAD; + host->num_chipselect = ATCSPI_MAX_CS_NUM; + host->mem_ops = &atcspi_mem_ops; + host->max_speed_hz = spi->sclk_rate; +} + +static int atcspi_probe(struct platform_device *pdev) +{ + struct spi_controller *host; + struct atcspi_dev *spi; + struct resource *mem_res; + int ret; + + host = spi_alloc_host(&pdev->dev, sizeof(*spi)); + if (!host) + return -ENOMEM; + + spi = spi_controller_get_devdata(host); + spi->host = host; + spi->dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, host); + + ret = atcspi_init_resources(pdev, spi, &mem_res); + if (ret) + goto free_controller; + + ret = atcspi_enable_clk(spi); + if (ret) + goto free_controller; + + atcspi_init_controller(pdev, spi, host, mem_res); + + ret = atcspi_setup(spi); + if (ret) + goto disable_clk; + + ret = devm_spi_register_controller(&pdev->dev, host); + if (ret) { + dev_err_probe(spi->dev, ret, + "Failed to register SPI controller\n"); + goto disable_clk; + } + + spi->use_dma = false; + if (ATCSPI_DMA_SUPPORT) { + ret = atcspi_configure_dma(spi); + if (ret) + dev_info(spi->dev, + "Failed to init DMA, fallback to PIO mode\n"); + else + 
spi->use_dma = true; + } + mutex_init(&spi->mutex_lock); + + return 0; + +disable_clk: + clk_disable_unprepare(spi->clk); + +free_controller: + spi_controller_put(host); + return ret; +} + +static int atcspi_suspend(struct device *dev) +{ + struct spi_controller *host = dev_get_drvdata(dev); + struct atcspi_dev *spi = spi_controller_get_devdata(host); + + spi_controller_suspend(host); + + clk_disable_unprepare(spi->clk); + + return 0; +} + +static int atcspi_resume(struct device *dev) +{ + struct spi_controller *host = dev_get_drvdata(dev); + struct atcspi_dev *spi = spi_controller_get_devdata(host); + int ret; + + ret = clk_prepare_enable(spi->clk); + if (ret) + return ret; + + ret = atcspi_setup(spi); + if (ret) + goto disable_clk; + + ret = spi_controller_resume(host); + if (ret) + goto disable_clk; + + return ret; + +disable_clk: + clk_disable_unprepare(spi->clk); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(atcspi_pm_ops, atcspi_suspend, atcspi_resume); + +static const struct of_device_id atcspi_of_match[] = { + { .compatible = "andestech,qilai-spi", }, + { .compatible = "andestech,ae350-spi", }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atcspi_of_match); + +static struct platform_driver atcspi_driver = { + .probe = atcspi_probe, + .driver = { + .name = "atcspi200", + .owner = THIS_MODULE, + .of_match_table = atcspi_of_match, + .pm = pm_sleep_ptr(&atcspi_pm_ops) + } +}; +module_platform_driver(atcspi_driver); + +MODULE_AUTHOR("CL Wang "); +MODULE_DESCRIPTION("Andes ATCSPI200 SPI controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 9a705a9fddd2..2f61e5b9943c 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -180,7 +180,6 @@ static int ath79_spi_probe(struct platform_device *pdev) } sp = spi_controller_get_devdata(host); - host->dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, sp); host->use_gpio_descriptors = true; diff --git a/drivers/spi/spi-atmel.c 
b/drivers/spi/spi-atmel.c index 89977bff76d2..d71c0dbf1f38 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1536,7 +1536,6 @@ static int atmel_spi_probe(struct platform_device *pdev) host->use_gpio_descriptors = true; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); - host->dev.of_node = pdev->dev.of_node; host->bus_num = pdev->id; host->num_chipselect = 4; host->setup = atmel_spi_setup; diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index e06f412190fd..c75e8da049f7 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -23,6 +23,9 @@ #include #include +#define SPI_ENGINE_REG_DATA_WIDTH 0x0C +#define SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK GENMASK(23, 16) +#define SPI_ENGINE_REG_DATA_WIDTH_MASK GENMASK(15, 0) #define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10 #define SPI_ENGINE_REG_RESET 0x40 @@ -75,6 +78,8 @@ #define SPI_ENGINE_CMD_REG_CLK_DIV 0x0 #define SPI_ENGINE_CMD_REG_CONFIG 0x1 #define SPI_ENGINE_CMD_REG_XFER_BITS 0x2 +#define SPI_ENGINE_CMD_REG_SDI_MASK 0x3 +#define SPI_ENGINE_CMD_REG_SDO_MASK 0x4 #define SPI_ENGINE_MISC_SYNC 0x0 #define SPI_ENGINE_MISC_SLEEP 0x1 @@ -105,6 +110,10 @@ #define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16 #define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16 +/* Extending SPI_MULTI_LANE_MODE values for optimizing messages. 
*/ +#define SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN -1 +#define SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING -2 + struct spi_engine_program { unsigned int length; uint16_t instructions[] __counted_by(length); @@ -142,6 +151,11 @@ struct spi_engine_offload { unsigned long flags; unsigned int offload_num; unsigned int spi_mode_config; + unsigned int multi_lane_mode; + u8 rx_primary_lane_mask; + u8 tx_primary_lane_mask; + u8 rx_all_lanes_mask; + u8 tx_all_lanes_mask; u8 bits_per_word; }; @@ -165,6 +179,25 @@ struct spi_engine { bool offload_requires_sync; }; +static void spi_engine_primary_lane_flag(struct spi_device *spi, + u8 *rx_lane_flags, u8 *tx_lane_flags) +{ + *rx_lane_flags = BIT(spi->rx_lane_map[0]); + *tx_lane_flags = BIT(spi->tx_lane_map[0]); +} + +static void spi_engine_all_lanes_flags(struct spi_device *spi, + u8 *rx_lane_flags, u8 *tx_lane_flags) +{ + int i; + + for (i = 0; i < spi->num_rx_lanes; i++) + *rx_lane_flags |= BIT(spi->rx_lane_map[i]); + + for (i = 0; i < spi->num_tx_lanes; i++) + *tx_lane_flags |= BIT(spi->tx_lane_map[i]); +} + static void spi_engine_program_add_cmd(struct spi_engine_program *p, bool dry, uint16_t cmd) { @@ -193,7 +226,7 @@ static unsigned int spi_engine_get_config(struct spi_device *spi) } static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, - struct spi_transfer *xfer) + struct spi_transfer *xfer, u32 num_lanes) { unsigned int len; @@ -204,6 +237,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, else len = xfer->len / 4; + if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) + len /= num_lanes; + while (len) { unsigned int n = min(len, 256U); unsigned int flags = 0; @@ -269,6 +305,7 @@ static int spi_engine_precompile_message(struct spi_message *msg) { unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz; struct spi_transfer *xfer; + int multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN; u8 min_bits_per_word = U8_MAX; u8 max_bits_per_word = 0; @@ -284,6 +321,24 @@ static 
int spi_engine_precompile_message(struct spi_message *msg) min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word); max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word); } + + if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM || + xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) { + switch (xfer->multi_lane_mode) { + case SPI_MULTI_LANE_MODE_SINGLE: + case SPI_MULTI_LANE_MODE_STRIPE: + break; + default: + /* Other modes, like mirror not supported */ + return -EINVAL; + } + + /* If all xfers have the same multi-lane mode, we can optimize. */ + if (multi_lane_mode == SPI_ENGINE_MULTI_BUS_MODE_UNKNOWN) + multi_lane_mode = xfer->multi_lane_mode; + else if (multi_lane_mode != xfer->multi_lane_mode) + multi_lane_mode = SPI_ENGINE_MULTI_BUS_MODE_CONFLICTING; + } } /* @@ -297,6 +352,14 @@ static int spi_engine_precompile_message(struct spi_message *msg) priv->bits_per_word = min_bits_per_word; else priv->bits_per_word = 0; + + priv->multi_lane_mode = multi_lane_mode; + spi_engine_primary_lane_flag(msg->spi, + &priv->rx_primary_lane_mask, + &priv->tx_primary_lane_mask); + spi_engine_all_lanes_flags(msg->spi, + &priv->rx_all_lanes_mask, + &priv->tx_all_lanes_mask); } return 0; @@ -310,6 +373,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, struct spi_engine_offload *priv; struct spi_transfer *xfer; int clk_div, new_clk_div, inst_ns; + int prev_multi_lane_mode = SPI_MULTI_LANE_MODE_SINGLE; bool keep_cs = false; u8 bits_per_word = 0; @@ -334,6 +398,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, * in the same way. 
*/ bits_per_word = priv->bits_per_word; + prev_multi_lane_mode = priv->multi_lane_mode; } else { spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, @@ -344,6 +409,28 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, spi_engine_gen_cs(p, dry, spi, !xfer->cs_off); list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->rx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM || + xfer->tx_buf || xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM) { + if (xfer->multi_lane_mode != prev_multi_lane_mode) { + u8 tx_lane_flags, rx_lane_flags; + + if (xfer->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) + spi_engine_all_lanes_flags(spi, &rx_lane_flags, + &tx_lane_flags); + else + spi_engine_primary_lane_flag(spi, &rx_lane_flags, + &tx_lane_flags); + + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, + rx_lane_flags)); + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, + tx_lane_flags)); + } + prev_multi_lane_mode = xfer->multi_lane_mode; + } + new_clk_div = host->max_speed_hz / xfer->effective_speed_hz; if (new_clk_div != clk_div) { clk_div = new_clk_div; @@ -360,7 +447,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, bits_per_word)); } - spi_engine_gen_xfer(p, dry, xfer); + spi_engine_gen_xfer(p, dry, xfer, spi->num_rx_lanes); spi_engine_gen_sleep(p, dry, spi_delay_to_ns(&xfer->delay, xfer), inst_ns, xfer->effective_speed_hz); @@ -394,6 +481,19 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, if (clk_div != 1) spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV, 0)); + + /* Restore single lane mode unless offload disable will restore it later. 
*/ + if (prev_multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE && + (!msg->offload || priv->multi_lane_mode != SPI_MULTI_LANE_MODE_STRIPE)) { + u8 rx_lane_flags, tx_lane_flags; + + spi_engine_primary_lane_flag(spi, &rx_lane_flags, &tx_lane_flags); + + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, rx_lane_flags)); + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, tx_lane_flags)); + } } static void spi_engine_xfer_next(struct spi_message *msg, @@ -799,6 +899,19 @@ static int spi_engine_setup(struct spi_device *device) writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + if (host->num_data_lanes > 1) { + u8 rx_lane_flags, tx_lane_flags; + + spi_engine_primary_lane_flag(device, &rx_lane_flags, &tx_lane_flags); + + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, + rx_lane_flags), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, + tx_lane_flags), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + } + /* * In addition to setting the flags, we have to do a CS assert command * to make the new setting actually take effect. 
@@ -902,6 +1015,15 @@ static int spi_engine_trigger_enable(struct spi_offload *offload) priv->bits_per_word), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) { + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, + priv->rx_all_lanes_mask), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, + priv->tx_all_lanes_mask), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + } + writel_relaxed(SPI_ENGINE_CMD_SYNC(1), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); @@ -929,6 +1051,16 @@ static void spi_engine_trigger_disable(struct spi_offload *offload) reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE; writel_relaxed(reg, spi_engine->base + SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); + + /* Restore single-lane mode. */ + if (priv->multi_lane_mode == SPI_MULTI_LANE_MODE_STRIPE) { + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDI_MASK, + priv->rx_primary_lane_mask), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_SDO_MASK, + priv->tx_primary_lane_mask), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + } } static struct dma_chan @@ -973,7 +1105,7 @@ static int spi_engine_probe(struct platform_device *pdev) { struct spi_engine *spi_engine; struct spi_controller *host; - unsigned int version; + unsigned int version, data_width_reg_val; int irq, ret; irq = platform_get_irq(pdev, 0); @@ -1042,7 +1174,7 @@ static int spi_engine_probe(struct platform_device *pdev) return PTR_ERR(spi_engine->base); version = readl(spi_engine->base + ADI_AXI_REG_VERSION); - if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) { + if (ADI_AXI_PCORE_VER_MAJOR(version) > 2) { dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n", ADI_AXI_PCORE_VER_MAJOR(version), ADI_AXI_PCORE_VER_MINOR(version), @@ -1050,6 +1182,8 @@ static int spi_engine_probe(struct platform_device *pdev) return -ENODEV; } + data_width_reg_val = 
readl(spi_engine->base + SPI_ENGINE_REG_DATA_WIDTH); + if (adi_axi_pcore_ver_gteq(version, 1, 1)) { unsigned int sizes = readl(spi_engine->base + SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH); @@ -1080,7 +1214,6 @@ static int spi_engine_probe(struct platform_device *pdev) if (ret) return ret; - host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2; @@ -1097,6 +1230,9 @@ static int spi_engine_probe(struct platform_device *pdev) } if (adi_axi_pcore_ver_gteq(version, 1, 3)) host->mode_bits |= SPI_MOSI_IDLE_LOW | SPI_MOSI_IDLE_HIGH; + if (adi_axi_pcore_ver_gteq(version, 2, 0)) + host->num_data_lanes = FIELD_GET(SPI_ENGINE_REG_DATA_WIDTH_NUM_OF_SDIO_MASK, + data_width_reg_val); if (host->max_speed_hz == 0) return dev_err_probe(&pdev->dev, -EINVAL, "spi_clk rate is 0"); diff --git a/drivers/spi/spi-axiado.c b/drivers/spi/spi-axiado.c new file mode 100644 index 000000000000..8cea81432c5b --- /dev/null +++ b/drivers/spi/spi-axiado.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// +// Axiado SPI controller driver (Host mode only) +// +// Copyright (C) 2022-2025 Axiado Corporation (or its affiliates). 
+// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-axiado.h" + +/** + * ax_spi_read - Register Read - 32 bit per word + * @xspi: Pointer to the ax_spi structure + * @offset: Register offset address + * + * @return: Returns the value of that register + */ +static inline u32 ax_spi_read(struct ax_spi *xspi, u32 offset) +{ + return readl_relaxed(xspi->regs + offset); +} + +/** + * ax_spi_write - Register write - 32 bit per word + * @xspi: Pointer to the ax_spi structure + * @offset: Register offset address + * @val: Value to write into that register + */ +static inline void ax_spi_write(struct ax_spi *xspi, u32 offset, u32 val) +{ + writel_relaxed(val, xspi->regs + offset); +} + +/** + * ax_spi_write_b - Register Read - 8 bit per word + * @xspi: Pointer to the ax_spi structure + * @offset: Register offset address + * @val: Value to write into that register + */ +static inline void ax_spi_write_b(struct ax_spi *xspi, u32 offset, u8 val) +{ + writeb_relaxed(val, xspi->regs + offset); +} + +/** + * ax_spi_init_hw - Initialize the hardware and configure the SPI controller + * @xspi: Pointer to the ax_spi structure + * + * * On reset the SPI controller is configured to be in host mode. + * In host mode baud rate divisor is set to 4, threshold value for TX FIFO + * not full interrupt is set to 1 and size of the word to be transferred as 8 bit. + * + * This function initializes the SPI controller to disable and clear all the + * interrupts, enable manual target select and manual start, deselect all the + * chip select lines, and enable the SPI controller. 
+ */ +static void ax_spi_init_hw(struct ax_spi *xspi) +{ + u32 reg_value; + + /* Clear CR1 */ + ax_spi_write(xspi, AX_SPI_CR1, AX_SPI_CR1_CLR); + + /* CR1 - CPO CHP MSS SCE SCR */ + reg_value = ax_spi_read(xspi, AX_SPI_CR1); + reg_value |= AX_SPI_CR1_SCR | AX_SPI_CR1_SCE; + + ax_spi_write(xspi, AX_SPI_CR1, reg_value); + + /* CR2 - MTE SRD SWD SSO */ + reg_value = ax_spi_read(xspi, AX_SPI_CR2); + reg_value |= AX_SPI_CR2_SWD | AX_SPI_CR2_SRD; + + ax_spi_write(xspi, AX_SPI_CR2, reg_value); + + /* CR3 - Reserverd bits S3W SDL */ + ax_spi_write(xspi, AX_SPI_CR3, AX_SPI_CR3_SDL); + + /* SCDR - Reserved bits SCS SCD */ + ax_spi_write(xspi, AX_SPI_SCDR, (AX_SPI_SCDR_SCS | AX_SPI_SCD_DEFAULT)); + + /* IMR */ + ax_spi_write(xspi, AX_SPI_IMR, AX_SPI_IMR_CLR); + + /* ISR - Clear all the interrupt */ + ax_spi_write(xspi, AX_SPI_ISR, AX_SPI_ISR_CLR); +} + +/** + * ax_spi_chipselect - Select or deselect the chip select line + * @spi: Pointer to the spi_device structure + * @is_high: Select(0) or deselect (1) the chip select line + */ +static void ax_spi_chipselect(struct spi_device *spi, bool is_high) +{ + struct ax_spi *xspi = spi_controller_get_devdata(spi->controller); + u32 ctrl_reg; + + ctrl_reg = ax_spi_read(xspi, AX_SPI_CR2); + /* Reset the chip select */ + ctrl_reg &= ~AX_SPI_DEFAULT_TS_MASK; + ctrl_reg |= spi_get_chipselect(spi, 0); + + ax_spi_write(xspi, AX_SPI_CR2, ctrl_reg); +} + +/** + * ax_spi_config_clock_mode - Sets clock polarity and phase + * @spi: Pointer to the spi_device structure + * + * Sets the requested clock polarity and phase. 
+ */ +static void ax_spi_config_clock_mode(struct spi_device *spi) +{ + struct ax_spi *xspi = spi_controller_get_devdata(spi->controller); + u32 ctrl_reg, new_ctrl_reg; + + new_ctrl_reg = ax_spi_read(xspi, AX_SPI_CR1); + ctrl_reg = new_ctrl_reg; + + /* Set the SPI clock phase and clock polarity */ + new_ctrl_reg &= ~(AX_SPI_CR1_CPHA | AX_SPI_CR1_CPOL); + if (spi->mode & SPI_CPHA) + new_ctrl_reg |= AX_SPI_CR1_CPHA; + if (spi->mode & SPI_CPOL) + new_ctrl_reg |= AX_SPI_CR1_CPOL; + + if (new_ctrl_reg != ctrl_reg) + ax_spi_write(xspi, AX_SPI_CR1, new_ctrl_reg); + ax_spi_write(xspi, AX_SPI_CR1, 0x03); +} + +/** + * ax_spi_config_clock_freq - Sets clock frequency + * @spi: Pointer to the spi_device structure + * @transfer: Pointer to the spi_transfer structure which provides + * information about next transfer setup parameters + * + * Sets the requested clock frequency. + * Note: If the requested frequency is not an exact match with what can be + * obtained using the prescalar value the driver sets the clock frequency which + * is lower than the requested frequency (maximum lower) for the transfer. If + * the requested frequency is higher or lower than that is supported by the SPI + * controller the driver will set the highest or lowest frequency supported by + * controller. + */ +static void ax_spi_config_clock_freq(struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct ax_spi *xspi = spi_controller_get_devdata(spi->controller); + + ax_spi_write(xspi, AX_SPI_SCDR, (AX_SPI_SCDR_SCS | AX_SPI_SCD_DEFAULT)); +} + +/** + * ax_spi_setup_transfer - Configure SPI controller for specified transfer + * @spi: Pointer to the spi_device structure + * @transfer: Pointer to the spi_transfer structure which provides + * information about next transfer setup parameters + * + * Sets the operational mode of SPI controller for the next SPI transfer and + * sets the requested clock frequency. 
+ * + */ +static void ax_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct ax_spi *xspi = spi_controller_get_devdata(spi->controller); + + ax_spi_config_clock_freq(spi, transfer); + + dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n", + __func__, spi->mode, spi->bits_per_word, + xspi->speed_hz); +} + +/** + * ax_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible + * @xspi: Pointer to the ax_spi structure + */ +static void ax_spi_fill_tx_fifo(struct ax_spi *xspi) +{ + unsigned long trans_cnt = 0; + + while ((trans_cnt < xspi->tx_fifo_depth) && + (xspi->tx_bytes > 0)) { + /* When xspi in busy condition, bytes may send failed, + * then spi control did't work thoroughly, add one byte delay + */ + if (ax_spi_read(xspi, AX_SPI_IVR) & AX_SPI_IVR_TFOV) + usleep_range(10, 10); + if (xspi->tx_buf) + ax_spi_write_b(xspi, AX_SPI_TXFIFO, *xspi->tx_buf++); + else + ax_spi_write_b(xspi, AX_SPI_TXFIFO, 0); + + xspi->tx_bytes--; + trans_cnt++; + } +} + +/** + * ax_spi_get_rx_byte - Gets a byte from the RX FIFO buffer + * @xspi: Controller private data (struct ax_spi *) + * + * This function handles the logic of extracting bytes from the 32-bit RX FIFO. + * It reads a new 32-bit word from AX_SPI_RXFIFO only when the current buffered + * word has been fully processed (all 4 bytes extracted). It then extracts + * bytes one by one, assuming the controller is little-endian. + * + * Returns: The next 8-bit byte read from the RX FIFO stream. + */ +static u8 ax_spi_get_rx_byte_for_irq(struct ax_spi *xspi) +{ + u8 byte_val; + + /* If all bytes from the current 32-bit word have been extracted, + * read a new word from the hardware RX FIFO. 
+ */ + if (xspi->bytes_left_in_current_rx_word_for_irq == 0) { + xspi->current_rx_fifo_word_for_irq = ax_spi_read(xspi, AX_SPI_RXFIFO); + xspi->bytes_left_in_current_rx_word_for_irq = 4; // A new 32-bit word has 4 bytes + } + + /* Extract the least significant byte from the current 32-bit word */ + byte_val = (u8)(xspi->current_rx_fifo_word_for_irq & 0xFF); + + /* Shift the word right by 8 bits to prepare the next byte for extraction */ + xspi->current_rx_fifo_word_for_irq >>= 8; + xspi->bytes_left_in_current_rx_word_for_irq--; + + return byte_val; +} + +/** + * Helper function to process received bytes and check for transfer completion. + * This avoids code duplication and centralizes the completion logic. + * Returns true if the transfer was finalized. + */ +static bool ax_spi_process_rx_and_finalize(struct spi_controller *ctlr) +{ + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + + /* Process any remaining bytes in the RX FIFO */ + u32 avail_bytes = ax_spi_read(xspi, AX_SPI_RX_FBCAR); + + /* This loop handles bytes that are already staged from a previous word read */ + while (xspi->bytes_left_in_current_rx_word_for_irq && + (xspi->rx_copy_remaining || xspi->rx_discard)) { + u8 b = ax_spi_get_rx_byte_for_irq(xspi); + + if (xspi->rx_discard) { + xspi->rx_discard--; + } else { + *xspi->rx_buf++ = b; + xspi->rx_copy_remaining--; + } + } + + /* This loop processes new words directly from the FIFO */ + while (avail_bytes >= 4 && (xspi->rx_copy_remaining || xspi->rx_discard)) { + /* This function should handle reading from the FIFO */ + u8 b = ax_spi_get_rx_byte_for_irq(xspi); + + if (xspi->rx_discard) { + xspi->rx_discard--; + } else { + *xspi->rx_buf++ = b; + xspi->rx_copy_remaining--; + } + /* ax_spi_get_rx_byte_for_irq fetches a new word when needed + * and updates internal state. 
+ */ + if (xspi->bytes_left_in_current_rx_word_for_irq == 3) + avail_bytes -= 4; + } + + /* Completion Check: The transfer is truly complete if all expected + * RX bytes have been copied or discarded. + */ + if (xspi->rx_copy_remaining == 0 && xspi->rx_discard == 0) { + /* Defensive drain: If for some reason there are leftover bytes + * in the HW FIFO after we've logically finished, + * read and discard them to prevent them from corrupting the next transfer. + * This should be a bounded operation. + */ + int safety_words = AX_SPI_RX_FIFO_DRAIN_LIMIT; // Limit to avoid getting stuck + + while (ax_spi_read(xspi, AX_SPI_RX_FBCAR) > 0 && safety_words-- > 0) + ax_spi_read(xspi, AX_SPI_RXFIFO); + + /* Disable all interrupts for this transfer and finalize. */ + ax_spi_write(xspi, AX_SPI_IMR, 0x00); + spi_finalize_current_transfer(ctlr); + return true; + } + + return false; +} + +/** + * ax_spi_irq - Interrupt service routine of the SPI controller + * @irq: IRQ number + * @dev_id: Pointer to the xspi structure + * + * This function handles RX FIFO almost full and Host Transfer Completed interrupts only. + * On RX FIFO amlost full interrupt this function reads the received data from RX FIFO and + * fills the TX FIFO if there is any data remaining to be transferred. + * On Host Transfer Completed interrupt this function indicates that transfer is completed, + * the SPI subsystem will clear MTC bit. + * + * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise. + */ +static irqreturn_t ax_spi_irq(int irq, void *dev_id) +{ + struct spi_controller *ctlr = dev_id; + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + u32 intr_status; + + intr_status = ax_spi_read(xspi, AX_SPI_IVR); + if (!intr_status) + return IRQ_NONE; + + /* Handle "Message Transfer Complete" interrupt. + * This means all bytes have been shifted out of the TX FIFO. + * It's time to harvest the final incoming bytes from the RX FIFO. 
+ */ + if (intr_status & AX_SPI_IVR_MTCV) { + /* Clear the MTC interrupt flag immediately. */ + ax_spi_write(xspi, AX_SPI_ISR, AX_SPI_ISR_MTC); + + /* For a TX-only transfer, rx_buf would be NULL. + * In the spi-core, rx_copy_remaining would be 0. + * So we can finalize immediately. + */ + if (!xspi->rx_buf) { + ax_spi_write(xspi, AX_SPI_IMR, 0x00); + spi_finalize_current_transfer(ctlr); + return IRQ_HANDLED; + } + /* For a full-duplex transfer, process any remaining RX data. + * The helper function will handle finalization if everything is received. + */ + ax_spi_process_rx_and_finalize(ctlr); + return IRQ_HANDLED; + } + + /* Handle "RX FIFO Full / Threshold Met" interrupt. + * This means we need to make space in the RX FIFO by reading from it. + */ + if (intr_status & AX_SPI_IVR_RFFV) { + if (ax_spi_process_rx_and_finalize(ctlr)) { + /* Transfer was finalized inside the helper, we are done. */ + } else { + /* RX is not yet complete. If there are still TX bytes to send + * (for very long transfers), we can fill the TX FIFO again. + */ + if (xspi->tx_bytes) + ax_spi_fill_tx_fifo(xspi); + } + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int ax_prepare_message(struct spi_controller *ctlr, + struct spi_message *msg) +{ + ax_spi_config_clock_mode(msg->spi); + return 0; +} + +/** + * ax_transfer_one - Initiates the SPI transfer + * @ctlr: Pointer to spi_controller structure + * @spi: Pointer to the spi_device structure + * @transfer: Pointer to the spi_transfer structure which provides + * information about next transfer parameters + * + * This function fills the TX FIFO, starts the SPI transfer and + * returns a positive transfer count so that core will wait for completion. 
+ * + * Return: Number of bytes transferred in the last transfer + */ +static int ax_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + int drain_limit; + + /* Pre-transfer cleanup:Flush the RX FIFO to discard any stale data. + * This is the crucial part. Before every new transfer, we must ensure + * the HW is in a clean state to avoid processing stale data + * from a previous, possibly failed or interrupted, transfer. + */ + drain_limit = AX_SPI_RX_FIFO_DRAIN_LIMIT; // Sane limit to prevent infinite loop on HW error + while (ax_spi_read(xspi, AX_SPI_RX_FBCAR) > 0 && drain_limit-- > 0) + ax_spi_read(xspi, AX_SPI_RXFIFO); // Read and discard + + if (drain_limit <= 0) + dev_warn(&ctlr->dev, "RX FIFO drain timeout before transfer\n"); + + /* Clear any stale interrupt flags from a previous transfer. + * This prevents an immediate, false interrupt trigger. + */ + ax_spi_write(xspi, AX_SPI_ISR, AX_SPI_ISR_CLR); + + xspi->tx_buf = transfer->tx_buf; + xspi->rx_buf = transfer->rx_buf; + xspi->tx_bytes = transfer->len; + xspi->rx_bytes = transfer->len; + + /* Reset RX 32-bit to byte buffer for each new transfer */ + if (transfer->tx_buf && !transfer->rx_buf) { + /* TX mode: discard all received data */ + xspi->rx_discard = transfer->len; + xspi->rx_copy_remaining = 0; + } else if ((!transfer->tx_buf && transfer->rx_buf) || + (transfer->tx_buf && transfer->rx_buf)) { + /* RX mode: generate clock by filling TX FIFO with dummy bytes + * Full-duplex mode: generate clock by filling TX FIFO + */ + xspi->rx_discard = 0; + xspi->rx_copy_remaining = transfer->len; + } else { + /* No TX and RX */ + xspi->rx_discard = 0; + xspi->rx_copy_remaining = transfer->len; + } + + ax_spi_setup_transfer(spi, transfer); + ax_spi_fill_tx_fifo(xspi); + ax_spi_write(xspi, AX_SPI_CR2, (AX_SPI_CR2_HTE | AX_SPI_CR2_SRD | AX_SPI_CR2_SWD)); + + ax_spi_write(xspi, AX_SPI_IMR, (AX_SPI_IMR_MTCM | 
AX_SPI_IMR_RFFM)); + return transfer->len; +} + +/** + * ax_prepare_transfer_hardware - Prepares hardware for transfer. + * @ctlr: Pointer to the spi_controller structure which provides + * information about the controller. + * + * This function enables SPI host controller. + * + * Return: 0 always + */ +static int ax_prepare_transfer_hardware(struct spi_controller *ctlr) +{ + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + + u32 reg_value; + + reg_value = ax_spi_read(xspi, AX_SPI_CR1); + reg_value |= AX_SPI_CR1_SCE; + + ax_spi_write(xspi, AX_SPI_CR1, reg_value); + + return 0; +} + +/** + * ax_unprepare_transfer_hardware - Relaxes hardware after transfer + * @ctlr: Pointer to the spi_controller structure which provides + * information about the controller. + * + * This function disables the SPI host controller when no target selected. + * + * Return: 0 always + */ +static int ax_unprepare_transfer_hardware(struct spi_controller *ctlr) +{ + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + + u32 reg_value; + + /* Disable the SPI if target is deselected */ + reg_value = ax_spi_read(xspi, AX_SPI_CR1); + reg_value &= ~AX_SPI_CR1_SCE; + + ax_spi_write(xspi, AX_SPI_CR1, reg_value); + + return 0; +} + +/** + * ax_spi_detect_fifo_depth - Detect the FIFO depth of the hardware + * @xspi: Pointer to the ax_spi structure + * + * The depth of the TX FIFO is a synthesis configuration parameter of the SPI + * IP. The FIFO threshold register is sized so that its maximum value can be the + * FIFO size - 1. This is used to detect the size of the FIFO. 
+ */ +static void ax_spi_detect_fifo_depth(struct ax_spi *xspi) +{ + /* The MSBs will get truncated giving us the size of the FIFO */ + ax_spi_write(xspi, AX_SPI_TX_FAETR, ALMOST_EMPTY_TRESHOLD); + xspi->tx_fifo_depth = FIFO_DEPTH; + + /* Set the threshold limit */ + ax_spi_write(xspi, AX_SPI_TX_FAETR, ALMOST_EMPTY_TRESHOLD); + ax_spi_write(xspi, AX_SPI_RX_FAFTR, ALMOST_FULL_TRESHOLD); +} + +/* --- Internal Helper Function for 32-bit RX FIFO Read --- */ +/** + * ax_spi_get_rx_byte - Gets a byte from the RX FIFO buffer + * @xspi: Controller private data (struct ax_spi *) + * + * This function handles the logic of extracting bytes from the 32-bit RX FIFO. + * It reads a new 32-bit word from AX_SPI_RXFIFO only when the current buffered + * word has been fully processed (all 4 bytes extracted). It then extracts + * bytes one by one, assuming the controller is little-endian. + * + * Returns: The next 8-bit byte read from the RX FIFO stream. + */ +static u8 ax_spi_get_rx_byte(struct ax_spi *xspi) +{ + u8 byte_val; + + /* If all bytes from the current 32-bit word have been extracted, + * read a new word from the hardware RX FIFO. 
+ */ + if (xspi->bytes_left_in_current_rx_word == 0) { + xspi->current_rx_fifo_word = ax_spi_read(xspi, AX_SPI_RXFIFO); + xspi->bytes_left_in_current_rx_word = 4; // A new 32-bit word has 4 bytes + } + + /* Extract the least significant byte from the current 32-bit word */ + byte_val = (u8)(xspi->current_rx_fifo_word & 0xFF); + + /* Shift the word right by 8 bits to prepare the next byte for extraction */ + xspi->current_rx_fifo_word >>= 8; + xspi->bytes_left_in_current_rx_word--; + + return byte_val; +} + +static int ax_spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + struct ax_spi *xspi = spi_controller_get_devdata(spi->controller); + u32 reg_val; + int ret = 0; + u8 cmd_buf[AX_SPI_COMMAND_BUFFER_SIZE]; + int cmd_len = 0; + int i = 0, timeout = AX_SPI_TRX_FIFO_TIMEOUT; + int bytes_to_discard_from_rx; + u8 *rx_buf_ptr = (u8 *)op->data.buf.in; + u8 *tx_buf_ptr = (u8 *)op->data.buf.out; + u32 rx_count_reg = 0; + + dev_dbg(&spi->dev, + "%s: cmd:%02x mode:%d.%d.%d.%d addr:%llx len:%d\n", + __func__, op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, + op->dummy.buswidth, op->data.buswidth, op->addr.val, + op->data.nbytes); + + /* Validate operation parameters: Only 1-bit bus width supported */ + if (op->cmd.buswidth != 1 || + (op->addr.nbytes && op->addr.buswidth != 0 && + op->addr.buswidth != 1) || + (op->dummy.nbytes && op->dummy.buswidth != 0 && + op->dummy.buswidth != 1) || + (op->data.nbytes && op->data.buswidth != 1)) { + dev_err(&spi->dev, "Unsupported bus width, only 1-bit bus width supported\n"); + return -EOPNOTSUPP; + } + + /* Initialize controller hardware */ + ax_spi_init_hw(xspi); + + /* Assert chip select (pull low) */ + ax_spi_chipselect(spi, false); + + /* Build command phase: Copy opcode to cmd_buf */ + if (op->cmd.nbytes == 2) { + cmd_buf[cmd_len++] = (op->cmd.opcode >> 8) & 0xFF; + cmd_buf[cmd_len++] = op->cmd.opcode & 0xFF; + } else { + cmd_buf[cmd_len++] = op->cmd.opcode; + } + + /* Put 
address bytes to cmd_buf */ + if (op->addr.nbytes) { + for (i = op->addr.nbytes - 1; i >= 0; i--) { + cmd_buf[cmd_len] = (op->addr.val >> (i * 8)) & 0xFF; + cmd_len++; + } + } + + /* Configure controller for desired operation mode (write/read) */ + reg_val = ax_spi_read(xspi, AX_SPI_CR2); + reg_val |= AX_SPI_CR2_SWD | AX_SPI_CR2_SRI | AX_SPI_CR2_SRD; + ax_spi_write(xspi, AX_SPI_CR2, reg_val); + + /* Write command and address bytes to TX_FIFO */ + for (i = 0; i < cmd_len; i++) + ax_spi_write_b(xspi, AX_SPI_TXFIFO, cmd_buf[i]); + + /* Add dummy bytes (for clock generation) or actual data bytes to TX_FIFO */ + if (op->data.dir == SPI_MEM_DATA_IN) { + for (i = 0; i < op->dummy.nbytes; i++) + ax_spi_write_b(xspi, AX_SPI_TXFIFO, 0x00); + for (i = 0; i < op->data.nbytes; i++) + ax_spi_write_b(xspi, AX_SPI_TXFIFO, 0x00); + } else { + for (i = 0; i < op->data.nbytes; i++) + ax_spi_write_b(xspi, AX_SPI_TXFIFO, tx_buf_ptr[i]); + } + + /* Start the SPI transmission */ + reg_val = ax_spi_read(xspi, AX_SPI_CR2); + reg_val |= AX_SPI_CR2_HTE; + ax_spi_write(xspi, AX_SPI_CR2, reg_val); + + /* Wait for TX FIFO to become empty */ + while (timeout-- > 0) { + u32 tx_count_reg = ax_spi_read(xspi, AX_SPI_TX_FBCAR); + + if (tx_count_reg == 0) { + udelay(1); + break; + } + udelay(1); + } + + /* Handle Data Reception (for read operations) */ + if (op->data.dir == SPI_MEM_DATA_IN) { + /* Reset the internal RX byte buffer for this new operation. + * This ensures ax_spi_get_rx_byte starts fresh for each exec_op call. + */ + xspi->bytes_left_in_current_rx_word = 0; + xspi->current_rx_fifo_word = 0; + + timeout = AX_SPI_TRX_FIFO_TIMEOUT; + while (timeout-- > 0) { + rx_count_reg = ax_spi_read(xspi, AX_SPI_RX_FBCAR); + if (rx_count_reg >= op->data.nbytes) + break; + udelay(1); /* Small delay to prevent aggressive busy-waiting */ + } + + if (timeout < 0) { + ret = -ETIMEDOUT; + goto out_unlock; + } + + /* Calculate how many bytes we need to discard from the RX FIFO. 
+ * Since we set SRI, we only need to discard the address bytes and + * dummy bytes from the RX FIFO. + */ + bytes_to_discard_from_rx = op->addr.nbytes + op->dummy.nbytes; + for (i = 0; i < bytes_to_discard_from_rx; i++) + ax_spi_get_rx_byte(xspi); + + /* Read actual data bytes into op->data.buf.in */ + for (i = 0; i < op->data.nbytes; i++) { + *rx_buf_ptr = ax_spi_get_rx_byte(xspi); + rx_buf_ptr++; + } + } else if (op->data.dir == SPI_MEM_DATA_OUT) { + timeout = AX_SPI_TRX_FIFO_TIMEOUT; + while (timeout-- > 0) { + u32 tx_fifo_level = ax_spi_read(xspi, AX_SPI_TX_FBCAR); + + if (tx_fifo_level == 0) + break; + udelay(1); + } + if (timeout < 0) { + ret = -ETIMEDOUT; + goto out_unlock; + } + } + +out_unlock: + /* Deassert chip select (pull high) */ + ax_spi_chipselect(spi, true); + + return ret; +} + +static int ax_spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + struct ax_spi *xspi = spi_controller_get_devdata(spi->controller); + size_t max_transfer_payload_bytes; + size_t fifo_total_bytes; + size_t protocol_overhead_bytes; + + fifo_total_bytes = xspi->tx_fifo_depth; + /* Calculate protocol overhead bytes according to the real operation each time. */ + protocol_overhead_bytes = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; + + /* Calculate the maximum data payload that can fit into the FIFO. */ + if (fifo_total_bytes <= protocol_overhead_bytes) { + max_transfer_payload_bytes = 0; + dev_warn_once(&spi->dev, "SPI FIFO (%zu bytes) is too small for protocol overhead (%zu bytes)! Max data size forced to 0.\n", + fifo_total_bytes, protocol_overhead_bytes); + } else { + max_transfer_payload_bytes = fifo_total_bytes - protocol_overhead_bytes; + } + + /* Limit op->data.nbytes based on the calculated max payload and SZ_64K. + * This is the value that spi-mem will then use to split requests. 
+ */ + if (op->data.nbytes > max_transfer_payload_bytes) { + op->data.nbytes = max_transfer_payload_bytes; + dev_dbg(&spi->dev, "%s %d: op->data.nbytes adjusted to %u due to FIFO overhead\n", + __func__, __LINE__, op->data.nbytes); + } + + /* Also apply the overall max transfer size */ + if (op->data.nbytes > SZ_64K) { + op->data.nbytes = SZ_64K; + dev_dbg(&spi->dev, "%s %d: op->data.nbytes adjusted to %u due to SZ_64K limit\n", + __func__, __LINE__, op->data.nbytes); + } + + return 0; +} + +static const struct spi_controller_mem_ops ax_spi_mem_ops = { + .exec_op = ax_spi_mem_exec_op, + .adjust_op_size = ax_spi_mem_adjust_op_size, +}; + +/** + * ax_spi_probe - Probe method for the SPI driver + * @pdev: Pointer to the platform_device structure + * + * This function initializes the driver data structures and the hardware. + * + * Return: 0 on success and error value on error + */ +static int ax_spi_probe(struct platform_device *pdev) +{ + int ret = 0, irq; + struct spi_controller *ctlr; + struct ax_spi *xspi; + u32 num_cs; + + ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*xspi)); + if (!ctlr) + return -ENOMEM; + + xspi = spi_controller_get_devdata(ctlr); + ctlr->dev.of_node = pdev->dev.of_node; + platform_set_drvdata(pdev, ctlr); + + xspi->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(xspi->regs)) { + ret = PTR_ERR(xspi->regs); + goto remove_ctlr; + } + + xspi->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(xspi->pclk)) { + dev_err(&pdev->dev, "pclk clock not found.\n"); + ret = PTR_ERR(xspi->pclk); + goto remove_ctlr; + } + + xspi->ref_clk = devm_clk_get(&pdev->dev, "ref"); + if (IS_ERR(xspi->ref_clk)) { + dev_err(&pdev->dev, "ref clock not found.\n"); + ret = PTR_ERR(xspi->ref_clk); + goto remove_ctlr; + } + + ret = clk_prepare_enable(xspi->pclk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable APB clock.\n"); + goto remove_ctlr; + } + + ret = clk_prepare_enable(xspi->ref_clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable device 
clock.\n"); + goto clk_dis_apb; + } + + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); + if (ret < 0) + ctlr->num_chipselect = AX_SPI_DEFAULT_NUM_CS; + else + ctlr->num_chipselect = num_cs; + + ax_spi_detect_fifo_depth(xspi); + + xspi->current_rx_fifo_word = 0; + xspi->bytes_left_in_current_rx_word = 0; + + /* Initialize IRQ-related variables */ + xspi->bytes_left_in_current_rx_word_for_irq = 0; + xspi->current_rx_fifo_word_for_irq = 0; + + /* SPI controller initializations */ + ax_spi_init_hw(xspi); + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + ret = -ENXIO; + goto clk_dis_all; + } + + ret = devm_request_irq(&pdev->dev, irq, ax_spi_irq, + 0, pdev->name, ctlr); + if (ret != 0) { + ret = -ENXIO; + dev_err(&pdev->dev, "request_irq failed\n"); + goto clk_dis_all; + } + + ctlr->use_gpio_descriptors = true; + ctlr->prepare_transfer_hardware = ax_prepare_transfer_hardware; + ctlr->prepare_message = ax_prepare_message; + ctlr->transfer_one = ax_transfer_one; + ctlr->unprepare_transfer_hardware = ax_unprepare_transfer_hardware; + ctlr->set_cs = ax_spi_chipselect; + ctlr->auto_runtime_pm = true; + ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + + xspi->clk_rate = clk_get_rate(xspi->ref_clk); + /* Set to default valid value */ + ctlr->max_speed_hz = xspi->clk_rate / 2; + xspi->speed_hz = ctlr->max_speed_hz; + + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + ctlr->mem_ops = &ax_spi_mem_ops; + + ret = spi_register_controller(ctlr); + if (ret) { + dev_err(&pdev->dev, "spi_register_controller failed\n"); + goto clk_dis_all; + } + + return ret; + +clk_dis_all: + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_disable(&pdev->dev); + 
clk_disable_unprepare(xspi->ref_clk); +clk_dis_apb: + clk_disable_unprepare(xspi->pclk); +remove_ctlr: + spi_controller_put(ctlr); + return ret; +} + +/** + * ax_spi_remove - Remove method for the SPI driver + * @pdev: Pointer to the platform_device structure + * + * This function is called if a device is physically removed from the system or + * if the driver module is being unloaded. It frees all resources allocated to + * the device. + */ +static void ax_spi_remove(struct platform_device *pdev) +{ + struct spi_controller *ctlr = platform_get_drvdata(pdev); + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + + spi_unregister_controller(ctlr); + + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + clk_disable_unprepare(xspi->ref_clk); + clk_disable_unprepare(xspi->pclk); +} + +/** + * ax_spi_suspend - Suspend method for the SPI driver + * @dev: Address of the platform_device structure + * + * This function disables the SPI controller and + * changes the driver state to "suspend" + * + * Return: 0 on success and error value on error + */ +static int __maybe_unused ax_spi_suspend(struct device *dev) +{ + struct spi_controller *ctlr = dev_get_drvdata(dev); + + return spi_controller_suspend(ctlr); +} + +/** + * ax_spi_resume - Resume method for the SPI driver + * @dev: Address of the platform_device structure + * + * This function changes the driver state to "ready" + * + * Return: 0 on success and error value on error + */ +static int __maybe_unused ax_spi_resume(struct device *dev) +{ + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + + ax_spi_init_hw(xspi); + return spi_controller_resume(ctlr); +} + +/** + * ax_spi_runtime_resume - Runtime resume method for the SPI driver + * @dev: Address of the platform_device structure + * + * This function enables the clocks + * + * Return: 0 on success and error value on error + */ +static int __maybe_unused 
ax_spi_runtime_resume(struct device *dev) +{ + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + int ret; + + ret = clk_prepare_enable(xspi->pclk); + if (ret) { + dev_err(dev, "Cannot enable APB clock.\n"); + return ret; + } + + ret = clk_prepare_enable(xspi->ref_clk); + if (ret) { + dev_err(dev, "Cannot enable device clock.\n"); + clk_disable_unprepare(xspi->pclk); + return ret; + } + return 0; +} + +/** + * ax_spi_runtime_suspend - Runtime suspend method for the SPI driver + * @dev: Address of the platform_device structure + * + * This function disables the clocks + * + * Return: Always 0 + */ +static int __maybe_unused ax_spi_runtime_suspend(struct device *dev) +{ + struct spi_controller *ctlr = dev_get_drvdata(dev); + struct ax_spi *xspi = spi_controller_get_devdata(ctlr); + + clk_disable_unprepare(xspi->ref_clk); + clk_disable_unprepare(xspi->pclk); + + return 0; +} + +static const struct dev_pm_ops ax_spi_dev_pm_ops = { + SET_RUNTIME_PM_OPS(ax_spi_runtime_suspend, + ax_spi_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(ax_spi_suspend, ax_spi_resume) +}; + +static const struct of_device_id ax_spi_of_match[] = { + { .compatible = "axiado,ax3000-spi" }, + { /* end of table */ } +}; +MODULE_DEVICE_TABLE(of, ax_spi_of_match); + +/* ax_spi_driver - This structure defines the SPI subsystem platform driver */ +static struct platform_driver ax_spi_driver = { + .probe = ax_spi_probe, + .remove = ax_spi_remove, + .driver = { + .name = AX_SPI_NAME, + .of_match_table = ax_spi_of_match, + .pm = &ax_spi_dev_pm_ops, + }, +}; + +module_platform_driver(ax_spi_driver); + +MODULE_AUTHOR("Axiado Corporation"); +MODULE_DESCRIPTION("Axiado SPI Host driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-axiado.h b/drivers/spi/spi-axiado.h new file mode 100644 index 000000000000..6cf0e5bf5879 --- /dev/null +++ b/drivers/spi/spi-axiado.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * 
Axiado SPI controller driver (Host mode only) + * + * Copyright (C) 2022-2025 Axiado Corporation (or its affiliates). + */ + +#ifndef SPI_AXIADO_H +#define SPI_AXIADO_H + +/* Name of this driver */ +#define AX_SPI_NAME "axiado-db-spi" + +/* Axiado - SPI Digital Blocks IP design registers */ +#define AX_SPI_TX_FAETR 0x18 // TX-FAETR +#define ALMOST_EMPTY_TRESHOLD 0x00 // Programmed threshold value +#define AX_SPI_RX_FAFTR 0x28 // RX-FAETR +#define ALMOST_FULL_TRESHOLD 0x0c // Programmed threshold value +#define FIFO_DEPTH 256 // 256 bytes + +#define AX_SPI_CR1 0x00 // CR1 +#define AX_SPI_CR1_CLR 0x00 // CR1 - Clear +#define AX_SPI_CR1_SCR 0x01 // CR1 - controller reset +#define AX_SPI_CR1_SCE 0x02 // CR1 - Controller Enable/Disable +#define AX_SPI_CR1_CPHA 0x08 // CR1 - CPH +#define AX_SPI_CR1_CPOL 0x10 // CR1 - CPO + +#define AX_SPI_CR2 0x04 // CR2 +#define AX_SPI_CR2_SWD 0x04 // CR2 - Write Enabel/Disable +#define AX_SPI_CR2_SRD 0x08 // CR2 - Read Enable/Disable +#define AX_SPI_CR2_SRI 0x10 // CR2 - Read First Byte Ignore +#define AX_SPI_CR2_HTE 0x40 // CR2 - Host Transmit Enable +#define AX_SPI_CR3 0x08 // CR3 +#define AX_SPI_CR3_SDL 0x00 // CR3 - Data lines +#define AX_SPI_CR3_QUAD 0x02 // CR3 - Data lines + +/* As per Digital Blocks datasheet clock frequency range + * Min - 244KHz + * Max - 62.5MHz + * SCK Clock Divider Register Values + */ +#define AX_SPI_RX_FBCAR 0x24 // RX_FBCAR +#define AX_SPI_TX_FBCAR 0x14 // TX_FBCAR +#define AX_SPI_SCDR 0x2c // SCDR +#define AX_SPI_SCD_MIN 0x1fe // Valid SCD (SCK Clock Divider Register) +#define AX_SPI_SCD_DEFAULT 0x06 // Default SCD (SCK Clock Divider Register) +#define AX_SPI_SCD_MAX 0x00 // Valid SCD (SCK Clock Divider Register) +#define AX_SPI_SCDR_SCS 0x0200 // SCDR - AMBA Bus Clock source + +#define AX_SPI_IMR 0x34 // IMR +#define AX_SPI_IMR_CLR 0x00 // IMR - Clear +#define AX_SPI_IMR_TFOM 0x02 // IMR - TFO +#define AX_SPI_IMR_MTCM 0x40 // IMR - MTC +#define AX_SPI_IMR_TFEM 0x10 // IMR - TFE +#define 
AX_SPI_IMR_RFFM 0x20 // IMR - RFFM + +#define AX_SPI_ISR 0x30 // ISR +#define AX_SPI_ISR_CLR 0xff // ISR - Clear +#define AX_SPI_ISR_MTC 0x40 // ISR - MTC +#define AX_SPI_ISR_TFE 0x10 // ISR - TFE +#define AX_SPI_ISR_RFF 0x20 // ISR - RFF + +#define AX_SPI_IVR 0x38 // IVR +#define AX_SPI_IVR_TFOV 0x02 // IVR - TFOV +#define AX_SPI_IVR_MTCV 0x40 // IVR - MTCV +#define AX_SPI_IVR_TFEV 0x10 // IVR - TFEV +#define AX_SPI_IVR_RFFV 0x20 // IVR - RFFV + +#define AX_SPI_TXFIFO 0x0c // TX_FIFO +#define AX_SPI_TX_RX_FBCR 0x10 // TX_RX_FBCR +#define AX_SPI_RXFIFO 0x1c // RX_FIFO + +#define AX_SPI_TS0 0x00 // Target select 0 +#define AX_SPI_TS1 0x01 // Target select 1 +#define AX_SPI_TS2 0x10 // Target select 2 +#define AX_SPI_TS3 0x11 // Target select 3 + +#define SPI_AUTOSUSPEND_TIMEOUT 3000 + +/* Default number of chip select lines also used as maximum number of chip select lines */ +#define AX_SPI_DEFAULT_NUM_CS 4 + +/* Default number of command buffer size */ +#define AX_SPI_COMMAND_BUFFER_SIZE 16 //Command + address bytes + +/* Target select mask + * 00 – TS0 + * 01 – TS1 + * 10 – TS2 + * 11 – TS3 + */ +#define AX_SPI_DEFAULT_TS_MASK 0x03 + +#define AX_SPI_RX_FIFO_DRAIN_LIMIT 24 +#define AX_SPI_TRX_FIFO_TIMEOUT 1000 +/** + * struct ax_spi - This definition defines spi driver instance + * @regs: Virtual address of the SPI controller registers + * @ref_clk: Pointer to the peripheral clock + * @pclk: Pointer to the APB clock + * @speed_hz: Current SPI bus clock speed in Hz + * @txbuf: Pointer to the TX buffer + * @rxbuf: Pointer to the RX buffer + * @tx_bytes: Number of bytes left to transfer + * @rx_bytes: Number of bytes requested + * @tx_fifo_depth: Depth of the TX FIFO + * @current_rx_fifo_word: Buffers the 32-bit word read from RXFIFO + * @bytes_left_in_current_rx_word: Bytes to be extracted from current 32-bit word + * @current_rx_fifo_word_for_irq: Buffers the 32-bit word read from RXFIFO for IRQ + * @bytes_left_in_current_rx_word_for_irq: IRQ bytes to be extracted 
from current 32-bit word + * @rx_discard: Number of bytes to discard + * @rx_copy_remaining: Number of bytes to copy + */ +struct ax_spi { + void __iomem *regs; + struct clk *ref_clk; + struct clk *pclk; + unsigned int clk_rate; + u32 speed_hz; + const u8 *tx_buf; + u8 *rx_buf; + int tx_bytes; + int rx_bytes; + unsigned int tx_fifo_depth; + u32 current_rx_fifo_word; + int bytes_left_in_current_rx_word; + u32 current_rx_fifo_word_for_irq; + int bytes_left_in_current_rx_word_for_irq; + int rx_discard; + int rx_copy_remaining; +}; + +#endif /* SPI_AXIADO_H */ diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index cfdaa5eaec76..9c06ac562f3e 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1529,7 +1529,6 @@ int bcm_qspi_probe(struct platform_device *pdev, host->transfer_one = bcm_qspi_transfer_one; host->mem_ops = &bcm_qspi_mem_ops; host->cleanup = bcm_qspi_cleanup; - host->dev.of_node = dev->of_node; host->num_chipselect = NUM_CHIPSELECT; host->use_gpio_descriptors = true; diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 192cc5ef65fb..35ae50ca37ac 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1368,7 +1368,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev) ctlr->transfer_one = bcm2835_spi_transfer_one; ctlr->handle_err = bcm2835_spi_handle_err; ctlr->prepare_message = bcm2835_spi_prepare_message; - ctlr->dev.of_node = pdev->dev.of_node; bs = spi_controller_get_devdata(ctlr); bs->ctlr = ctlr; diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 90698d7d809d..f6847d3a76de 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -502,7 +502,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) host->handle_err = bcm2835aux_spi_handle_err; host->prepare_message = bcm2835aux_spi_prepare_message; host->unprepare_message = bcm2835aux_spi_unprepare_message; - host->dev.of_node = pdev->dev.of_node; 
host->use_gpio_descriptors = true; bs = spi_controller_get_devdata(host); diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 18261cbd413b..612f8802e690 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -142,6 +142,7 @@ struct bcm63xx_hsspi { u32 wait_mode; u32 xfer_mode; u32 prepend_cnt; + u32 md_start; u8 *prepend_buf; }; @@ -268,18 +269,20 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host, { struct bcm63xx_hsspi *bs = spi_controller_get_devdata(host); - bool tx_only = false; + bool tx_only = false, multidata = false; struct spi_transfer *t; /* * Multiple transfers within a message may be combined into one transfer * to the controller using its prepend feature. A SPI message is prependable * only if the following are all true: - * 1. One or more half duplex write transfer in single bit mode - * 2. Optional full duplex read/write at the end - * 3. No delay and cs_change between transfers + * 1. One or more half duplex write transfers at the start + * 2. Optional switch from single to dual bit within the write transfers + * 3. Optional full duplex read/write at the end if all single bit + * 4. 
No delay and cs_change between transfers */ bs->prepend_cnt = 0; + bs->md_start = 0; list_for_each_entry(t, &msg->transfers, transfer_list) { if ((spi_delay_to_ns(&t->delay, t) > 0) || t->cs_change) { bcm63xx_prepend_printk_on_checkfail(bs, @@ -297,31 +300,44 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host, return false; } - if (t->tx_nbits > SPI_NBITS_SINGLE && - !list_is_last(&t->transfer_list, &msg->transfers)) { + if (t->tx_nbits == SPI_NBITS_SINGLE && + !list_is_last(&t->transfer_list, &msg->transfers) && + multidata) { bcm63xx_prepend_printk_on_checkfail(bs, - "multi-bit prepend buf not supported!\n"); + "single-bit after multi-bit not supported!\n"); return false; } - if (t->tx_nbits == SPI_NBITS_SINGLE) { - memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len); - bs->prepend_cnt += t->len; - } + if (t->tx_nbits > SPI_NBITS_SINGLE) + multidata = true; + + memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len); + bs->prepend_cnt += t->len; + + if (t->tx_nbits == SPI_NBITS_SINGLE) + bs->md_start += t->len; + } else { if (!list_is_last(&t->transfer_list, &msg->transfers)) { bcm63xx_prepend_printk_on_checkfail(bs, "rx/tx_rx transfer not supported when it is not last one!\n"); return false; } + + if (t->rx_buf && t->rx_nbits == SPI_NBITS_SINGLE && + multidata) { + bcm63xx_prepend_printk_on_checkfail(bs, + "single-bit after multi-bit not supported!\n"); + return false; + } } if (list_is_last(&t->transfer_list, &msg->transfers)) { memcpy(t_prepend, t, sizeof(struct spi_transfer)); - if (tx_only && t->tx_nbits == SPI_NBITS_SINGLE) { + if (tx_only) { /* - * if the last one is also a single bit tx only transfer, merge + * if the last one is also a tx only transfer, merge * all of them into one single tx transfer */ t_prepend->len = bs->prepend_cnt; @@ -329,7 +345,7 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host, bs->prepend_cnt = 0; } else { /* - * if the last one is not a tx only transfer or dual 
tx xfer, all + * if the last one is not a tx only transfer, all * the previous transfers are sent through prepend bytes and * make sure it does not exceed the max prepend len */ @@ -339,6 +355,15 @@ static bool bcm63xx_prepare_prepend_transfer(struct spi_controller *host, return false; } } + /* + * If switching from single-bit to multi-bit, make sure + * the start offset does not exceed the maximum + */ + if (multidata && bs->md_start > HSSPI_MAX_PREPEND_LEN) { + bcm63xx_prepend_printk_on_checkfail(bs, + "exceed max multi-bit offset, abort prepending transfers!\n"); + return false; + } } } @@ -381,11 +406,11 @@ static int bcm63xx_hsspi_do_prepend_txrx(struct spi_device *spi, if (t->rx_nbits == SPI_NBITS_DUAL) { reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT; - reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT; + reg |= bs->md_start << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT; } if (t->tx_nbits == SPI_NBITS_DUAL) { reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT; - reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT; + reg |= bs->md_start << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT; } } @@ -692,13 +717,6 @@ static bool bcm63xx_hsspi_mem_supports_op(struct spi_mem *mem, if (!spi_mem_default_supports_op(mem, op)) return false; - /* Controller doesn't support spi mem dual io mode */ - if ((op->cmd.opcode == SPINOR_OP_READ_1_2_2) || - (op->cmd.opcode == SPINOR_OP_READ_1_2_2_4B) || - (op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR) || - (op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR_4B)) - return false; - return true; } @@ -804,7 +822,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) init_completion(&bs->done); host->mem_ops = &bcm63xx_hsspi_mem_ops; - host->dev.of_node = dev->of_node; if (!dev->of_node) host->bus_num = HSSPI_BUS_NUM; diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 4c549f166b0f..47266bb23a33 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -571,7 +571,6 @@ static int bcm63xx_spi_probe(struct 
platform_device *pdev) goto out_err; } - host->dev.of_node = dev->of_node; host->bus_num = bus_num; host->num_chipselect = num_cs; host->transfer_one_message = bcm63xx_spi_transfer_one; diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c index f16298b75236..ece22260f570 100644 --- a/drivers/spi/spi-bcmbca-hsspi.c +++ b/drivers/spi/spi-bcmbca-hsspi.c @@ -500,7 +500,6 @@ static int bcmbca_hsspi_probe(struct platform_device *pdev) mutex_init(&bs->msg_mutex); init_completion(&bs->done); - host->dev.of_node = dev->of_node; if (!dev->of_node) host->bus_num = HSSPI_BUS_NUM; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 965b4cea3388..649ff55333f0 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -40,13 +40,15 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX); #define CQSPI_DISABLE_DAC_MODE BIT(1) #define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2) #define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3) -#define CQSPI_SLOW_SRAM BIT(4) +#define CQSPI_SLOW_SRAM BIT(4) #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) #define CQSPI_RD_NO_IRQ BIT(6) #define CQSPI_DMA_SET_MASK BIT(7) #define CQSPI_SUPPORT_DEVICE_RESET BIT(8) #define CQSPI_DISABLE_STIG_MODE BIT(9) #define CQSPI_DISABLE_RUNTIME_PM BIT(10) +#define CQSPI_NO_INDIRECT_MODE BIT(11) +#define CQSPI_HAS_WR_PROTECT BIT(12) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) @@ -55,7 +57,8 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX); #define CQSPI_OP_WIDTH(part) ((part).nbytes ? 
ilog2((part).buswidth) : 0) enum { - CLK_QSPI_APB = 0, + CLK_QSPI_REF = 0, + CLK_QSPI_APB, CLK_QSPI_AHB, CLK_QSPI_NUM, }; @@ -76,8 +79,7 @@ struct cqspi_flash_pdata { struct cqspi_st { struct platform_device *pdev; struct spi_controller *host; - struct clk *clk; - struct clk *clks[CLK_QSPI_NUM]; + struct clk_bulk_data clks[CLK_QSPI_NUM]; unsigned int sclk; void __iomem *iobase; @@ -108,6 +110,7 @@ struct cqspi_st { bool apb_ahb_hazard; bool is_jh7110; /* Flag for StarFive JH7110 SoC */ + bool is_rzn1; /* Flag for Renesas RZ/N1 SoC */ bool disable_stig_mode; refcount_t refcount; refcount_t inflight_ops; @@ -121,8 +124,6 @@ struct cqspi_driver_platdata { int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, u_char *rxbuf, loff_t from_addr, size_t n_rx); u32 (*get_dma_status)(struct cqspi_st *cqspi); - int (*jh7110_clk_init)(struct platform_device *pdev, - struct cqspi_st *cqspi); }; /* Operation timeout value */ @@ -219,6 +220,8 @@ struct cqspi_driver_platdata { #define CQSPI_REG_IRQSTATUS 0x40 #define CQSPI_REG_IRQMASK 0x44 +#define CQSPI_REG_WR_PROT_CTRL 0x58 + #define CQSPI_REG_INDIRECTRD 0x60 #define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) #define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) @@ -374,17 +377,12 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) /* Clear interrupt */ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); - if (cqspi->use_dma_read && ddata && ddata->get_dma_status) { - if (ddata->get_dma_status(cqspi)) { - complete(&cqspi->transfer_complete); - return IRQ_HANDLED; - } - } - - else if (!cqspi->slow_sram) - irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; - else + if (cqspi->use_dma_read && ddata && ddata->get_dma_status) + irq_status = ddata->get_dma_status(cqspi); + else if (cqspi->slow_sram) irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR; + else + irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; if (irq_status) complete(&cqspi->transfer_complete); @@ -1263,7 +1261,7 @@ static void 
cqspi_config_baudrate_div(struct cqspi_st *cqspi) reg = readl(reg_base + CQSPI_REG_CONFIG); reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); - reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB; + reg |= div << CQSPI_REG_CONFIG_BAUD_LSB; writel(reg, reg_base + CQSPI_REG_CONFIG); } @@ -1340,8 +1338,9 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata, * mode. So, we can not use direct mode when in DTR mode for writing * data. */ - if (!op->cmd.dtr && cqspi->use_direct_mode && - cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) { + if ((!op->cmd.dtr && cqspi->use_direct_mode && + cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) || + (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) { memcpy_toio(cqspi->ahb_base + to, buf, len); return cqspi_wait_idle(cqspi); } @@ -1430,7 +1429,8 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata, if (ret) return ret; - if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) + if ((cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) || + (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) return cqspi_direct_read_execute(f_pdata, buf, from, len); if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma && @@ -1514,6 +1514,7 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) static bool cqspi_supports_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { + struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); bool all_true, all_false; /* @@ -1536,6 +1537,13 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem, return false; if (op->data.nbytes && op->data.buswidth != 8) return false; + + /* A single opcode is supported, it will be repeated */ + if ((op->cmd.opcode >> 8) != (op->cmd.opcode & 0xFF)) + return false; + + if (cqspi->is_rzn1) + return false; } else if (!all_false) { /* Mixed DTR modes are not supported. 
*/ return false; @@ -1589,20 +1597,20 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi) cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); - if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { - /* Zero signals FIFO depth should be runtime detected. */ - cqspi->fifo_depth = 0; - } + if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) { + if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { + /* Zero signals FIFO depth should be runtime detected. */ + cqspi->fifo_depth = 0; + } - if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { - dev_err(dev, "couldn't determine fifo-width\n"); - return -ENXIO; - } + if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) + cqspi->fifo_width = 4; - if (of_property_read_u32(np, "cdns,trigger-address", - &cqspi->trigger_address)) { - dev_err(dev, "couldn't determine trigger-address\n"); - return -ENXIO; + if (of_property_read_u32(np, "cdns,trigger-address", + &cqspi->trigger_address)) { + dev_err(dev, "couldn't determine trigger-address\n"); + return -ENXIO; + } } if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect)) @@ -1627,19 +1635,24 @@ static void cqspi_controller_init(struct cqspi_st *cqspi) /* Disable all interrupts. */ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); - /* Configure the SRAM split to 1:1 . */ - writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); + if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) { + /* Configure the SRAM split to 1:1 . */ + writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); + /* Load indirect trigger address. */ + writel(cqspi->trigger_address, + cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); - /* Load indirect trigger address. */ - writel(cqspi->trigger_address, - cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); + /* Program read watermark -- 1/2 of the FIFO. 
*/ + writel(cqspi->fifo_depth * cqspi->fifo_width / 2, + cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); + /* Program write watermark -- 1/8 of the FIFO. */ + writel(cqspi->fifo_depth * cqspi->fifo_width / 8, + cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); + } - /* Program read watermark -- 1/2 of the FIFO. */ - writel(cqspi->fifo_depth * cqspi->fifo_width / 2, - cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); - /* Program write watermark -- 1/8 of the FIFO. */ - writel(cqspi->fifo_depth * cqspi->fifo_width / 8, - cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); + /* Disable write protection at controller level */ + if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_HAS_WR_PROTECT) + writel(0, cqspi->iobase + CQSPI_REG_WR_PROT_CTRL); /* Disable direct access controller */ if (!cqspi->use_direct_mode) { @@ -1661,6 +1674,9 @@ static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi) struct device *dev = &cqspi->pdev->dev; u32 reg, fifo_depth; + if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE) + return; + /* * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N * the FIFO depth. 
@@ -1764,51 +1780,6 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi) return 0; } -static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi) -{ - static struct clk_bulk_data qspiclk[] = { - { .id = "apb" }, - { .id = "ahb" }, - }; - - int ret = 0; - - ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk); - if (ret) { - dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__); - return ret; - } - - cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk; - cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk; - - ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]); - if (ret) { - dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__); - return ret; - } - - ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]); - if (ret) { - dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__); - goto disable_apb_clk; - } - - cqspi->is_jh7110 = true; - - return 0; - -disable_apb_clk: - clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]); - - return ret; -} - -static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi) -{ - clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]); - clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]); -} static int cqspi_probe(struct platform_device *pdev) { const struct cqspi_driver_platdata *ddata; @@ -1817,8 +1788,7 @@ static int cqspi_probe(struct platform_device *pdev) struct spi_controller *host; struct resource *res_ahb; struct cqspi_st *cqspi; - int ret; - int irq; + int ret, irq; host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi)); if (!host) @@ -1827,13 +1797,15 @@ static int cqspi_probe(struct platform_device *pdev) host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL; host->mem_ops = &cqspi_mem_ops; host->mem_caps = &cqspi_mem_caps; - host->dev.of_node = pdev->dev.of_node; cqspi = spi_controller_get_devdata(host); + if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) + cqspi->is_jh7110 = true; + if (of_device_is_compatible(pdev->dev.of_node, 
"renesas,rzn1-qspi")) + cqspi->is_rzn1 = true; cqspi->pdev = pdev; cqspi->host = host; - cqspi->is_jh7110 = false; cqspi->ddata = ddata = of_device_get_match_data(dev); platform_set_drvdata(pdev, cqspi); @@ -1844,14 +1816,22 @@ static int cqspi_probe(struct platform_device *pdev) return -ENODEV; } - /* Obtain QSPI clock. */ - cqspi->clk = devm_clk_get(dev, NULL); - if (IS_ERR(cqspi->clk)) { - dev_err(dev, "Cannot claim QSPI clock.\n"); - ret = PTR_ERR(cqspi->clk); + ret = cqspi_setup_flash(cqspi); + if (ret) { + dev_err(dev, "failed to setup flash parameters %d\n", ret); return ret; } + /* Obtain QSPI clocks. */ + ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clocks\n"); + + if (!cqspi->clks[CLK_QSPI_REF].clk) { + dev_err(dev, "Cannot claim mandatory QSPI ref clock.\n"); + return -ENODEV; + } + /* Obtain and remap controller address. */ cqspi->iobase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cqspi->iobase)) { @@ -1881,11 +1861,10 @@ static int cqspi_probe(struct platform_device *pdev) if (ret) return ret; - - ret = clk_prepare_enable(cqspi->clk); + ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks); if (ret) { - dev_err(dev, "Cannot enable QSPI clock.\n"); - goto probe_clk_failed; + dev_err(dev, "Cannot enable QSPI clocks.\n"); + goto disable_rpm; } /* Obtain QSPI reset control */ @@ -1893,22 +1872,22 @@ static int cqspi_probe(struct platform_device *pdev) if (IS_ERR(rstc)) { ret = PTR_ERR(rstc); dev_err(dev, "Cannot get QSPI reset.\n"); - goto probe_reset_failed; + goto disable_clks; } rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp"); if (IS_ERR(rstc_ocp)) { ret = PTR_ERR(rstc_ocp); dev_err(dev, "Cannot get QSPI OCP reset.\n"); - goto probe_reset_failed; + goto disable_clks; } - if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) { + if (cqspi->is_jh7110) { rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref"); 
if (IS_ERR(rstc_ref)) { ret = PTR_ERR(rstc_ref); dev_err(dev, "Cannot get QSPI REF reset.\n"); - goto probe_reset_failed; + goto disable_clks; } reset_control_assert(rstc_ref); reset_control_deassert(rstc_ref); @@ -1920,8 +1899,13 @@ static int cqspi_probe(struct platform_device *pdev) reset_control_assert(rstc_ocp); reset_control_deassert(rstc_ocp); - cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); - host->max_speed_hz = cqspi->master_ref_clk_hz; + cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clks[CLK_QSPI_REF].clk); + if (!cqspi->is_rzn1) { + host->max_speed_hz = cqspi->master_ref_clk_hz; + } else { + host->max_speed_hz = cqspi->master_ref_clk_hz / 2; + host->min_speed_hz = cqspi->master_ref_clk_hz / 32; + } /* write completion is supported by default */ cqspi->wr_completion = true; @@ -1946,19 +1930,13 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->slow_sram = true; if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR) cqspi->apb_ahb_hazard = true; - - if (ddata->jh7110_clk_init) { - ret = cqspi_jh7110_clk_init(pdev, cqspi); - if (ret) - goto probe_reset_failed; - } if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) cqspi->disable_stig_mode = true; if (ddata->quirks & CQSPI_DMA_SET_MASK) { ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (ret) - goto probe_reset_failed; + goto disable_clks; } } @@ -1969,7 +1947,7 @@ static int cqspi_probe(struct platform_device *pdev) pdev->name, cqspi); if (ret) { dev_err(dev, "Cannot request IRQ.\n"); - goto probe_reset_failed; + goto disable_clks; } cqspi_wait_idle(cqspi); @@ -1987,48 +1965,42 @@ static int cqspi_probe(struct platform_device *pdev) pm_runtime_get_noresume(dev); } - ret = cqspi_setup_flash(cqspi); - if (ret) { - dev_err(dev, "failed to setup flash parameters %d\n", ret); - goto probe_setup_failed; - } - host->num_chipselect = cqspi->num_chipselect; if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)) cqspi_device_reset(cqspi); - if (cqspi->use_direct_mode) { + if 
(cqspi->use_direct_mode && !cqspi->is_rzn1) { ret = cqspi_request_mmap_dma(cqspi); if (ret == -EPROBE_DEFER) { dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n"); - goto probe_setup_failed; + goto disable_controller; } } ret = spi_register_controller(host); if (ret) { dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret); - goto probe_setup_failed; + goto release_dma_chan; } - if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { - pm_runtime_mark_last_busy(dev); + if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) pm_runtime_put_autosuspend(dev); - } return 0; -probe_setup_failed: + +release_dma_chan: + if (cqspi->rx_chan) + dma_release_channel(cqspi->rx_chan); +disable_controller: + cqspi_controller_enable(cqspi, 0); +disable_clks: + if (pm_runtime_get_sync(&pdev->dev) >= 0) + clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); +disable_rpm: if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) pm_runtime_disable(dev); - cqspi_controller_enable(cqspi, 0); -probe_reset_failed: - if (cqspi->is_jh7110) - cqspi_jh7110_disable_clk(pdev, cqspi); - if (pm_runtime_get_sync(&pdev->dev) >= 0) - clk_disable_unprepare(cqspi->clk); -probe_clk_failed: return ret; } @@ -2037,6 +2009,7 @@ static void cqspi_remove(struct platform_device *pdev) const struct cqspi_driver_platdata *ddata; struct cqspi_st *cqspi = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; + int ret = 0; ddata = of_device_get_match_data(dev); @@ -2046,17 +2019,18 @@ static void cqspi_remove(struct platform_device *pdev) cqspi_wait_idle(cqspi); spi_unregister_controller(cqspi->host); - cqspi_controller_enable(cqspi, 0); if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); - if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) - if (pm_runtime_get_sync(&pdev->dev) >= 0) - clk_disable(cqspi->clk); + cqspi_controller_enable(cqspi, 0); - if (cqspi->is_jh7110) - cqspi_jh7110_disable_clk(pdev, cqspi); + + if (!(ddata && (ddata->quirks & 
CQSPI_DISABLE_RUNTIME_PM))) + ret = pm_runtime_get_sync(&pdev->dev); + + if (ret >= 0) + clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { pm_runtime_put_sync(&pdev->dev); @@ -2069,15 +2043,19 @@ static int cqspi_runtime_suspend(struct device *dev) struct cqspi_st *cqspi = dev_get_drvdata(dev); cqspi_controller_enable(cqspi, 0); - clk_disable_unprepare(cqspi->clk); + clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); return 0; } static int cqspi_runtime_resume(struct device *dev) { struct cqspi_st *cqspi = dev_get_drvdata(dev); + int ret; + + ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks); + if (ret) + return ret; - clk_prepare_enable(cqspi->clk); cqspi_wait_idle(cqspi); cqspi_controller_enable(cqspi, 0); cqspi_controller_init(cqspi); @@ -2137,33 +2115,29 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = { }; static const struct cqspi_driver_platdata socfpga_qspi = { - .quirks = CQSPI_DISABLE_DAC_MODE - | CQSPI_NO_SUPPORT_WR_COMPLETION - | CQSPI_SLOW_SRAM - | CQSPI_DISABLE_STIG_MODE - | CQSPI_DISABLE_RUNTIME_PM, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | + CQSPI_SLOW_SRAM | CQSPI_DISABLE_STIG_MODE | + CQSPI_DISABLE_RUNTIME_PM, }; static const struct cqspi_driver_platdata versal_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, - .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA - | CQSPI_DMA_SET_MASK, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA | + CQSPI_DMA_SET_MASK, .indirect_read_dma = cqspi_versal_indirect_read_dma, .get_dma_status = cqspi_get_versal_dma_status, }; static const struct cqspi_driver_platdata versal2_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, - .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA - | CQSPI_DMA_SET_MASK - | CQSPI_SUPPORT_DEVICE_RESET, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA | + CQSPI_DMA_SET_MASK | CQSPI_SUPPORT_DEVICE_RESET, .indirect_read_dma = 
cqspi_versal_indirect_read_dma, .get_dma_status = cqspi_get_versal_dma_status, }; static const struct cqspi_driver_platdata jh7110_qspi = { .quirks = CQSPI_DISABLE_DAC_MODE, - .jh7110_clk_init = cqspi_jh7110_clk_init, }; static const struct cqspi_driver_platdata pensando_cdns_qspi = { @@ -2173,7 +2147,13 @@ static const struct cqspi_driver_platdata pensando_cdns_qspi = { static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | - CQSPI_RD_NO_IRQ, + CQSPI_RD_NO_IRQ, +}; + +static const struct cqspi_driver_platdata renesas_rzn1_qspi = { + .hwcaps_mask = CQSPI_SUPPORTS_QUAD, + .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION | CQSPI_RD_NO_IRQ | + CQSPI_HAS_WR_PROTECT | CQSPI_NO_INDIRECT_MODE, }; static const struct of_device_id cqspi_dt_ids[] = { @@ -2217,6 +2197,10 @@ static const struct of_device_id cqspi_dt_ids[] = { .compatible = "amd,versal2-ospi", .data = &versal2_ospi, }, + { + .compatible = "renesas,rzn1-qspi", + .data = &renesas_rzn1_qspi, + }, { /* end of table */ } }; diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index 6dcba0e0ddaa..895b4b3276a5 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -2,7 +2,6 @@ // Cadence XSPI flash controller driver // Copyright (C) 2020-21 Cadence -#include #include #include #include @@ -12,15 +11,16 @@ #include #include #include -#include #include #include +#include #include #include #include #include #include #include +#include #define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522 #define CDNS_XSPI_MAX_BANKS 8 @@ -350,6 +350,7 @@ static const int cdns_mrvl_xspi_clk_div_list[] = { struct cdns_xspi_dev { struct platform_device *pdev; + struct spi_controller *host; struct device *dev; void __iomem *iobase; @@ -774,19 +775,15 @@ static int marvell_xspi_mem_op_execute(struct spi_mem *mem, return ret; } -#ifdef CONFIG_ACPI static bool cdns_xspi_supports_op(struct spi_mem 
*mem, const struct spi_mem_op *op) { struct spi_device *spi = mem->spi; - const union acpi_object *obj; - struct acpi_device *adev; + struct device *dev = &spi->dev; + u32 value; - adev = ACPI_COMPANION(&spi->dev); - - if (!acpi_dev_get_property(adev, "spi-tx-bus-width", ACPI_TYPE_INTEGER, - &obj)) { - switch (obj->integer.value) { + if (!device_property_read_u32(dev, "spi-tx-bus-width", &value)) { + switch (value) { case 1: break; case 2: @@ -799,16 +796,13 @@ static bool cdns_xspi_supports_op(struct spi_mem *mem, spi->mode |= SPI_TX_OCTAL; break; default: - dev_warn(&spi->dev, - "spi-tx-bus-width %lld not supported\n", - obj->integer.value); + dev_warn(dev, "spi-tx-bus-width %u not supported\n", value); break; } } - if (!acpi_dev_get_property(adev, "spi-rx-bus-width", ACPI_TYPE_INTEGER, - &obj)) { - switch (obj->integer.value) { + if (!device_property_read_u32(dev, "spi-rx-bus-width", &value)) { + switch (value) { case 1: break; case 2: @@ -821,9 +815,7 @@ static bool cdns_xspi_supports_op(struct spi_mem *mem, spi->mode |= SPI_RX_OCTAL; break; default: - dev_warn(&spi->dev, - "spi-rx-bus-width %lld not supported\n", - obj->integer.value); + dev_warn(dev, "spi-rx-bus-width %u not supported\n", value); break; } } @@ -833,7 +825,6 @@ static bool cdns_xspi_supports_op(struct spi_mem *mem, return true; } -#endif static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op) { @@ -846,17 +837,13 @@ static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op * } static const struct spi_controller_mem_ops cadence_xspi_mem_ops = { -#ifdef CONFIG_ACPI - .supports_op = cdns_xspi_supports_op, -#endif + .supports_op = PTR_IF(IS_ENABLED(CONFIG_ACPI), cdns_xspi_supports_op), .exec_op = cdns_xspi_mem_op_execute, .adjust_op_size = cdns_xspi_adjust_mem_op_size, }; static const struct spi_controller_mem_ops marvell_xspi_mem_ops = { -#ifdef CONFIG_ACPI - .supports_op = cdns_xspi_supports_op, -#endif + .supports_op = 
PTR_IF(IS_ENABLED(CONFIG_ACPI), cdns_xspi_supports_op), .exec_op = marvell_xspi_mem_op_execute, .adjust_op_size = cdns_xspi_adjust_mem_op_size, }; @@ -1157,12 +1144,9 @@ static int cdns_xspi_probe(struct platform_device *pdev) SPI_MODE_0 | SPI_MODE_3; cdns_xspi = spi_controller_get_devdata(host); - cdns_xspi->driver_data = of_device_get_match_data(dev); - if (!cdns_xspi->driver_data) { - cdns_xspi->driver_data = acpi_device_get_match_data(dev); - if (!cdns_xspi->driver_data) - return -ENODEV; - } + cdns_xspi->driver_data = device_get_match_data(dev); + if (!cdns_xspi->driver_data) + return -ENODEV; if (cdns_xspi->driver_data->mrvl_hw_overlay) { host->mem_ops = &marvell_xspi_mem_ops; @@ -1174,12 +1158,12 @@ static int cdns_xspi_probe(struct platform_device *pdev) cdns_xspi->sdma_handler = &cdns_xspi_sdma_handle; cdns_xspi->set_interrupts_handler = &cdns_xspi_set_interrupts; } - host->dev.of_node = pdev->dev.of_node; host->bus_num = -1; - platform_set_drvdata(pdev, host); + platform_set_drvdata(pdev, cdns_xspi); cdns_xspi->pdev = pdev; + cdns_xspi->host = host; cdns_xspi->dev = &pdev->dev; cdns_xspi->cur_cs = 0; @@ -1268,6 +1252,30 @@ static int cdns_xspi_probe(struct platform_device *pdev) return 0; } +static int cdns_xspi_suspend(struct device *dev) +{ + struct cdns_xspi_dev *cdns_xspi = dev_get_drvdata(dev); + + return spi_controller_suspend(cdns_xspi->host); +} + +static int cdns_xspi_resume(struct device *dev) +{ + struct cdns_xspi_dev *cdns_xspi = dev_get_drvdata(dev); + + if (cdns_xspi->driver_data->mrvl_hw_overlay) { + cdns_mrvl_xspi_setup_clock(cdns_xspi, MRVL_DEFAULT_CLK); + cdns_xspi_configure_phy(cdns_xspi); + } + + cdns_xspi->set_interrupts_handler(cdns_xspi, false); + + return spi_controller_resume(cdns_xspi->host); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(cdns_xspi_pm_ops, + cdns_xspi_suspend, cdns_xspi_resume); + static const struct of_device_id cdns_xspi_of_match[] = { { .compatible = "cdns,xspi-nor", @@ -1286,6 +1294,7 @@ static struct platform_driver 
cdns_xspi_platform_driver = { .driver = { .name = CDNS_XSPI_NAME, .of_match_table = cdns_xspi_of_match, + .pm = pm_sleep_ptr(&cdns_xspi_pm_ops), }, }; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 41b5b58cbfac..caa7a57e6d27 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -651,7 +651,6 @@ static int cdns_spi_probe(struct platform_device *pdev) return -ENOMEM; xspi = spi_controller_get_devdata(ctlr); - ctlr->dev.of_node = pdev->dev.of_node; platform_set_drvdata(pdev, ctlr); xspi->regs = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/spi/spi-cavium-octeon.c b/drivers/spi/spi-cavium-octeon.c index a5ad90d66ec0..155085a053a1 100644 --- a/drivers/spi/spi-cavium-octeon.c +++ b/drivers/spi/spi-cavium-octeon.c @@ -54,7 +54,6 @@ static int octeon_spi_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_MASK(8); host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ; - host->dev.of_node = pdev->dev.of_node; err = devm_spi_register_controller(&pdev->dev, host); if (err) { dev_err(&pdev->dev, "register host failed: %d\n", err); diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index 367ae7120bb3..99aac40a1bba 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -67,7 +67,6 @@ static int thunderx_spi_probe(struct pci_dev *pdev, host->transfer_one_message = octeon_spi_transfer_one_message; host->bits_per_word_mask = SPI_BPW_MASK(8); host->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ; - host->dev.of_node = pdev->dev.of_node; pci_set_drvdata(pdev, host); diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index 5552ccd716fc..d6458e59d41b 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c @@ -107,7 +107,6 @@ static int spi_clps711x_probe(struct platform_device *pdev) host->bus_num = -1; host->mode_bits = SPI_CPHA | SPI_CS_HIGH; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8); - host->dev.of_node = 
pdev->dev.of_node; host->prepare_message = spi_clps711x_prepare_message; host->transfer_one = spi_clps711x_transfer_one; diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index 4b6b65f450a8..a4a650c8d740 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -371,6 +371,14 @@ static int cs42l43_spi_probe(struct platform_device *pdev) fwnode_property_read_u32(xu_fwnode, "01fa-sidecar-instances", &nsidecars); + /* + * Depending on the value of nsidecars we either create a software node + * or assign an fwnode. We don't want software node to be attached to + * the default one. That's why we need to clear the SPI controller fwnode + * first. + */ + device_set_node(&priv->ctlr->dev, NULL); + if (nsidecars) { struct software_node_ref_args args[] = { SOFTWARE_NODE_REFERENCE(fwnode, 0, GPIO_ACTIVE_LOW), diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 21a14e800eed..d680142a059f 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -988,7 +988,6 @@ static int davinci_spi_probe(struct platform_device *pdev) } host->use_gpio_descriptors = true; - host->dev.of_node = pdev->dev.of_node; host->bus_num = pdev->id; host->num_chipselect = pdata->num_chipselect; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16); diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c index 2013bc56ded8..d90282960ab6 100644 --- a/drivers/spi/spi-dln2.c +++ b/drivers/spi/spi-dln2.c @@ -682,15 +682,12 @@ static int dln2_spi_probe(struct platform_device *pdev) struct spi_controller *host; struct dln2_spi *dln2; struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct device *dev = &pdev->dev; int ret; host = spi_alloc_host(&pdev->dev, sizeof(*dln2)); if (!host) return -ENOMEM; - device_set_node(&host->dev, dev_fwnode(dev)); - platform_set_drvdata(pdev, host); dln2 = spi_controller_get_devdata(host); diff --git a/drivers/spi/spi-dw-bt1.c b/drivers/spi/spi-dw-bt1.c deleted file mode 100644 index 
91642e05ac60..000000000000 --- a/drivers/spi/spi-dw-bt1.c +++ /dev/null @@ -1,331 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -// -// Copyright (C) 2020 BAIKAL ELECTRONICS, JSC -// -// Authors: -// Ramil Zaripov -// Serge Semin -// -// Baikal-T1 DW APB SPI and System Boot SPI driver -// - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "spi-dw.h" - -#define BT1_BOOT_DIRMAP 0 -#define BT1_BOOT_REGS 1 - -struct dw_spi_bt1 { - struct dw_spi dws; - struct clk *clk; - struct mux_control *mux; - -#ifdef CONFIG_SPI_DW_BT1_DIRMAP - void __iomem *map; - resource_size_t map_len; -#endif -}; -#define to_dw_spi_bt1(_ctlr) \ - container_of(spi_controller_get_devdata(_ctlr), struct dw_spi_bt1, dws) - -typedef int (*dw_spi_bt1_init_cb)(struct platform_device *pdev, - struct dw_spi_bt1 *dwsbt1); - -#ifdef CONFIG_SPI_DW_BT1_DIRMAP - -static int dw_spi_bt1_dirmap_create(struct spi_mem_dirmap_desc *desc) -{ - struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller); - - if (!dwsbt1->map || - !dwsbt1->dws.mem_ops.supports_op(desc->mem, &desc->info.op_tmpl)) - return -EOPNOTSUPP; - - if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) - return -EOPNOTSUPP; - - /* - * Make sure the requested region doesn't go out of the physically - * mapped flash memory bounds. - */ - if (desc->info.offset + desc->info.length > dwsbt1->map_len) - return -EINVAL; - - return 0; -} - -/* - * Directly mapped SPI memory region is only accessible in the dword chunks. - * That's why we have to create a dedicated read-method to copy data from there - * to the passed buffer. - */ -static void dw_spi_bt1_dirmap_copy_from_map(void *to, void __iomem *from, size_t len) -{ - size_t shift, chunk; - u32 data; - - /* - * We split the copying up into the next three stages: unaligned head, - * aligned body, unaligned tail. 
- */ - shift = (size_t)from & 0x3; - if (shift) { - chunk = min_t(size_t, 4 - shift, len); - data = readl_relaxed(from - shift); - memcpy(to, (char *)&data + shift, chunk); - from += chunk; - to += chunk; - len -= chunk; - } - - while (len >= 4) { - data = readl_relaxed(from); - memcpy(to, &data, 4); - from += 4; - to += 4; - len -= 4; - } - - if (len) { - data = readl_relaxed(from); - memcpy(to, &data, len); - } -} - -static ssize_t dw_spi_bt1_dirmap_read(struct spi_mem_dirmap_desc *desc, - u64 offs, size_t len, void *buf) -{ - struct dw_spi_bt1 *dwsbt1 = to_dw_spi_bt1(desc->mem->spi->controller); - struct dw_spi *dws = &dwsbt1->dws; - struct spi_mem *mem = desc->mem; - struct dw_spi_cfg cfg; - int ret; - - /* - * Make sure the requested operation length is valid. Truncate the - * length if it's greater than the length of the MMIO region. - */ - if (offs >= dwsbt1->map_len || !len) - return 0; - - len = min_t(size_t, len, dwsbt1->map_len - offs); - - /* Collect the controller configuration required by the operation */ - cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD; - cfg.dfs = 8; - cfg.ndf = 4; - cfg.freq = mem->spi->max_speed_hz; - - /* Make sure the corresponding CS is de-asserted on transmission */ - dw_spi_set_cs(mem->spi, false); - - dw_spi_enable_chip(dws, 0); - - dw_spi_update_config(dws, mem->spi, &cfg); - - dw_spi_umask_intr(dws, DW_SPI_INT_RXFI); - - dw_spi_enable_chip(dws, 1); - - /* - * Enable the transparent mode of the System Boot Controller. - * The SPI core IO should have been locked before calling this method - * so noone would be touching the controller' registers during the - * dirmap operation. 
- */ - ret = mux_control_select(dwsbt1->mux, BT1_BOOT_DIRMAP); - if (ret) - return ret; - - dw_spi_bt1_dirmap_copy_from_map(buf, dwsbt1->map + offs, len); - - mux_control_deselect(dwsbt1->mux); - - dw_spi_set_cs(mem->spi, true); - - ret = dw_spi_check_status(dws, true); - - return ret ?: len; -} - -#endif /* CONFIG_SPI_DW_BT1_DIRMAP */ - -static int dw_spi_bt1_std_init(struct platform_device *pdev, - struct dw_spi_bt1 *dwsbt1) -{ - struct dw_spi *dws = &dwsbt1->dws; - - dws->irq = platform_get_irq(pdev, 0); - if (dws->irq < 0) - return dws->irq; - - dws->num_cs = 4; - - /* - * Baikal-T1 Normal SPI Controllers don't always keep up with full SPI - * bus speed especially when it comes to the concurrent access to the - * APB bus resources. Thus we have no choice but to set a constraint on - * the SPI bus frequency for the memory operations which require to - * read/write data as fast as possible. - */ - dws->max_mem_freq = 20000000U; - - dw_spi_dma_setup_generic(dws); - - return 0; -} - -static int dw_spi_bt1_sys_init(struct platform_device *pdev, - struct dw_spi_bt1 *dwsbt1) -{ - struct resource *mem __maybe_unused; - struct dw_spi *dws = &dwsbt1->dws; - - /* - * Baikal-T1 System Boot Controller is equipped with a mux, which - * switches between the directly mapped SPI flash access mode and - * IO access to the DW APB SSI registers. Note the mux controller - * must be setup to preserve the registers being accessible by default - * (on idle-state). - */ - dwsbt1->mux = devm_mux_control_get(&pdev->dev, NULL); - if (IS_ERR(dwsbt1->mux)) - return PTR_ERR(dwsbt1->mux); - - /* - * Directly mapped SPI flash memory is a 16MB MMIO region, which can be - * used to access a peripheral memory device just by reading/writing - * data from/to it. Note the system APB bus will stall during each IO - * from/to the dirmap region until the operation is finished. 
So don't - * use it concurrently with time-critical tasks (like the SPI memory - * operations implemented in the DW APB SSI driver). - */ -#ifdef CONFIG_SPI_DW_BT1_DIRMAP - mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (mem) { - dwsbt1->map = devm_ioremap_resource(&pdev->dev, mem); - if (!IS_ERR(dwsbt1->map)) { - dwsbt1->map_len = resource_size(mem); - dws->mem_ops.dirmap_create = dw_spi_bt1_dirmap_create; - dws->mem_ops.dirmap_read = dw_spi_bt1_dirmap_read; - } else { - dwsbt1->map = NULL; - } - } -#endif /* CONFIG_SPI_DW_BT1_DIRMAP */ - - /* - * There is no IRQ, no DMA and just one CS available on the System Boot - * SPI controller. - */ - dws->irq = IRQ_NOTCONNECTED; - dws->num_cs = 1; - - /* - * Baikal-T1 System Boot SPI Controller doesn't keep up with the full - * SPI bus speed due to relatively slow APB bus and races for it' - * resources from different CPUs. The situation is worsen by a small - * FIFOs depth (just 8 words). It works better in a single CPU mode - * though, but still tends to be not fast enough at low CPU - * frequencies. 
- */ - if (num_possible_cpus() > 1) - dws->max_mem_freq = 10000000U; - else - dws->max_mem_freq = 20000000U; - - return 0; -} - -static int dw_spi_bt1_probe(struct platform_device *pdev) -{ - dw_spi_bt1_init_cb init_func; - struct dw_spi_bt1 *dwsbt1; - struct resource *mem; - struct dw_spi *dws; - int ret; - - dwsbt1 = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_bt1), GFP_KERNEL); - if (!dwsbt1) - return -ENOMEM; - - dws = &dwsbt1->dws; - - dws->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); - if (IS_ERR(dws->regs)) - return PTR_ERR(dws->regs); - - dws->paddr = mem->start; - - dwsbt1->clk = devm_clk_get_enabled(&pdev->dev, NULL); - if (IS_ERR(dwsbt1->clk)) - return PTR_ERR(dwsbt1->clk); - - dws->bus_num = pdev->id; - dws->reg_io_width = 4; - dws->max_freq = clk_get_rate(dwsbt1->clk); - if (!dws->max_freq) - return -EINVAL; - - init_func = device_get_match_data(&pdev->dev); - ret = init_func(pdev, dwsbt1); - if (ret) - return ret; - - pm_runtime_enable(&pdev->dev); - - ret = dw_spi_add_controller(&pdev->dev, dws); - if (ret) { - pm_runtime_disable(&pdev->dev); - return ret; - } - - platform_set_drvdata(pdev, dwsbt1); - - return 0; -} - -static void dw_spi_bt1_remove(struct platform_device *pdev) -{ - struct dw_spi_bt1 *dwsbt1 = platform_get_drvdata(pdev); - - dw_spi_remove_controller(&dwsbt1->dws); - - pm_runtime_disable(&pdev->dev); -} - -static const struct of_device_id dw_spi_bt1_of_match[] = { - { .compatible = "baikal,bt1-ssi", .data = dw_spi_bt1_std_init}, - { .compatible = "baikal,bt1-sys-ssi", .data = dw_spi_bt1_sys_init}, - { } -}; -MODULE_DEVICE_TABLE(of, dw_spi_bt1_of_match); - -static struct platform_driver dw_spi_bt1_driver = { - .probe = dw_spi_bt1_probe, - .remove = dw_spi_bt1_remove, - .driver = { - .name = "bt1-sys-ssi", - .of_match_table = dw_spi_bt1_of_match, - }, -}; -module_platform_driver(dw_spi_bt1_driver); - -MODULE_AUTHOR("Serge Semin "); -MODULE_DESCRIPTION("Baikal-T1 System Boot SPI Controller driver"); 
-MODULE_LICENSE("GPL v2"); -MODULE_IMPORT_NS("SPI_DW_CORE"); diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index 9ebf244294f8..0d59c141beb0 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -936,8 +936,6 @@ int dw_spi_add_controller(struct device *dev, struct dw_spi *dws) if (!ctlr) return -ENOMEM; - device_set_node(&ctlr->dev, dev_fwnode(dev)); - dws->ctlr = ctlr; dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index 33239b4778cb..1bfdf24c3227 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -104,10 +104,8 @@ static int dw_spi_mscc_init(struct platform_device *pdev, return -ENOMEM; dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(dwsmscc->spi_mst)) { - dev_err(&pdev->dev, "SPI_MST region map failed\n"); + if (IS_ERR(dwsmscc->spi_mst)) return PTR_ERR(dwsmscc->spi_mst); - } dwsmscc->syscon = syscon_regmap_lookup_by_compatible(cpu_syscon); if (IS_ERR(dwsmscc->syscon)) @@ -392,6 +390,38 @@ out_reset: return ret; } +static int dw_spi_mmio_suspend(struct device *dev) +{ + struct dw_spi_mmio *dwsmmio = dev_get_drvdata(dev); + int ret; + + ret = dw_spi_suspend_controller(&dwsmmio->dws); + if (ret) + return ret; + + reset_control_assert(dwsmmio->rstc); + + clk_disable_unprepare(dwsmmio->pclk); + clk_disable_unprepare(dwsmmio->clk); + + return 0; +} + +static int dw_spi_mmio_resume(struct device *dev) +{ + struct dw_spi_mmio *dwsmmio = dev_get_drvdata(dev); + + clk_prepare_enable(dwsmmio->clk); + clk_prepare_enable(dwsmmio->pclk); + + reset_control_deassert(dwsmmio->rstc); + + return dw_spi_resume_controller(&dwsmmio->dws); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(dw_spi_mmio_pm_ops, + dw_spi_mmio_suspend, dw_spi_mmio_resume); + static void dw_spi_mmio_remove(struct platform_device *pdev) { struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); @@ -435,6 +465,7 @@ static struct platform_driver 
dw_spi_mmio_driver = { .name = DRIVER_NAME, .of_match_table = dw_spi_mmio_of_match, .acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match), + .pm = pm_sleep_ptr(&dw_spi_mmio_pm_ops), }, }; module_platform_driver(dw_spi_mmio_driver); diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index e1d097091925..90d5f3ea6508 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -689,7 +689,6 @@ static int ep93xx_spi_probe(struct platform_device *pdev) /* make sure that the hardware is disabled */ writel(0, espi->mmio + SSPCR1); - device_set_node(&host->dev, dev_fwnode(&pdev->dev)); error = devm_spi_register_controller(&pdev->dev, host); if (error) { dev_err(&pdev->dev, "failed to register SPI host\n"); diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c index faa893f83dc5..cb15faabd88f 100644 --- a/drivers/spi/spi-falcon.c +++ b/drivers/spi/spi-falcon.c @@ -405,7 +405,6 @@ static int falcon_sflash_probe(struct platform_device *pdev) host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->setup = falcon_sflash_setup; host->transfer_one_message = falcon_sflash_xfer_one; - host->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_controller(&pdev->dev, host); if (ret) diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index e01c63d23b64..68276d195917 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -531,7 +531,6 @@ static size_t fsi_spi_max_transfer_size(struct spi_device *spi) static int fsi_spi_probe(struct device *dev) { int rc; - struct device_node *np; int num_controllers_registered = 0; struct fsi2spi *bridge; struct fsi_device *fsi = to_fsi_dev(dev); @@ -547,7 +546,7 @@ static int fsi_spi_probe(struct device *dev) bridge->fsi = fsi; mutex_init(&bridge->lock); - for_each_available_child_of_node(dev->of_node, np) { + for_each_available_child_of_node_scoped(dev->of_node, np) { u32 base; struct fsi_spi *ctx; struct spi_controller *ctlr; @@ -556,10 +555,8 @@ static int fsi_spi_probe(struct device *dev) continue; 
ctlr = spi_alloc_host(dev, sizeof(*ctx)); - if (!ctlr) { - of_node_put(np); + if (!ctlr) break; - } ctlr->dev.of_node = np; ctlr->num_chipselect = of_get_available_child_count(np) ?: 1; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 83ea296597e9..76f142a54254 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1555,7 +1555,6 @@ static int dspi_probe(struct platform_device *pdev) ctlr->setup = dspi_setup; ctlr->transfer_one_message = dspi_transfer_one_message; - ctlr->dev.of_node = pdev->dev.of_node; ctlr->cleanup = dspi_cleanup; ctlr->target_abort = dspi_target_abort; diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index f2f1d3298e6c..b06555a457f8 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -675,7 +675,6 @@ static int fsl_espi_probe(struct device *dev, struct resource *mem, host->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_LOOP; - host->dev.of_node = dev->of_node; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); host->setup = fsl_espi_setup; host->cleanup = fsl_espi_cleanup; diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index bb7a625db5b0..1f8960f15483 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c @@ -91,7 +91,6 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem, ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_LOOP; - ctlr->dev.of_node = dev->of_node; mpc8xxx_spi = spi_controller_get_devdata(ctlr); mpc8xxx_spi->dev = dev; diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 065456aba2ae..b361c1bb3e43 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -281,7 +281,8 @@ static void fsl_lpspi_read_rx_fifo(struct fsl_lpspi_data *fsl_lpspi) fsl_lpspi->rx(fsl_lpspi); } -static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi) +static void fsl_lpspi_set_cmd(struct fsl_lpspi_data 
*fsl_lpspi, + struct spi_device *spi) { u32 temp = 0; @@ -303,6 +304,13 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi) temp |= TCR_CONTC; } } + + if (spi->mode & SPI_CPOL) + temp |= TCR_CPOL; + + if (spi->mode & SPI_CPHA) + temp |= TCR_CPHA; + writel(temp, fsl_lpspi->base + IMX7ULP_TCR); dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp); @@ -486,22 +494,47 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller, fsl_lpspi->tx = fsl_lpspi_buf_tx_u32; } - /* - * t->len is 'unsigned' and txfifosize and watermrk is 'u8', force - * type cast is inevitable. When len > 255, len will be truncated in min_t(), - * it caused wrong watermark set. 'unsigned int' is as the designated type - * for min_t() to avoid truncation. - */ - fsl_lpspi->watermark = min_t(unsigned int, - fsl_lpspi->txfifosize, - t->len); + fsl_lpspi->watermark = min(fsl_lpspi->txfifosize, t->len); + + return fsl_lpspi_config(fsl_lpspi); +} + +static int fsl_lpspi_prepare_message(struct spi_controller *controller, + struct spi_message *msg) +{ + struct fsl_lpspi_data *fsl_lpspi = + spi_controller_get_devdata(controller); + struct spi_device *spi = msg->spi; + struct spi_transfer *t; + int ret; + + t = list_first_entry_or_null(&msg->transfers, struct spi_transfer, + transfer_list); + if (!t) + return 0; + + fsl_lpspi->is_first_byte = true; + fsl_lpspi->usedma = false; + ret = fsl_lpspi_setup_transfer(controller, spi, t); if (fsl_lpspi_can_dma(controller, spi, t)) fsl_lpspi->usedma = true; else fsl_lpspi->usedma = false; - return fsl_lpspi_config(fsl_lpspi); + if (ret < 0) + return ret; + + fsl_lpspi_set_cmd(fsl_lpspi, spi); + + /* No IRQs */ + writel(0, fsl_lpspi->base + IMX7ULP_IER); + + /* Controller disable, clear FIFOs, clear status */ + writel(CR_RRF | CR_RTF, fsl_lpspi->base + IMX7ULP_CR); + writel(SR_CLEAR_MASK, fsl_lpspi->base + IMX7ULP_SR); + + return 0; } static int fsl_lpspi_target_abort(struct spi_controller *controller) @@ -761,14 +794,18 @@ static int 
fsl_lpspi_transfer_one(struct spi_controller *controller, spi_controller_get_devdata(controller); int ret; - fsl_lpspi->is_first_byte = true; + if (fsl_lpspi_can_dma(controller, spi, t)) + fsl_lpspi->usedma = true; + else + fsl_lpspi->usedma = false; + ret = fsl_lpspi_setup_transfer(controller, spi, t); if (ret < 0) return ret; t->effective_speed_hz = fsl_lpspi->config.effective_speed_hz; - fsl_lpspi_set_cmd(fsl_lpspi); + fsl_lpspi_set_cmd(fsl_lpspi, spi); fsl_lpspi->is_first_byte = false; if (fsl_lpspi->usedma) @@ -952,12 +989,12 @@ static int fsl_lpspi_probe(struct platform_device *pdev) } controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + controller->prepare_message = fsl_lpspi_prepare_message; controller->transfer_one = fsl_lpspi_transfer_one; controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; - controller->dev.of_node = pdev->dev.of_node; controller->bus_num = pdev->id; controller->num_chipselect = num_cs; controller->target_abort = fsl_lpspi_target_abort; diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index a0d8d3425c6c..43ce47f2454c 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -82,6 +82,7 @@ struct spi_geni_master { u32 fifo_width_bits; u32 tx_wm; u32 last_mode; + u8 last_cs; unsigned long cur_speed_hz; unsigned long cur_sclk_hz; unsigned int cur_bits_per_word; @@ -145,8 +146,7 @@ static int get_spi_clk_cfg(unsigned int speed_hz, return ret; } -static void handle_se_timeout(struct spi_controller *spi, - struct spi_message *msg) +static void handle_se_timeout(struct spi_controller *spi) { struct spi_geni_master *mas = spi_controller_get_devdata(spi); unsigned long time_left; @@ -160,24 +160,20 @@ static void handle_se_timeout(struct spi_controller *spi, xfer = mas->cur_xfer; 
mas->cur_xfer = NULL; - if (spi->target) { - /* - * skip CMD Cancel sequnece since spi target - * doesn`t support CMD Cancel sequnece - */ + /* The controller doesn't support the Cancel commnand in target mode */ + if (!spi->target) { + reinit_completion(&mas->cancel_done); + geni_se_cancel_m_cmd(se); + spin_unlock_irq(&mas->lock); - goto reset_if_dma; + + time_left = wait_for_completion_timeout(&mas->cancel_done, HZ); + if (time_left) + goto reset_if_dma; + + spin_lock_irq(&mas->lock); } - reinit_completion(&mas->cancel_done); - geni_se_cancel_m_cmd(se); - spin_unlock_irq(&mas->lock); - - time_left = wait_for_completion_timeout(&mas->cancel_done, HZ); - if (time_left) - goto reset_if_dma; - - spin_lock_irq(&mas->lock); reinit_completion(&mas->abort_done); geni_se_abort_m_cmd(se); spin_unlock_irq(&mas->lock); @@ -225,7 +221,7 @@ reset_if_dma: } } -static void handle_gpi_timeout(struct spi_controller *spi, struct spi_message *msg) +static void handle_gpi_timeout(struct spi_controller *spi) { struct spi_geni_master *mas = spi_controller_get_devdata(spi); @@ -240,10 +236,10 @@ static void spi_geni_handle_err(struct spi_controller *spi, struct spi_message * switch (mas->cur_xfer_mode) { case GENI_SE_FIFO: case GENI_SE_DMA: - handle_se_timeout(spi, msg); + handle_se_timeout(spi); break; case GENI_GPI_DMA: - handle_gpi_timeout(spi, msg); + handle_gpi_timeout(spi); break; default: dev_err(mas->dev, "Abort on Mode:%d not supported", mas->cur_xfer_mode); @@ -284,55 +280,6 @@ static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas) return false; } -static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) -{ - struct spi_geni_master *mas = spi_controller_get_devdata(slv->controller); - struct spi_controller *spi = dev_get_drvdata(mas->dev); - struct geni_se *se = &mas->se; - unsigned long time_left; - - if (!(slv->mode & SPI_CS_HIGH)) - set_flag = !set_flag; - - if (set_flag == mas->cs_flag) - return; - - pm_runtime_get_sync(mas->dev); - - if 
(spi_geni_is_abort_still_pending(mas)) { - dev_err(mas->dev, "Can't set chip select\n"); - goto exit; - } - - spin_lock_irq(&mas->lock); - if (mas->cur_xfer) { - dev_err(mas->dev, "Can't set CS when prev xfer running\n"); - spin_unlock_irq(&mas->lock); - goto exit; - } - - mas->cs_flag = set_flag; - /* set xfer_mode to FIFO to complete cs_done in isr */ - mas->cur_xfer_mode = GENI_SE_FIFO; - geni_se_select_mode(se, mas->cur_xfer_mode); - - reinit_completion(&mas->cs_done); - if (set_flag) - geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0); - else - geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0); - spin_unlock_irq(&mas->lock); - - time_left = wait_for_completion_timeout(&mas->cs_done, HZ); - if (!time_left) { - dev_warn(mas->dev, "Timeout setting chip select\n"); - handle_se_timeout(spi, NULL); - } - -exit: - pm_runtime_put(mas->dev); -} - static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode, unsigned int bits_per_word) { @@ -399,36 +346,27 @@ static int setup_fifo_params(struct spi_device *spi_slv, { struct spi_geni_master *mas = spi_controller_get_devdata(spi); struct geni_se *se = &mas->se; - u32 loopback_cfg = 0, cpol = 0, cpha = 0, demux_output_inv = 0; - u32 demux_sel; + u8 chipselect = spi_get_chipselect(spi_slv, 0); + bool cs_changed = (mas->last_cs != chipselect); + u32 mode_changed = mas->last_mode ^ spi_slv->mode; - if (mas->last_mode != spi_slv->mode) { - if (spi_slv->mode & SPI_LOOP) - loopback_cfg = LOOPBACK_ENABLE; + mas->last_cs = chipselect; + mas->last_mode = spi_slv->mode; - if (spi_slv->mode & SPI_CPOL) - cpol = CPOL; + if (mode_changed & SPI_LSB_FIRST) + mas->cur_bits_per_word = 0; /* force next setup_se_xfer to call spi_setup_word_len */ + if (mode_changed & SPI_LOOP) + writel((spi_slv->mode & SPI_LOOP) ? LOOPBACK_ENABLE : 0, se->base + SE_SPI_LOOPBACK); + if (cs_changed) + writel(chipselect, se->base + SE_SPI_DEMUX_SEL); + if (mode_changed & SE_SPI_CPHA) + writel((spi_slv->mode & SPI_CPHA) ? 
CPHA : 0, se->base + SE_SPI_CPHA); + if (mode_changed & SE_SPI_CPOL) + writel((spi_slv->mode & SPI_CPOL) ? CPOL : 0, se->base + SE_SPI_CPOL); + if ((mode_changed & SPI_CS_HIGH) || (cs_changed && (spi_slv->mode & SPI_CS_HIGH))) + writel((spi_slv->mode & SPI_CS_HIGH) ? BIT(chipselect) : 0, se->base + SE_SPI_DEMUX_OUTPUT_INV); - if (spi_slv->mode & SPI_CPHA) - cpha = CPHA; - - if (spi_slv->mode & SPI_CS_HIGH) - demux_output_inv = BIT(spi_get_chipselect(spi_slv, 0)); - - demux_sel = spi_get_chipselect(spi_slv, 0); - mas->cur_bits_per_word = spi_slv->bits_per_word; - - spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word); - writel(loopback_cfg, se->base + SE_SPI_LOOPBACK); - writel(demux_sel, se->base + SE_SPI_DEMUX_SEL); - writel(cpha, se->base + SE_SPI_CPHA); - writel(cpol, se->base + SE_SPI_CPOL); - writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV); - - mas->last_mode = spi_slv->mode; - } - - return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz); + return 0; } static void @@ -548,10 +486,10 @@ static u32 get_xfer_len_in_words(struct spi_transfer *xfer, { u32 len; - if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) - len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word; + if (!(xfer->bits_per_word % MIN_WORD_LEN)) + len = xfer->len * BITS_PER_BYTE / xfer->bits_per_word; else - len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1); + len = xfer->len / (xfer->bits_per_word / BITS_PER_BYTE + 1); len &= TRANS_LEN_MSK; return len; @@ -571,7 +509,7 @@ static bool geni_can_dma(struct spi_controller *ctlr, return true; len = get_xfer_len_in_words(xfer, mas); - fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word; + fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / xfer->bits_per_word; if (len > fifo_size) return true; @@ -724,11 +662,17 @@ static int spi_geni_init(struct spi_geni_master *mas) case 0: mas->cur_xfer_mode = GENI_SE_FIFO; geni_se_select_mode(se, GENI_SE_FIFO); + /* setup_fifo_params assumes that 
these registers start with a zero value */ + writel(0, se->base + SE_SPI_LOOPBACK); + writel(0, se->base + SE_SPI_DEMUX_SEL); + writel(0, se->base + SE_SPI_CPHA); + writel(0, se->base + SE_SPI_CPOL); + writel(0, se->base + SE_SPI_DEMUX_OUTPUT_INV); ret = 0; break; } - /* We always control CS manually */ + /* We never control CS manually */ if (!spi->target) { spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG); spi_tx_cfg &= ~CS_TOGGLE; @@ -841,6 +785,7 @@ static int setup_se_xfer(struct spi_transfer *xfer, u16 mode, struct spi_controller *spi) { u32 m_cmd = 0; + u32 m_params = 0; u32 len; struct geni_se *se = &mas->se; int ret; @@ -904,12 +849,17 @@ static int setup_se_xfer(struct spi_transfer *xfer, mas->cur_xfer_mode = GENI_SE_DMA; geni_se_select_mode(se, mas->cur_xfer_mode); + if (!xfer->cs_change) { + if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers)) + m_params = FRAGMENTATION; + } + /* * Lock around right before we start the transfer since our * interrupt could come in at any time now. 
*/ spin_lock_irq(&mas->lock); - geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION); + geni_se_setup_m_cmd(se, m_cmd, m_params); if (mas->cur_xfer_mode == GENI_SE_DMA) { if (m_cmd & SPI_RX_ONLY) @@ -1053,6 +1003,17 @@ static irqreturn_t geni_spi_isr(int irq, void *data) return IRQ_HANDLED; } +static int spi_geni_target_abort(struct spi_controller *spi) +{ + if (!spi->cur_msg) + return 0; + + handle_se_timeout(spi); + spi_finalize_current_transfer(spi); + + return 0; +} + static int spi_geni_probe(struct platform_device *pdev) { int ret, irq; @@ -1078,7 +1039,11 @@ static int spi_geni_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - spi = devm_spi_alloc_host(dev, sizeof(*mas)); + if (device_property_read_bool(dev, "spi-slave")) + spi = devm_spi_alloc_target(dev, sizeof(*mas)); + else + spi = devm_spi_alloc_host(dev, sizeof(*mas)); + if (!spi) return -ENOMEM; @@ -1102,7 +1067,6 @@ static int spi_geni_probe(struct platform_device *pdev) } spi->bus_num = -1; - spi->dev.of_node = dev->of_node; spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH; spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); spi->num_chipselect = 4; @@ -1123,6 +1087,9 @@ static int spi_geni_probe(struct platform_device *pdev) init_completion(&mas->rx_reset_done); spin_lock_init(&mas->lock); + if (spi->target) + spi->target_abort = spi_geni_target_abort; + ret = geni_icc_get(&mas->se, NULL); if (ret) return ret; @@ -1133,9 +1100,6 @@ static int spi_geni_probe(struct platform_device *pdev) if (ret) return ret; - if (device_property_read_bool(&pdev->dev, "spi-slave")) - spi->target = true; - /* Set the bus quota to a reasonable value for register access */ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ); mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW; @@ -1148,14 +1112,6 @@ static int spi_geni_probe(struct platform_device *pdev) if (ret) return ret; - /* - * check the mode supported and set_cs for fifo mode only - * for dma (gsi) mode, the 
gsi will set cs based on params passed in - * TRE - */ - if (!spi->target && mas->cur_xfer_mode == GENI_SE_FIFO) - spi->set_cs = spi_geni_set_cs; - /* * TX is required per GSI spec, see setup_gsi_xfer(). */ diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index c8dadb532c40..072127a38fad 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -351,7 +351,6 @@ static int spi_gpio_probe(struct platform_device *pdev) return -ENOMEM; if (fwnode) { - device_set_node(&host->dev, fwnode); host->use_gpio_descriptors = true; } else { status = spi_gpio_probe_pdata(pdev, host); diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c index 3aff5a166c94..97d2420108c0 100644 --- a/drivers/spi/spi-gxp.c +++ b/drivers/spi/spi-gxp.c @@ -284,7 +284,6 @@ static int gxp_spifi_probe(struct platform_device *pdev) ctlr->mem_ops = &gxp_spi_mem_ops; ctlr->setup = gxp_spi_setup; ctlr->num_chipselect = data->max_cs; - ctlr->dev.of_node = dev->of_node; ret = devm_spi_register_controller(dev, ctlr); if (ret) { diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c index 80a1a15de0bc..f123cdab9007 100644 --- a/drivers/spi/spi-hisi-kunpeng.c +++ b/drivers/spi/spi-hisi-kunpeng.c @@ -495,7 +495,6 @@ static int hisi_spi_probe(struct platform_device *pdev) host->cleanup = hisi_spi_cleanup; host->transfer_one = hisi_spi_transfer_one; host->handle_err = hisi_spi_handle_err; - host->dev.fwnode = dev->fwnode; host->min_speed_hz = DIV_ROUND_UP(host->max_speed_hz, CLK_DIV_MAX); hisi_spi_hw_init(hs); diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 168ccf51f6d4..902fb64815c9 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -587,7 +587,6 @@ static int img_spfi_probe(struct platform_device *pdev) host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL; if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode")) host->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD; - host->dev.of_node = 
pdev->dev.of_node; host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8); host->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4; host->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index b8b79bb7fec3..f65c0bf11a73 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -60,6 +60,7 @@ MODULE_PARM_DESC(polling_limit_us, #define MX51_ECSPI_CTRL_MAX_BURST 512 /* The maximum bytes that IMX53_ECSPI can transfer in target mode.*/ #define MX53_MAX_TRANSFER_BYTES 512 +#define BYTES_PER_32BITS_WORD 4 enum spi_imx_devtype { IMX1_CSPI, @@ -95,6 +96,16 @@ struct spi_imx_devtype_data { enum spi_imx_devtype devtype; }; +struct dma_data_package { + u32 cmd_word; + void *dma_rx_buf; + void *dma_tx_buf; + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; + int dma_len; + int data_len; +}; + struct spi_imx_data { struct spi_controller *controller; struct device *dev; @@ -130,6 +141,9 @@ struct spi_imx_data { u32 wml; struct completion dma_rx_completion; struct completion dma_tx_completion; + size_t dma_package_num; + struct dma_data_package *dma_data; + int rx_offset; const struct spi_imx_devtype_data *devtype_data; }; @@ -189,6 +203,9 @@ MXC_SPI_BUF_TX(u16) MXC_SPI_BUF_RX(u32) MXC_SPI_BUF_TX(u32) +/* Align to cache line to avoid swiotlo bounce */ +#define DMA_CACHE_ALIGNED_LEN(x) ALIGN((x), dma_get_cache_alignment()) + /* First entry is reserved, second entry is valid only if SDHC_SPIEN is set * (which is currently not the case in this driver) */ @@ -247,12 +264,26 @@ static bool spi_imx_can_dma(struct spi_controller *controller, struct spi_device if (!controller->dma_rx) return false; - if (spi_imx->target_mode) + /* + * Due to Freescale errata ERR003775 "eCSPI: Burst completion by Chip + * Select (SS) signal in Slave mode is not functional" burst size must + * be set exactly to the size of the transfer. This limit SPI transaction + * with maximum 2^12 bits. 
+ */ + if (transfer->len > MX53_MAX_TRANSFER_BYTES && spi_imx->target_mode) return false; if (transfer->len < spi_imx->devtype_data->fifo_size) return false; + /* DMA only can transmit data in bytes */ + if (spi_imx->bits_per_word != 8 && spi_imx->bits_per_word != 16 && + spi_imx->bits_per_word != 32) + return false; + + if (transfer->len >= MAX_SDMA_BD_BYTES) + return false; + spi_imx->dynamic_burst = 0; return true; @@ -1282,50 +1313,6 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int spi_imx_dma_configure(struct spi_controller *controller) -{ - int ret; - enum dma_slave_buswidth buswidth; - struct dma_slave_config rx = {}, tx = {}; - struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); - - switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) { - case 4: - buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; - break; - case 2: - buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; - break; - case 1: - buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; - break; - default: - return -EINVAL; - } - - tx.direction = DMA_MEM_TO_DEV; - tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA; - tx.dst_addr_width = buswidth; - tx.dst_maxburst = spi_imx->wml; - ret = dmaengine_slave_config(controller->dma_tx, &tx); - if (ret) { - dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret); - return ret; - } - - rx.direction = DMA_DEV_TO_MEM; - rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA; - rx.src_addr_width = buswidth; - rx.src_maxburst = spi_imx->wml; - ret = dmaengine_slave_config(controller->dma_rx, &rx); - if (ret) { - dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret); - return ret; - } - - return 0; -} - static int spi_imx_setupxfer(struct spi_device *spi, struct spi_transfer *t) { @@ -1442,8 +1429,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, init_completion(&spi_imx->dma_rx_completion); init_completion(&spi_imx->dma_tx_completion); - controller->can_dma = spi_imx_can_dma; - 
controller->max_dma_len = MAX_SDMA_BD_BYTES; spi_imx->controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; @@ -1481,31 +1466,445 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size) return secs_to_jiffies(2 * timeout); } -static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, - struct spi_transfer *transfer) +static void spi_imx_dma_unmap(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data) { + struct device *tx_dev = spi_imx->controller->dma_tx->device->dev; + struct device *rx_dev = spi_imx->controller->dma_rx->device->dev; + + dma_unmap_single(tx_dev, dma_data->dma_tx_addr, + DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), + DMA_TO_DEVICE); + dma_unmap_single(rx_dev, dma_data->dma_rx_addr, + DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), + DMA_FROM_DEVICE); +} + +static void spi_imx_dma_rx_data_handle(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data, void *rx_buf, + bool word_delay) +{ + void *copy_ptr; + int unaligned; + + /* + * On little-endian CPUs, adjust byte order: + * - Swap bytes when bpw = 8 + * - Swap half-words when bpw = 16 + * This ensures correct data ordering for DMA transfers. + */ +#ifdef __LITTLE_ENDIAN + if (!word_delay) { + unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); + u32 *temp = dma_data->dma_rx_buf; + + for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) { + if (bytes_per_word == 1) + swab32s(temp + i); + else if (bytes_per_word == 2) + swahw32s(temp + i); + } + } +#endif + + /* + * When dynamic burst enabled, DMA RX always receives 32-bit words from RXFIFO with + * buswidth = 4, but when data_len is not 4-bytes alignment, the RM shows when + * burst length = 32*n + m bits, a SPI burst contains the m LSB in first word and all + * 32 bits in other n words. So if garbage bytes in the first word, trim first word then + * copy the actual data to rx_buf. 
+ */ + if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) { + unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD; + copy_ptr = (u8 *)dma_data->dma_rx_buf + BYTES_PER_32BITS_WORD - unaligned; + } else { + copy_ptr = dma_data->dma_rx_buf; + } + + memcpy(rx_buf, copy_ptr, dma_data->data_len); +} + +static int spi_imx_dma_map(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data) +{ + struct spi_controller *controller = spi_imx->controller; + struct device *tx_dev = controller->dma_tx->device->dev; + struct device *rx_dev = controller->dma_rx->device->dev; + int ret; + + dma_data->dma_tx_addr = dma_map_single(tx_dev, dma_data->dma_tx_buf, + DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), + DMA_TO_DEVICE); + ret = dma_mapping_error(tx_dev, dma_data->dma_tx_addr); + if (ret < 0) { + dev_err(spi_imx->dev, "DMA TX map failed %d\n", ret); + return ret; + } + + dma_data->dma_rx_addr = dma_map_single(rx_dev, dma_data->dma_rx_buf, + DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), + DMA_FROM_DEVICE); + ret = dma_mapping_error(rx_dev, dma_data->dma_rx_addr); + if (ret < 0) { + dev_err(spi_imx->dev, "DMA RX map failed %d\n", ret); + dma_unmap_single(tx_dev, dma_data->dma_tx_addr, + DMA_CACHE_ALIGNED_LEN(dma_data->dma_len), + DMA_TO_DEVICE); + return ret; + } + + return 0; +} + +static int spi_imx_dma_tx_data_handle(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data, + const void *tx_buf, + bool word_delay) +{ + void *copy_ptr; + int unaligned; + + if (word_delay) { + dma_data->dma_len = dma_data->data_len; + } else { + /* + * As per the reference manual, when burst length = 32*n + m bits, ECSPI + * sends m LSB bits in the first word, followed by n full 32-bit words. + * Since actual data may not be 4-byte aligned, allocate DMA TX/RX buffers + * to ensure alignment. For TX, DMA pushes 4-byte aligned words to TXFIFO, + * while ECSPI uses BURST_LENGTH settings to maintain correct bit count. 
+ * For RX, DMA always receives 32-bit words from RXFIFO, when data len is + * not 4-byte aligned, trim the first word to drop garbage bytes, then group + * all transfer DMA bounse buffer and copy all valid data to rx_buf. + */ + dma_data->dma_len = ALIGN(dma_data->data_len, BYTES_PER_32BITS_WORD); + } + + dma_data->dma_tx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL); + if (!dma_data->dma_tx_buf) + return -ENOMEM; + + dma_data->dma_rx_buf = kzalloc(dma_data->dma_len, GFP_KERNEL); + if (!dma_data->dma_rx_buf) { + kfree(dma_data->dma_tx_buf); + return -ENOMEM; + } + + if (dma_data->data_len % BYTES_PER_32BITS_WORD && !word_delay) { + unaligned = dma_data->data_len % BYTES_PER_32BITS_WORD; + copy_ptr = (u8 *)dma_data->dma_tx_buf + BYTES_PER_32BITS_WORD - unaligned; + } else { + copy_ptr = dma_data->dma_tx_buf; + } + + memcpy(copy_ptr, tx_buf, dma_data->data_len); + + /* + * When word_delay is enabled, DMA transfers an entire word in one minor loop. + * In this case, no data requires additional handling. + */ + if (word_delay) + return 0; + +#ifdef __LITTLE_ENDIAN + /* + * On little-endian CPUs, adjust byte order: + * - Swap bytes when bpw = 8 + * - Swap half-words when bpw = 16 + * This ensures correct data ordering for DMA transfers. + */ + unsigned int bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word); + u32 *temp = dma_data->dma_tx_buf; + + for (int i = 0; i < DIV_ROUND_UP(dma_data->dma_len, sizeof(*temp)); i++) { + if (bytes_per_word == 1) + swab32s(temp + i); + else if (bytes_per_word == 2) + swahw32s(temp + i); + } +#endif + + return 0; +} + +static int spi_imx_dma_data_prepare(struct spi_imx_data *spi_imx, + struct spi_transfer *transfer, + bool word_delay) +{ + u32 pre_bl, tail_bl; + u32 ctrl; + int ret; + + /* + * ECSPI supports a maximum burst of 512 bytes. When xfer->len exceeds 512 + * and is not a multiple of 512, a tail transfer is required. BURST_LEGTH + * is used for SPI HW to maintain correct bit count. 
BURST_LENGTH should + * update with data length. After DMA request submit, SPI can not update the + * BURST_LENGTH, in this case, we must split two package, update the register + * then setup second DMA transfer. + */ + ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); + if (word_delay) { + /* + * When SPI IMX need to support word delay, according to "Sample Period Control + * Register" shows, The Sample Period Control Register (ECSPI_PERIODREG) + * provides software a way to insert delays (wait states) between consecutive + * SPI transfers. As a result, ECSPI can only transfer one word per frame, and + * the delay occurs between frames. + */ + spi_imx->dma_package_num = 1; + pre_bl = spi_imx->bits_per_word - 1; + } else if (transfer->len <= MX51_ECSPI_CTRL_MAX_BURST) { + spi_imx->dma_package_num = 1; + pre_bl = transfer->len * BITS_PER_BYTE - 1; + } else if (!(transfer->len % MX51_ECSPI_CTRL_MAX_BURST)) { + spi_imx->dma_package_num = 1; + pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1; + } else { + spi_imx->dma_package_num = 2; + pre_bl = MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1; + tail_bl = (transfer->len % MX51_ECSPI_CTRL_MAX_BURST) * BITS_PER_BYTE - 1; + } + + spi_imx->dma_data = kmalloc_array(spi_imx->dma_package_num, + sizeof(struct dma_data_package), + GFP_KERNEL | __GFP_ZERO); + if (!spi_imx->dma_data) { + dev_err(spi_imx->dev, "Failed to allocate DMA package buffer!\n"); + return -ENOMEM; + } + + if (spi_imx->dma_package_num == 1) { + ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; + ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET; + spi_imx->dma_data[0].cmd_word = ctrl; + spi_imx->dma_data[0].data_len = transfer->len; + ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0], transfer->tx_buf, + word_delay); + if (ret) { + kfree(spi_imx->dma_data); + return ret; + } + } else { + ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; + ctrl |= pre_bl << MX51_ECSPI_CTRL_BL_OFFSET; + spi_imx->dma_data[0].cmd_word = ctrl; + spi_imx->dma_data[0].data_len = 
round_down(transfer->len, + MX51_ECSPI_CTRL_MAX_BURST); + ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[0], transfer->tx_buf, + false); + if (ret) { + kfree(spi_imx->dma_data); + return ret; + } + + ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; + ctrl |= tail_bl << MX51_ECSPI_CTRL_BL_OFFSET; + spi_imx->dma_data[1].cmd_word = ctrl; + spi_imx->dma_data[1].data_len = transfer->len % MX51_ECSPI_CTRL_MAX_BURST; + ret = spi_imx_dma_tx_data_handle(spi_imx, &spi_imx->dma_data[1], + transfer->tx_buf + spi_imx->dma_data[0].data_len, + false); + if (ret) { + kfree(spi_imx->dma_data[0].dma_tx_buf); + kfree(spi_imx->dma_data[0].dma_rx_buf); + kfree(spi_imx->dma_data); + } + } + + return 0; +} + +static int spi_imx_dma_submit(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data, + struct spi_transfer *transfer) +{ + struct spi_controller *controller = spi_imx->controller; struct dma_async_tx_descriptor *desc_tx, *desc_rx; unsigned long transfer_timeout; unsigned long time_left; - struct spi_controller *controller = spi_imx->controller; - struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; - struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents); - unsigned int bytes_per_word, i; - int ret; + dma_cookie_t cookie; + + /* + * The TX DMA setup starts the transfer, so make sure RX is configured + * before TX. 
+ */ + desc_rx = dmaengine_prep_slave_single(controller->dma_rx, dma_data->dma_rx_addr, + dma_data->dma_len, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_rx) { + transfer->error |= SPI_TRANS_FAIL_NO_START; + return -EINVAL; + } + + desc_rx->callback = spi_imx_dma_rx_callback; + desc_rx->callback_param = (void *)spi_imx; + cookie = dmaengine_submit(desc_rx); + if (dma_submit_error(cookie)) { + dev_err(spi_imx->dev, "submitting DMA RX failed\n"); + transfer->error |= SPI_TRANS_FAIL_NO_START; + goto dmaengine_terminate_rx; + } + + reinit_completion(&spi_imx->dma_rx_completion); + dma_async_issue_pending(controller->dma_rx); + + desc_tx = dmaengine_prep_slave_single(controller->dma_tx, dma_data->dma_tx_addr, + dma_data->dma_len, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc_tx) + goto dmaengine_terminate_rx; + + desc_tx->callback = spi_imx_dma_tx_callback; + desc_tx->callback_param = (void *)spi_imx; + cookie = dmaengine_submit(desc_tx); + if (dma_submit_error(cookie)) { + dev_err(spi_imx->dev, "submitting DMA TX failed\n"); + goto dmaengine_terminate_tx; + } + reinit_completion(&spi_imx->dma_tx_completion); + dma_async_issue_pending(controller->dma_tx); + + spi_imx->devtype_data->trigger(spi_imx); + + transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); + + if (!spi_imx->target_mode) { + /* Wait SDMA to finish the data transfer.*/ + time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion, + transfer_timeout); + if (!time_left) { + dev_err(spi_imx->dev, "I/O Error in DMA TX\n"); + dmaengine_terminate_all(controller->dma_tx); + dmaengine_terminate_all(controller->dma_rx); + return -ETIMEDOUT; + } + + time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion, + transfer_timeout); + if (!time_left) { + dev_err(&controller->dev, "I/O Error in DMA RX\n"); + spi_imx->devtype_data->reset(spi_imx); + dmaengine_terminate_all(controller->dma_rx); + return -ETIMEDOUT; + } + } else { + 
spi_imx->target_aborted = false; + + if (wait_for_completion_interruptible(&spi_imx->dma_tx_completion) || + READ_ONCE(spi_imx->target_aborted)) { + dev_dbg(spi_imx->dev, "I/O Error in DMA TX interrupted\n"); + dmaengine_terminate_all(controller->dma_tx); + dmaengine_terminate_all(controller->dma_rx); + return -EINTR; + } + + if (wait_for_completion_interruptible(&spi_imx->dma_rx_completion) || + READ_ONCE(spi_imx->target_aborted)) { + dev_dbg(spi_imx->dev, "I/O Error in DMA RX interrupted\n"); + dmaengine_terminate_all(controller->dma_rx); + return -EINTR; + } + + /* + * ECSPI has a HW issue when works in Target mode, after 64 words + * writtern to TXFIFO, even TXFIFO becomes empty, ECSPI_TXDATA keeps + * shift out the last word data, so we have to disable ECSPI when in + * target mode after the transfer completes. + */ + if (spi_imx->devtype_data->disable) + spi_imx->devtype_data->disable(spi_imx); + } + + return 0; + +dmaengine_terminate_tx: + dmaengine_terminate_all(controller->dma_tx); +dmaengine_terminate_rx: + dmaengine_terminate_all(controller->dma_rx); + + return -EINVAL; +} + +static void spi_imx_dma_max_wml_find(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data, + bool word_delay) +{ + unsigned int bytes_per_word = word_delay ? 
+ spi_imx_bytes_per_word(spi_imx->bits_per_word) : + BYTES_PER_32BITS_WORD; + unsigned int i; - /* Get the right burst length from the last sg to ensure no tail data */ - bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word); for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) { - if (!(sg_dma_len(last_sg) % (i * bytes_per_word))) + if (!dma_data->dma_len % (i * bytes_per_word)) break; } /* Use 1 as wml in case no available burst length got */ if (i == 0) i = 1; - spi_imx->wml = i; + spi_imx->wml = i; +} - ret = spi_imx_dma_configure(controller); +static int spi_imx_dma_configure(struct spi_controller *controller, bool word_delay) +{ + int ret; + enum dma_slave_buswidth buswidth; + struct dma_slave_config rx = {}, tx = {}; + struct spi_imx_data *spi_imx = spi_controller_get_devdata(controller); + + if (word_delay) { + switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) { + case 4: + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + case 2: + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case 1: + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + default: + return -EINVAL; + } + } else { + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + } + + tx.direction = DMA_MEM_TO_DEV; + tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA; + tx.dst_addr_width = buswidth; + tx.dst_maxburst = spi_imx->wml; + ret = dmaengine_slave_config(controller->dma_tx, &tx); + if (ret) { + dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret); + return ret; + } + + rx.direction = DMA_DEV_TO_MEM; + rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA; + rx.src_addr_width = buswidth; + rx.src_maxburst = spi_imx->wml; + ret = dmaengine_slave_config(controller->dma_rx, &rx); + if (ret) { + dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret); + return ret; + } + + return 0; +} + +static int spi_imx_dma_package_transfer(struct spi_imx_data *spi_imx, + struct dma_data_package *dma_data, + struct spi_transfer *transfer, + bool word_delay) +{ + struct 
spi_controller *controller = spi_imx->controller; + int ret; + + spi_imx_dma_max_wml_find(spi_imx, dma_data, word_delay); + + ret = spi_imx_dma_configure(controller, word_delay); if (ret) goto dma_failure_no_start; @@ -1516,61 +1915,16 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, } spi_imx->devtype_data->setup_wml(spi_imx); - /* - * The TX DMA setup starts the transfer, so make sure RX is configured - * before TX. - */ - desc_rx = dmaengine_prep_slave_sg(controller->dma_rx, - rx->sgl, rx->nents, DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc_rx) { - ret = -EINVAL; - goto dma_failure_no_start; - } + ret = spi_imx_dma_submit(spi_imx, dma_data, transfer); + if (ret) + return ret; - desc_rx->callback = spi_imx_dma_rx_callback; - desc_rx->callback_param = (void *)spi_imx; - dmaengine_submit(desc_rx); - reinit_completion(&spi_imx->dma_rx_completion); - dma_async_issue_pending(controller->dma_rx); - - desc_tx = dmaengine_prep_slave_sg(controller->dma_tx, - tx->sgl, tx->nents, DMA_MEM_TO_DEV, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); - if (!desc_tx) { - dmaengine_terminate_all(controller->dma_tx); - dmaengine_terminate_all(controller->dma_rx); - return -EINVAL; - } - - desc_tx->callback = spi_imx_dma_tx_callback; - desc_tx->callback_param = (void *)spi_imx; - dmaengine_submit(desc_tx); - reinit_completion(&spi_imx->dma_tx_completion); - dma_async_issue_pending(controller->dma_tx); - - spi_imx->devtype_data->trigger(spi_imx); - - transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len); - - /* Wait SDMA to finish the data transfer.*/ - time_left = wait_for_completion_timeout(&spi_imx->dma_tx_completion, - transfer_timeout); - if (!time_left) { - dev_err(spi_imx->dev, "I/O Error in DMA TX\n"); - dmaengine_terminate_all(controller->dma_tx); - dmaengine_terminate_all(controller->dma_rx); - return -ETIMEDOUT; - } - - time_left = wait_for_completion_timeout(&spi_imx->dma_rx_completion, - transfer_timeout); - if (!time_left) { - 
dev_err(&controller->dev, "I/O Error in DMA RX\n"); - spi_imx->devtype_data->reset(spi_imx); - dmaengine_terminate_all(controller->dma_rx); - return -ETIMEDOUT; - } + /* Trim the DMA RX buffer and copy the actual data to rx_buf */ + dma_sync_single_for_cpu(controller->dma_rx->device->dev, dma_data->dma_rx_addr, + dma_data->dma_len, DMA_FROM_DEVICE); + spi_imx_dma_rx_data_handle(spi_imx, dma_data, transfer->rx_buf + spi_imx->rx_offset, + word_delay); + spi_imx->rx_offset += dma_data->data_len; return 0; /* fallback to pio */ @@ -1579,6 +1933,57 @@ dma_failure_no_start: return ret; } +static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, + struct spi_transfer *transfer) +{ + bool word_delay = transfer->word_delay.value != 0 && !spi_imx->target_mode; + int ret; + int i; + + ret = spi_imx_dma_data_prepare(spi_imx, transfer, word_delay); + if (ret < 0) { + transfer->error |= SPI_TRANS_FAIL_NO_START; + dev_err(spi_imx->dev, "DMA data prepare fail\n"); + goto fallback_pio; + } + + spi_imx->rx_offset = 0; + + /* Each dma_package performs a separate DMA transfer once */ + for (i = 0; i < spi_imx->dma_package_num; i++) { + ret = spi_imx_dma_map(spi_imx, &spi_imx->dma_data[i]); + if (ret < 0) { + if (i == 0) + transfer->error |= SPI_TRANS_FAIL_NO_START; + dev_err(spi_imx->dev, "DMA map fail\n"); + break; + } + + /* Update the CTRL register BL field */ + writel(spi_imx->dma_data[i].cmd_word, spi_imx->base + MX51_ECSPI_CTRL); + + ret = spi_imx_dma_package_transfer(spi_imx, &spi_imx->dma_data[i], + transfer, word_delay); + + /* Whether the dma transmission is successful or not, dma unmap is necessary */ + spi_imx_dma_unmap(spi_imx, &spi_imx->dma_data[i]); + + if (ret < 0) { + dev_dbg(spi_imx->dev, "DMA %d transfer not really finish\n", i); + break; + } + } + + for (int j = 0; j < spi_imx->dma_package_num; j++) { + kfree(spi_imx->dma_data[j].dma_tx_buf); + kfree(spi_imx->dma_data[j].dma_rx_buf); + } + kfree(spi_imx->dma_data); + +fallback_pio: + return ret; +} + static 
int spi_imx_pio_transfer(struct spi_device *spi, struct spi_transfer *transfer) { @@ -1737,7 +2142,7 @@ static int spi_imx_transfer_one(struct spi_controller *controller, while (spi_imx->devtype_data->rx_available(spi_imx)) readl(spi_imx->base + MXC_CSPIRXDATA); - if (spi_imx->target_mode) + if (spi_imx->target_mode && !spi_imx->usedma) return spi_imx_pio_transfer_target(spi, transfer); /* @@ -1745,9 +2150,17 @@ static int spi_imx_transfer_one(struct spi_controller *controller, * transfer, the SPI transfer has already been mapped, so we * have to do the DMA transfer here. */ - if (spi_imx->usedma) - return spi_imx_dma_transfer(spi_imx, transfer); - + if (spi_imx->usedma) { + ret = spi_imx_dma_transfer(spi_imx, transfer); + if (transfer->error & SPI_TRANS_FAIL_NO_START) { + spi_imx->usedma = false; + if (spi_imx->target_mode) + return spi_imx_pio_transfer_target(spi, transfer); + else + return spi_imx_pio_transfer(spi, transfer); + } + return ret; + } /* run in polling mode for short transfers */ if (transfer->len == 1 || (polling_limit_us && spi_imx_transfer_estimate_time_us(transfer) < polling_limit_us)) @@ -1955,7 +2368,6 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx->devtype_data->intctrl(spi_imx, 0); - controller->dev.of_node = pdev->dev.of_node; ret = spi_register_controller(controller); if (ret) { dev_err_probe(&pdev->dev, ret, "register controller failed\n"); diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c index 318b0768701e..adcf85bccbcc 100644 --- a/drivers/spi/spi-ingenic.c +++ b/drivers/spi/spi-ingenic.c @@ -442,7 +442,6 @@ static int spi_ingenic_probe(struct platform_device *pdev) ctlr->use_gpio_descriptors = true; ctlr->max_native_cs = pdata->max_native_cs; ctlr->num_chipselect = num_cs; - ctlr->dev.of_node = pdev->dev.of_node; if (spi_ingenic_request_dma(ctlr, dev)) dev_warn(dev, "DMA not available.\n"); diff --git a/drivers/spi/spi-lantiq-ssc.c b/drivers/spi/spi-lantiq-ssc.c index 60849e07f674..f83cb63c9d0c 
100644 --- a/drivers/spi/spi-lantiq-ssc.c +++ b/drivers/spi/spi-lantiq-ssc.c @@ -962,7 +962,6 @@ static int lantiq_ssc_probe(struct platform_device *pdev) spi->bits_per_word = 8; spi->speed_hz = 0; - host->dev.of_node = pdev->dev.of_node; host->num_chipselect = num_cs; host->use_gpio_descriptors = true; host->setup = lantiq_ssc_setup; diff --git a/drivers/spi/spi-ljca.c b/drivers/spi/spi-ljca.c index 3f412cf8f1cd..0c6e6248d8ba 100644 --- a/drivers/spi/spi-ljca.c +++ b/drivers/spi/spi-ljca.c @@ -238,7 +238,6 @@ static int ljca_spi_probe(struct auxiliary_device *auxdev, controller->auto_runtime_pm = false; controller->max_speed_hz = LJCA_SPI_BUS_MAX_HZ; - device_set_node(&ljca_spi->controller->dev, dev_fwnode(&auxdev->dev)); auxiliary_set_drvdata(auxdev, controller); ret = spi_register_controller(controller); diff --git a/drivers/spi/spi-loongson-core.c b/drivers/spi/spi-loongson-core.c index b46f072a0387..f50423c3db4c 100644 --- a/drivers/spi/spi-loongson-core.c +++ b/drivers/spi/spi-loongson-core.c @@ -210,7 +210,6 @@ int loongson_spi_init_controller(struct device *dev, void __iomem *regs) controller->unprepare_message = loongson_spi_unprepare_message; controller->set_cs = loongson_spi_set_cs; controller->num_chipselect = 4; - device_set_node(&controller->dev, dev_fwnode(dev)); dev_set_drvdata(dev, controller); spi = spi_controller_get_devdata(controller); diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c index 382e2a69f7a7..e466866d5e80 100644 --- a/drivers/spi/spi-lp8841-rtc.c +++ b/drivers/spi/spi-lp8841-rtc.c @@ -200,7 +200,6 @@ spi_lp8841_rtc_probe(struct platform_device *pdev) host->transfer_one = spi_lp8841_rtc_transfer_one; host->bits_per_word_mask = SPI_BPW_MASK(8); #ifdef CONFIG_OF - host->dev.of_node = pdev->dev.of_node; #endif data = spi_controller_get_devdata(host); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index c8b2add2640e..965673bac98b 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -178,8 
+178,19 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16)) return false; - if (op->cmd.nbytes != 2) - return false; + /* Extra 8D-8D-8D limitations */ + if (op->cmd.dtr && op->cmd.buswidth == 8) { + if (op->cmd.nbytes != 2) + return false; + + if ((op->addr.nbytes % 2) || + (op->dummy.nbytes % 2) || + (op->data.nbytes % 2)) { + dev_err(&ctlr->dev, + "Even byte numbers not allowed in octal DTR operations\n"); + return false; + } + } } else { if (op->cmd.nbytes != 1) return false; @@ -708,9 +719,18 @@ spi_mem_dirmap_create(struct spi_mem *mem, desc->mem = mem; desc->info = *info; - if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create) + if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create) { + ret = spi_mem_access_start(mem); + if (ret) { + kfree(desc); + return ERR_PTR(ret); + } + ret = ctlr->mem_ops->dirmap_create(desc); + spi_mem_access_end(mem); + } + if (ret) { desc->nodirmap = true; if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl)) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index 6b9137307533..a7001b9e36e6 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -1054,7 +1054,6 @@ static int meson_spicc_probe(struct platform_device *pdev) device_reset_optional(&pdev->dev); host->num_chipselect = 4; - host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP; host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX); host->min_speed_hz = spicc->data->min_speed_hz; diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c index ef7efeaeee97..b818950a8cb7 100644 --- a/drivers/spi/spi-meson-spifc.c +++ b/drivers/spi/spi-meson-spifc.c @@ -322,7 +322,6 @@ static int meson_spifc_probe(struct platform_device *pdev) rate = clk_get_rate(spifc->clk); host->num_chipselect = 1; - host->dev.of_node = pdev->dev.of_node; host->bits_per_word_mask = SPI_BPW_MASK(8); 
host->auto_runtime_pm = true; host->transfer_one = meson_spifc_transfer_one; diff --git a/drivers/spi/spi-microchip-core-spi.c b/drivers/spi/spi-microchip-core-spi.c index 89e40fc45d73..a4c128ae391b 100644 --- a/drivers/spi/spi-microchip-core-spi.c +++ b/drivers/spi/spi-microchip-core-spi.c @@ -161,7 +161,7 @@ static int mchp_corespi_setup(struct spi_device *spi) return -EOPNOTSUPP; } - if (spi->mode & SPI_MODE_X_MASK & ~spi->controller->mode_bits) { + if ((spi->mode ^ spi->controller->mode_bits) & SPI_MODE_X_MASK) { dev_err(&spi->dev, "incompatible CPOL/CPHA, must match controller's Motorola mode\n"); return -EINVAL; } @@ -360,7 +360,6 @@ static int mchp_corespi_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); host->transfer_one = mchp_corespi_transfer_one; host->set_cs = mchp_corespi_set_cs; - host->dev.of_node = dev->of_node; ret = of_property_read_u32(dev->of_node, "fifo-depth", &spi->fifo_depth); if (ret) diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 3c1638ba5bee..a1aeb5403a74 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c @@ -480,8 +480,6 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *pdev) host->use_gpio_descriptors = true; host->cleanup = mpc512x_psc_spi_cleanup; - device_set_node(&host->dev, dev_fwnode(dev)); - tempp = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(tempp)) return dev_err_probe(dev, PTR_ERR(tempp), "could not ioremap I/O port range\n"); diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c index 3bbeb8d5bfb8..73d2383461ca 100644 --- a/drivers/spi/spi-mpc52xx-psc.c +++ b/drivers/spi/spi-mpc52xx-psc.c @@ -319,8 +319,6 @@ static int mpc52xx_psc_spi_of_probe(struct platform_device *pdev) host->transfer_one_message = mpc52xx_psc_spi_transfer_one_message; host->cleanup = mpc52xx_psc_spi_cleanup; - device_set_node(&host->dev, dev_fwnode(dev)); - mps->psc = 
devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(mps->psc)) return dev_err_probe(dev, PTR_ERR(mps->psc), "could not ioremap I/O port range\n"); diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c index 6d4dde15ac54..14188a6ba5a1 100644 --- a/drivers/spi/spi-mpc52xx.c +++ b/drivers/spi/spi-mpc52xx.c @@ -430,7 +430,6 @@ static int mpc52xx_spi_probe(struct platform_device *op) host->transfer = mpc52xx_spi_transfer; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; host->bits_per_word_mask = SPI_BPW_MASK(8); - host->dev.of_node = op->dev.of_node; platform_set_drvdata(op, host); diff --git a/drivers/spi/spi-mpfs.c b/drivers/spi/spi-mpfs.c index 7e9e64d8e6c8..64d15a6188ac 100644 --- a/drivers/spi/spi-mpfs.c +++ b/drivers/spi/spi-mpfs.c @@ -550,7 +550,6 @@ static int mpfs_spi_probe(struct platform_device *pdev) host->transfer_one = mpfs_spi_transfer_one; host->prepare_message = mpfs_spi_prepare_message; host->set_cs = mpfs_spi_set_cs; - host->dev.of_node = pdev->dev.of_node; spi = spi_controller_get_devdata(host); diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 90e5813cfdc3..0368a26bca9a 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -1184,7 +1184,6 @@ static int mtk_spi_probe(struct platform_device *pdev) return -ENOMEM; host->auto_runtime_pm = true; - host->dev.of_node = dev->of_node; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; host->set_cs = mtk_spi_set_cs; diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c index 3770b8e096a4..bbedfad2ccae 100644 --- a/drivers/spi/spi-mt7621.c +++ b/drivers/spi/spi-mt7621.c @@ -348,7 +348,6 @@ static int mt7621_spi_probe(struct platform_device *pdev) host->set_cs = mt7621_spi_set_native_cs; host->transfer_one = mt7621_spi_transfer_one; host->bits_per_word_mask = SPI_BPW_MASK(8); - host->dev.of_node = pdev->dev.of_node; host->max_native_cs = MT7621_NATIVE_CS_COUNT; host->num_chipselect = MT7621_NATIVE_CS_COUNT; 
host->use_gpio_descriptors = true; diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c index 5cc4632e13d7..1e5ec0840174 100644 --- a/drivers/spi/spi-mtk-nor.c +++ b/drivers/spi/spi-mtk-nor.c @@ -851,7 +851,6 @@ static int mtk_nor_probe(struct platform_device *pdev) } ctlr->bits_per_word_mask = SPI_BPW_MASK(8); - ctlr->dev.of_node = pdev->dev.of_node; ctlr->max_message_size = mtk_max_msg_size; ctlr->mem_ops = &mtk_nor_mem_ops; ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c index ae38c244e258..7f7d0dfec743 100644 --- a/drivers/spi/spi-mtk-snfi.c +++ b/drivers/spi/spi-mtk-snfi.c @@ -1448,7 +1448,6 @@ static int mtk_snand_probe(struct platform_device *pdev) ctlr->mem_caps = &mtk_snand_mem_caps; ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; - ctlr->dev.of_node = pdev->dev.of_node; ret = spi_register_controller(ctlr); if (ret) { dev_err(&pdev->dev, "spi_register_controller failed.\n"); diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index 0eb35c4e3987..bd122de152c0 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -161,7 +161,6 @@ static int spi_mux_probe(struct spi_device *spi) ctlr->setup = spi_mux_setup; ctlr->num_chipselect = mux_control_states(priv->mux); ctlr->bus_num = -1; - ctlr->dev.of_node = spi->dev.of_node; ctlr->must_async = true; ctlr->defer_optimize_message = true; diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index eeaea6a5e310..f9369c69911c 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -768,7 +768,6 @@ static int mxic_spi_probe(struct platform_device *pdev) mxic = spi_controller_get_devdata(host); mxic->dev = &pdev->dev; - host->dev.of_node = pdev->dev.of_node; mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk"); if (IS_ERR(mxic->ps_clk)) diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index 
cccd17f24775..3961b0ccdb4b 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -746,7 +746,6 @@ static int npcm_fiu_probe(struct platform_device *pdev) ctrl->bus_num = -1; ctrl->mem_ops = &npcm_fiu_mem_ops; ctrl->num_chipselect = fiu->info->max_cs; - ctrl->dev.of_node = dev->of_node; return devm_spi_register_controller(dev, ctrl); } diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index 98b6479b961c..e60b3cc398ec 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -401,7 +401,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) host->max_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MIN_CLK_DIVIDER); host->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER); host->mode_bits = SPI_CPHA | SPI_CPOL; - host->dev.of_node = pdev->dev.of_node; host->bus_num = -1; host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); host->transfer_one = npcm_pspi_transfer_one; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 50a7e4916a60..320b3d93df57 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -1383,8 +1383,6 @@ static int nxp_fspi_probe(struct platform_device *pdev) else ctlr->mem_caps = &nxp_fspi_mem_caps; - device_set_node(&ctlr->dev, fwnode); - ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f); if (ret) return ret; diff --git a/drivers/spi/spi-nxp-xspi.c b/drivers/spi/spi-nxp-xspi.c new file mode 100644 index 000000000000..06fcdf22990b --- /dev/null +++ b/drivers/spi/spi-nxp-xspi.c @@ -0,0 +1,1384 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * NXP xSPI controller driver. + * + * Copyright 2025 NXP + * + * xSPI is a flexible SPI host controller which supports single + * external devices. This device can have up to eight bidirectional + * data lines, this means xSPI support Single/Dual/Quad/Octal mode + * data transfer (1/2/4/8 bidirectional data lines). 
+ * + * xSPI controller is driven by the LUT(Look-up Table) registers + * LUT registers are a look-up-table for sequences of instructions. + * A valid sequence consists of five LUT registers. + * Maximum 16 LUT sequences can be programmed simultaneously. + * + * LUTs are being created at run-time based on the commands passed + * from the spi-mem framework, thus using single LUT index. + * + * Software triggered Flash read/write access by IP Bus. + * + * Memory mapped read access by AHB Bus. + * + * Based on SPI MEM interface and spi-nxp-fspi.c driver. + * + * Author: + * Haibo Chen + * Co-author: + * Han Xu + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Runtime pm timeout */ +#define XSPI_RPM_TIMEOUT_MS 50 /* 50ms */ +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (15). 
+ */ +#define XSPI_SEQID_LUT 15 + +#define XSPI_MCR 0x0 +#define XSPI_MCR_CKN_FA_EN BIT(26) +#define XSPI_MCR_DQS_FA_SEL_MASK GENMASK(25, 24) +#define XSPI_MCR_ISD3FA BIT(17) +#define XSPI_MCR_ISD2FA BIT(16) +#define XSPI_MCR_DOZE BIT(15) +#define XSPI_MCR_MDIS BIT(14) +#define XSPI_MCR_DLPEN BIT(12) +#define XSPI_MCR_CLR_TXF BIT(11) +#define XSPI_MCR_CLR_RXF BIT(10) +#define XSPI_MCR_IPS_TG_RST BIT(9) +#define XSPI_MCR_VAR_LAT_EN BIT(8) +#define XSPI_MCR_DDR_EN BIT(7) +#define XSPI_MCR_DQS_EN BIT(6) +#define XSPI_MCR_DQS_LAT_EN BIT(5) +#define XSPI_MCR_DQS_OUT_EN BIT(4) +#define XSPI_MCR_SWRSTHD BIT(1) +#define XSPI_MCR_SWRSTSD BIT(0) + +#define XSPI_IPCR 0x8 + +#define XSPI_FLSHCR 0xC +#define XSPI_FLSHCR_TDH_MASK GENMASK(17, 16) +#define XSPI_FLSHCR_TCSH_MASK GENMASK(11, 8) +#define XSPI_FLSHCR_TCSS_MASK GENMASK(3, 0) + +#define XSPI_BUF0CR 0x10 +#define XSPI_BUF1CR 0x14 +#define XSPI_BUF2CR 0x18 +#define XSPI_BUF3CR 0x1C +#define XSPI_BUF3CR_ALLMST BIT(31) +#define XSPI_BUF3CR_ADATSZ_MASK GENMASK(17, 8) +#define XSPI_BUF3CR_MSTRID_MASK GENMASK(3, 0) + +#define XSPI_BFGENCR 0x20 +#define XSPI_BFGENCR_SEQID_WR_MASK GENMASK(31, 28) +#define XSPI_BFGENCR_ALIGN_MASK GENMASK(24, 22) +#define XSPI_BFGENCR_PPWF_CLR BIT(20) +#define XSPI_BFGENCR_WR_FLUSH_EN BIT(21) +#define XSPI_BFGENCR_SEQID_WR_EN BIT(17) +#define XSPI_BFGENCR_SEQID_MASK GENMASK(15, 12) + +#define XSPI_BUF0IND 0x30 +#define XSPI_BUF1IND 0x34 +#define XSPI_BUF2IND 0x38 + +#define XSPI_DLLCRA 0x60 +#define XSPI_DLLCRA_DLLEN BIT(31) +#define XSPI_DLLCRA_FREQEN BIT(30) +#define XSPI_DLLCRA_DLL_REFCNTR_MASK GENMASK(27, 24) +#define XSPI_DLLCRA_DLLRES_MASK GENMASK(23, 20) +#define XSPI_DLLCRA_SLV_FINE_MASK GENMASK(19, 16) +#define XSPI_DLLCRA_SLV_DLY_MASK GENMASK(14, 12) +#define XSPI_DLLCRA_SLV_DLY_COARSE_MASK GENMASK(11, 8) +#define XSPI_DLLCRA_SLV_DLY_FINE_MASK GENMASK(7, 5) +#define XSPI_DLLCRA_DLL_CDL8 BIT(4) +#define XSPI_DLLCRA_SLAVE_AUTO_UPDT BIT(3) +#define XSPI_DLLCRA_SLV_EN BIT(2) +#define 
XSPI_DLLCRA_SLV_DLL_BYPASS BIT(1) +#define XSPI_DLLCRA_SLV_UPD BIT(0) + +#define XSPI_SFAR 0x100 + +#define XSPI_SFACR 0x104 +#define XSPI_SFACR_FORCE_A10 BIT(22) +#define XSPI_SFACR_WA_4B_EN BIT(21) +#define XSPI_SFACR_CAS_INTRLVD BIT(20) +#define XSPI_SFACR_RX_BP_EN BIT(18) +#define XSPI_SFACR_BYTE_SWAP BIT(17) +#define XSPI_SFACR_WA BIT(16) +#define XSPI_SFACR_CAS_MASK GENMASK(3, 0) + +#define XSPI_SMPR 0x108 +#define XSPI_SMPR_DLLFSMPFA_MASK GENMASK(26, 24) +#define XSPI_SMPR_FSDLY BIT(6) +#define XSPI_SMPR_FSPHS BIT(5) + +#define XSPI_RBSR 0x10C + +#define XSPI_RBCT 0x110 +#define XSPI_RBCT_WMRK_MASK GENMASK(6, 0) + +#define XSPI_DLLSR 0x12C +#define XSPI_DLLSR_DLLA_LOCK BIT(15) +#define XSPI_DLLSR_SLVA_LOCK BIT(14) +#define XSPI_DLLSR_DLLA_RANGE_ERR BIT(13) +#define XSPI_DLLSR_DLLA_FINE_UNDERFLOW BIT(12) + +#define XSPI_TBSR 0x150 + +#define XSPI_TBDR 0x154 + +#define XSPI_TBCT 0x158 +#define XSPI_TBCT_WMRK_MASK GENMASK(7, 0) + +#define XSPI_SR 0x15C +#define XSPI_SR_TXFULL BIT(27) +#define XSPI_SR_TXDMA BIT(26) +#define XSPI_SR_TXWA BIT(25) +#define XSPI_SR_TXNE BIT(24) +#define XSPI_SR_RXDMA BIT(23) +#define XSPI_SR_ARB_STATE_MASK GENMASK(23, 20) +#define XSPI_SR_RXFULL BIT(19) +#define XSPI_SR_RXWE BIT(16) +#define XSPI_SR_ARB_LCK BIT(15) +#define XSPI_SR_AHBnFUL BIT(11) +#define XSPI_SR_AHBnNE BIT(7) +#define XSPI_SR_AHBTRN BIT(6) +#define XSPI_SR_AWRACC BIT(4) +#define XSPI_SR_AHB_ACC BIT(2) +#define XSPI_SR_IP_ACC BIT(1) +#define XSPI_SR_BUSY BIT(0) + +#define XSPI_FR 0x160 +#define XSPI_FR_DLPFF BIT(31) +#define XSPI_FR_DLLABRT BIT(28) +#define XSPI_FR_TBFF BIT(27) +#define XSPI_FR_TBUF BIT(26) +#define XSPI_FR_DLLUNLCK BIT(24) +#define XSPI_FR_ILLINE BIT(23) +#define XSPI_FR_RBOF BIT(17) +#define XSPI_FR_RBDF BIT(16) +#define XSPI_FR_AAEF BIT(15) +#define XSPI_FR_AITEF BIT(14) +#define XSPI_FR_AIBSEF BIT(13) +#define XSPI_FR_ABOF BIT(12) +#define XSPI_FR_CRCAEF BIT(10) +#define XSPI_FR_PPWF BIT(8) +#define XSPI_FR_IPIEF BIT(6) +#define XSPI_FR_IPEDERR 
BIT(5) +#define XSPI_FR_PERFOVF BIT(2) +#define XSPI_FR_RDADDR BIT(1) +#define XSPI_FR_TFF BIT(0) + +#define XSPI_RSER 0x164 +#define XSPI_RSER_TFIE BIT(0) + +#define XSPI_SFA1AD 0x180 + +#define XSPI_SFA2AD 0x184 + +#define XSPI_RBDR0 0x200 + +#define XSPI_LUTKEY 0x300 +#define XSPI_LUT_KEY_VAL (0x5AF05AF0UL) + +#define XSPI_LCKCR 0x304 +#define XSPI_LOKCR_LOCK BIT(0) +#define XSPI_LOKCR_UNLOCK BIT(1) + +#define XSPI_LUT 0x310 +#define XSPI_LUT_OFFSET (XSPI_SEQID_LUT * 5 * 4) +#define XSPI_LUT_REG(idx) \ + (XSPI_LUT + XSPI_LUT_OFFSET + (idx) * 4) + +#define XSPI_MCREXT 0x4FC +#define XSPI_MCREXT_RST_MASK GENMASK(3, 0) + + +#define XSPI_FRAD0_WORD2 0x808 +#define XSPI_FRAD0_WORD2_MD0ACP_MASK GENMASK(2, 0) + +#define XSPI_FRAD0_WORD3 0x80C +#define XSPI_FRAD0_WORD3_VLD BIT(31) + +#define XSPI_TG0MDAD 0x900 +#define XSPI_TG0MDAD_VLD BIT(31) + +#define XSPI_TG1MDAD 0x910 + +#define XSPI_MGC 0x920 +#define XSPI_MGC_GVLD BIT(31) +#define XSPI_MGC_GVLDMDAD BIT(29) +#define XSPI_MGC_GVLDFRAD BIT(27) + +#define XSPI_MTO 0x928 + +#define XSPI_ERRSTAT 0x938 +#define XSPI_INT_EN 0x93C + +#define XSPI_SFP_TG_IPCR 0x958 +#define XSPI_SFP_TG_IPCR_SEQID_MASK GENMASK(27, 24) +#define XSPI_SFP_TG_IPCR_ARB_UNLOCK BIT(23) +#define XSPI_SFP_TG_IPCR_ARB_LOCK BIT(22) +#define XSPI_SFP_TG_IPCR_IDATSZ_MASK GENMASK(15, 0) + +#define XSPI_SFP_TG_SFAR 0x95C + +/* Register map end */ + +/********* XSPI CMD definitions ***************************/ +#define LUT_STOP 0x00 +#define LUT_CMD_SDR 0x01 +#define LUT_ADDR_SDR 0x02 +#define LUT_DUMMY 0x03 +#define LUT_MODE8_SDR 0x04 +#define LUT_MODE2_SDR 0x05 +#define LUT_MODE4_SDR 0x06 +#define LUT_READ_SDR 0x07 +#define LUT_WRITE_SDR 0x08 +#define LUT_JMP_ON_CS 0x09 +#define LUT_ADDR_DDR 0x0A +#define LUT_MODE8_DDR 0x0B +#define LUT_MODE2_DDR 0x0C +#define LUT_MODE4_DDR 0x0D +#define LUT_READ_DDR 0x0E +#define LUT_WRITE_DDR 0x0F +#define LUT_DATA_LEARN 0x10 +#define LUT_CMD_DDR 0x11 +#define LUT_CADDR_SDR 0x12 +#define LUT_CADDR_DDR 0x13 +#define 
JMP_TO_SEQ 0x14 + +#define XSPI_64BIT_LE 0x3 +/* + * Calculate number of required PAD bits for LUT register. + * + * The pad stands for the number of IO lines [0:7]. + * For example, the octal read needs eight IO lines, + * so you should use LUT_PAD(8). This macro + * returns 3 i.e. use eight (2^3) IP lines for read. + */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define PAD_SHIFT 8 +#define INSTR_SHIFT 10 +#define OPRND_SHIFT 16 + +/* Macros for constructing the LUT register. */ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \ + (opr)) << (((idx) % 2) * OPRND_SHIFT)) + +#define NXP_XSPI_MIN_IOMAP SZ_4M +#define NXP_XSPI_MAX_CHIPSELECT 2 +#define POLL_TOUT_US 5000 + +/* Access flash memory using IP bus only */ +#define XSPI_QUIRK_USE_IP_ONLY BIT(0) + +struct nxp_xspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; +}; + +static struct nxp_xspi_devtype_data imx94_data = { + .rxfifo = SZ_512, /* (128 * 4 bytes) */ + .txfifo = SZ_1K, /* (256 * 4 bytes) */ + .ahb_buf_size = SZ_4K, /* (1024 * 4 bytes) */ +}; + +struct nxp_xspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + u32 memmap_phy_size; + u32 memmap_start; + u32 memmap_len; + struct clk *clk; + struct device *dev; + struct completion c; + const struct nxp_xspi_devtype_data *devtype_data; + /* mutex lock for each operation */ + struct mutex lock; + int selected; +#define XSPI_DTR_PROTO BIT(0) + int flags; + /* Save the previous operation clock rate */ + unsigned long pre_op_rate; + /* The max clock rate xspi supported output to device */ + unsigned long support_max_rate; +}; + +static inline int needs_ip_only(struct nxp_xspi 
*xspi) +{ + return xspi->devtype_data->quirks & XSPI_QUIRK_USE_IP_ONLY; +} + +static irqreturn_t nxp_xspi_irq_handler(int irq, void *dev_id) +{ + struct nxp_xspi *xspi = dev_id; + u32 reg; + + reg = readl(xspi->iobase + XSPI_FR); + if (reg & XSPI_FR_TFF) { + /* Clear interrupt */ + writel(XSPI_FR_TFF, xspi->iobase + XSPI_FR); + complete(&xspi->c); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int nxp_xspi_check_buswidth(struct nxp_xspi *xspi, u8 width) +{ + return (is_power_of_2(width) && width <= 8) ? 0 : -EOPNOTSUPP; +} + +static bool nxp_xspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller); + int ret; + + ret = nxp_xspi_check_buswidth(xspi, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= nxp_xspi_check_buswidth(xspi, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= nxp_xspi_check_buswidth(xspi, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= nxp_xspi_check_buswidth(xspi, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of address bytes should be equal to or less than 4 bytes. 
+ */ + if (op->addr.nbytes > 4) + return false; + + /* Max 32 dummy clock cycles supported */ + if (op->dummy.buswidth && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + if (needs_ip_only(xspi) && op->data.dir == SPI_MEM_DATA_IN && + op->data.nbytes > xspi->devtype_data->rxfifo) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > xspi->devtype_data->txfifo) + return false; + + return spi_mem_default_supports_op(mem, op); +} + +static void nxp_xspi_prepare_lut(struct nxp_xspi *xspi, + const struct spi_mem_op *op) +{ + void __iomem *base = xspi->iobase; + u32 lutval[5] = {}; + int lutidx = 1, i; + + /* cmd */ + if (op->cmd.dtr) { + lutval[0] |= LUT_DEF(0, LUT_CMD_DDR, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode >> 8); + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_CMD_DDR, + LUT_PAD(op->cmd.buswidth), + op->cmd.opcode & 0x00ff); + lutidx++; + } else { + lutval[0] |= LUT_DEF(0, LUT_CMD_SDR, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + } + + /* Addr bytes */ + if (op->addr.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, op->addr.dtr ? + LUT_ADDR_DDR : LUT_ADDR_SDR, + LUT_PAD(op->addr.buswidth), + op->addr.nbytes * 8); + lutidx++; + } + + /* Dummy bytes, if needed */ + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + LUT_PAD(op->data.buswidth), + op->dummy.nbytes * 8 / + /* need distinguish ddr mode */ + op->dummy.buswidth / (op->dummy.dtr ? 2 : 1)); + lutidx++; + } + + /* Read/Write data bytes */ + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? + (op->data.dtr ? LUT_READ_DDR : LUT_READ_SDR) : + (op->data.dtr ? LUT_WRITE_DDR : LUT_WRITE_SDR), + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + /* Stop condition. 
*/ + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* Unlock LUT */ + writel(XSPI_LUT_KEY_VAL, xspi->iobase + XSPI_LUTKEY); + writel(XSPI_LOKCR_UNLOCK, xspi->iobase + XSPI_LCKCR); + + /* Fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + writel(lutval[i], base + XSPI_LUT_REG(i)); + + dev_dbg(xspi->dev, "CMD[%02x] lutval[0:%08x 1:%08x 2:%08x 3:%08x 4:%08x], size: 0x%08x\n", + op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3], lutval[4], + op->data.nbytes); + + /* Lock LUT */ + writel(XSPI_LUT_KEY_VAL, xspi->iobase + XSPI_LUTKEY); + writel(XSPI_LOKCR_LOCK, xspi->iobase + XSPI_LCKCR); +} + +static void nxp_xspi_disable_ddr(struct nxp_xspi *xspi) +{ + void __iomem *base = xspi->iobase; + u32 reg; + + /* Disable module */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + + reg &= ~XSPI_MCR_DDR_EN; + reg &= ~XSPI_MCR_DQS_FA_SEL_MASK; + /* Use dummy pad loopback mode to sample data */ + reg |= FIELD_PREP(XSPI_MCR_DQS_FA_SEL_MASK, 0x01); + writel(reg, base + XSPI_MCR); + xspi->support_max_rate = 133000000; + + reg = readl(base + XSPI_FLSHCR); + reg &= ~XSPI_FLSHCR_TDH_MASK; + writel(reg, base + XSPI_FLSHCR); + + /* Select sampling at inverted clock */ + reg = FIELD_PREP(XSPI_SMPR_DLLFSMPFA_MASK, 0) | XSPI_SMPR_FSPHS; + writel(reg, base + XSPI_SMPR); + + /* Enable module */ + reg = readl(base + XSPI_MCR); + reg &= ~XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); +} + +static void nxp_xspi_enable_ddr(struct nxp_xspi *xspi) +{ + void __iomem *base = xspi->iobase; + u32 reg; + + /* Disable module */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + + reg |= XSPI_MCR_DDR_EN; + reg &= ~XSPI_MCR_DQS_FA_SEL_MASK; + /* Use external dqs to sample data */ + reg |= FIELD_PREP(XSPI_MCR_DQS_FA_SEL_MASK, 0x03); + writel(reg, base + XSPI_MCR); + xspi->support_max_rate = 200000000; + + reg = readl(base + XSPI_FLSHCR); + reg &= ~XSPI_FLSHCR_TDH_MASK; + reg |= 
FIELD_PREP(XSPI_FLSHCR_TDH_MASK, 0x01); + writel(reg, base + XSPI_FLSHCR); + + reg = FIELD_PREP(XSPI_SMPR_DLLFSMPFA_MASK, 0x04); + writel(reg, base + XSPI_SMPR); + + /* Enable module */ + reg = readl(base + XSPI_MCR); + reg &= ~XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); +} + +static void nxp_xspi_sw_reset(struct nxp_xspi *xspi) +{ + void __iomem *base = xspi->iobase; + bool mdis_flag = false; + u32 reg; + int ret; + + reg = readl(base + XSPI_MCR); + + /* + * Per RM, when reset SWRSTSD and SWRSTHD, XSPI must be + * enabled (MDIS = 0). + * So if MDIS is 1, should clear it before assert SWRSTSD + * and SWRSTHD. + */ + if (reg & XSPI_MCR_MDIS) { + reg &= ~XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + mdis_flag = true; + } + + /* Software reset for AHB domain and Serial flash memory domain */ + reg |= XSPI_MCR_SWRSTHD | XSPI_MCR_SWRSTSD; + /* Software Reset for IPS Target Group Queue 0 */ + reg |= XSPI_MCR_IPS_TG_RST; + writel(reg, base + XSPI_MCR); + + /* IPS_TG_RST will self-clear to 0 once IPS_TG_RST complete */ + ret = readl_poll_timeout(base + XSPI_MCR, reg, !(reg & XSPI_MCR_IPS_TG_RST), + 100, 5000); + if (ret == -ETIMEDOUT) + dev_warn(xspi->dev, "XSPI_MCR_IPS_TG_RST do not self-clear in 5ms!"); + + /* + * Per RM, must wait for at least three system cycles and + * three flash cycles after changing the value of reset field. + * delay 5us for safe. + */ + fsleep(5); + + /* + * Per RM, before dessert SWRSTSD and SWRSTHD, XSPI must be + * disabled (MIDS = 1). + */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + + /* deassert software reset */ + reg &= ~(XSPI_MCR_SWRSTHD | XSPI_MCR_SWRSTSD); + writel(reg, base + XSPI_MCR); + + /* + * Per RM, must wait for at least three system cycles and + * three flash cycles after changing the value of reset field. + * delay 5us for safe. 
+ */ + fsleep(5); + + /* Re-enable XSPI if it is enabled at beginning */ + if (!mdis_flag) { + reg &= ~XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + } +} + +static void nxp_xspi_dll_bypass(struct nxp_xspi *xspi) +{ + void __iomem *base = xspi->iobase; + int ret; + u32 reg; + + nxp_xspi_sw_reset(xspi); + + writel(0, base + XSPI_DLLCRA); + + /* Set SLV EN first */ + reg = XSPI_DLLCRA_SLV_EN; + writel(reg, base + XSPI_DLLCRA); + + reg = XSPI_DLLCRA_FREQEN | + FIELD_PREP(XSPI_DLLCRA_SLV_DLY_COARSE_MASK, 0x0) | + XSPI_DLLCRA_SLV_EN | XSPI_DLLCRA_SLV_DLL_BYPASS; + writel(reg, base + XSPI_DLLCRA); + + reg |= XSPI_DLLCRA_SLV_UPD; + writel(reg, base + XSPI_DLLCRA); + + ret = readl_poll_timeout(base + XSPI_DLLSR, reg, + reg & XSPI_DLLSR_SLVA_LOCK, 0, POLL_TOUT_US); + if (ret) + dev_err(xspi->dev, + "DLL SLVA unlock, the DLL status is %x, need to check!\n", + readl(base + XSPI_DLLSR)); +} + +static void nxp_xspi_dll_auto(struct nxp_xspi *xspi, unsigned long rate) +{ + void __iomem *base = xspi->iobase; + int ret; + u32 reg; + + nxp_xspi_sw_reset(xspi); + + writel(0, base + XSPI_DLLCRA); + + /* Set SLV EN first */ + reg = XSPI_DLLCRA_SLV_EN; + writel(reg, base + XSPI_DLLCRA); + + reg = FIELD_PREP(XSPI_DLLCRA_DLL_REFCNTR_MASK, 0x02) | + FIELD_PREP(XSPI_DLLCRA_DLLRES_MASK, 0x08) | + XSPI_DLLCRA_SLAVE_AUTO_UPDT | XSPI_DLLCRA_SLV_EN; + if (rate > 133000000) + reg |= XSPI_DLLCRA_FREQEN; + + writel(reg, base + XSPI_DLLCRA); + + reg |= XSPI_DLLCRA_SLV_UPD; + writel(reg, base + XSPI_DLLCRA); + + reg |= XSPI_DLLCRA_DLLEN; + writel(reg, base + XSPI_DLLCRA); + + ret = readl_poll_timeout(base + XSPI_DLLSR, reg, + reg & XSPI_DLLSR_DLLA_LOCK, 0, POLL_TOUT_US); + if (ret) + dev_err(xspi->dev, + "DLL unlock, the DLL status is %x, need to check!\n", + readl(base + XSPI_DLLSR)); + + ret = readl_poll_timeout(base + XSPI_DLLSR, reg, + reg & XSPI_DLLSR_SLVA_LOCK, 0, POLL_TOUT_US); + if (ret) + dev_err(xspi->dev, + "DLL SLVA unlock, the DLL status is %x, need to check!\n", + readl(base + 
XSPI_DLLSR)); +} + +static void nxp_xspi_select_mem(struct nxp_xspi *xspi, struct spi_device *spi, + const struct spi_mem_op *op) +{ + /* xspi only support one DTR mode: 8D-8D-8D */ + bool op_is_dtr = op->cmd.dtr && op->addr.dtr && op->dummy.dtr && op->data.dtr; + unsigned long root_clk_rate, rate; + uint64_t cs0_top_address; + uint64_t cs1_top_address; + u32 reg; + int ret; + + /* + * Return when following condition all meet, + * 1, if previously selected target device is same as current + * requested target device. + * 2, the DTR or STR mode do not change. + * 3, previous operation max rate equals current one. + * + * For other case, need to re-config. + */ + if (xspi->selected == spi_get_chipselect(spi, 0) && + (!!(xspi->flags & XSPI_DTR_PROTO) == op_is_dtr) && + (xspi->pre_op_rate == op->max_freq)) + return; + + if (op_is_dtr) { + nxp_xspi_enable_ddr(xspi); + xspi->flags |= XSPI_DTR_PROTO; + } else { + nxp_xspi_disable_ddr(xspi); + xspi->flags &= ~XSPI_DTR_PROTO; + } + rate = min_t(unsigned long, xspi->support_max_rate, op->max_freq); + /* + * There is two dividers between xspi_clk_root(from SoC CCM) and xspi_sfif. + * xspi_clk_root ---->divider1 ----> ipg_clk_2xsfif + * | + * | + * |---> divider2 ---> ipg_clk_sfif + * divider1 is controlled by SOCCR, SOCCR default value is 0. + * divider2 fix to divide 2. + * when SOCCR = 0: + * ipg_clk_2xsfif = xspi_clk_root + * ipg_clk_sfif = ipg_clk_2xsfif / 2 = xspi_clk_root / 2 + * ipg_clk_2xsfif is used for DTR mode. + * xspi_sck(output to device) is defined based on xspi_sfif clock. 
+ */ + root_clk_rate = rate * 2; + + clk_disable_unprepare(xspi->clk); + + ret = clk_set_rate(xspi->clk, root_clk_rate); + if (ret) + return; + + ret = clk_prepare_enable(xspi->clk); + if (ret) + return; + + xspi->pre_op_rate = op->max_freq; + xspi->selected = spi_get_chipselect(spi, 0); + + if (xspi->selected) { /* CS1 select */ + cs0_top_address = xspi->memmap_phy; + cs1_top_address = SZ_4G - 1; + } else { /* CS0 select */ + cs0_top_address = SZ_4G - 1; + cs1_top_address = SZ_4G - 1; + } + writel(cs0_top_address, xspi->iobase + XSPI_SFA1AD); + writel(cs1_top_address, xspi->iobase + XSPI_SFA2AD); + + reg = readl(xspi->iobase + XSPI_SFACR); + if (op->data.swap16) + reg |= XSPI_SFACR_BYTE_SWAP; + else + reg &= ~XSPI_SFACR_BYTE_SWAP; + writel(reg, xspi->iobase + XSPI_SFACR); + + if (!op_is_dtr || rate < 60000000) + nxp_xspi_dll_bypass(xspi); + else + nxp_xspi_dll_auto(xspi, rate); +} + +static int nxp_xspi_ahb_read(struct nxp_xspi *xspi, const struct spi_mem_op *op) +{ + u32 start = op->addr.val; + u32 len = op->data.nbytes; + + /* If necessary, ioremap before AHB read */ + if ((!xspi->ahb_addr) || start < xspi->memmap_start || + start + len > xspi->memmap_start + xspi->memmap_len) { + if (xspi->ahb_addr) + iounmap(xspi->ahb_addr); + + xspi->memmap_start = start; + xspi->memmap_len = len > NXP_XSPI_MIN_IOMAP ? + len : NXP_XSPI_MIN_IOMAP; + + xspi->ahb_addr = ioremap(xspi->memmap_phy + xspi->memmap_start, + xspi->memmap_len); + + if (!xspi->ahb_addr) { + dev_err(xspi->dev, "failed to alloc memory\n"); + return -ENOMEM; + } + } + + /* Read out the data directly from the AHB buffer. 
*/ + memcpy_fromio(op->data.buf.in, + xspi->ahb_addr + start - xspi->memmap_start, len); + + return 0; +} + +static int nxp_xspi_fill_txfifo(struct nxp_xspi *xspi, + const struct spi_mem_op *op) +{ + void __iomem *base = xspi->iobase; + u8 *buf = (u8 *)op->data.buf.out; + u32 reg, left; + int i; + + for (i = 0; i < ALIGN(op->data.nbytes, 4); i += 4) { + reg = readl(base + XSPI_FR); + reg |= XSPI_FR_TBFF; + writel(reg, base + XSPI_FR); + /* Read again to check whether the tx fifo has rom */ + reg = readl(base + XSPI_FR); + if (!(reg & XSPI_FR_TBFF)) { + WARN_ON(1); + return -EIO; + } + + if (i == ALIGN_DOWN(op->data.nbytes, 4)) { + /* Use 0xFF for extra bytes */ + left = 0xFFFFFFFF; + /* The last 1 to 3 bytes */ + memcpy((u8 *)&left, buf + i, op->data.nbytes - i); + writel(left, base + XSPI_TBDR); + } else { + writel(*(u32 *)(buf + i), base + XSPI_TBDR); + } + } + + return 0; +} + +static int nxp_xspi_read_rxfifo(struct nxp_xspi *xspi, + const struct spi_mem_op *op) +{ + u32 watermark, watermark_bytes, reg; + void __iomem *base = xspi->iobase; + u8 *buf = (u8 *) op->data.buf.in; + int i, ret, len; + + /* + * Config the rx watermark half of the 64 memory-mapped RX data buffer RBDRn + * refer to the RBCT config in nxp_xspi_do_op() + */ + watermark = 32; + watermark_bytes = watermark * 4; + + len = op->data.nbytes; + + while (len >= watermark_bytes) { + /* Make sure the RX FIFO contains valid data before read */ + ret = readl_poll_timeout(base + XSPI_FR, reg, + reg & XSPI_FR_RBDF, 0, POLL_TOUT_US); + if (ret) { + WARN_ON(1); + return ret; + } + + for (i = 0; i < watermark; i++) + *(u32 *)(buf + i * 4) = readl(base + XSPI_RBDR0 + i * 4); + + len = len - watermark_bytes; + buf = buf + watermark_bytes; + /* Pop up data to RXFIFO for next read. 
*/ + reg = readl(base + XSPI_FR); + reg |= XSPI_FR_RBDF; + writel(reg, base + XSPI_FR); + } + + /* Wait for the total data transfer finished */ + ret = readl_poll_timeout(base + XSPI_SR, reg, !(reg & XSPI_SR_BUSY), 0, POLL_TOUT_US); + if (ret) { + WARN_ON(1); + return ret; + } + + i = 0; + while (len >= 4) { + *(u32 *)(buf) = readl(base + XSPI_RBDR0 + i); + i += 4; + len -= 4; + buf += 4; + } + + if (len > 0) { + reg = readl(base + XSPI_RBDR0 + i); + memcpy(buf, (u8 *)&reg, len); + } + + /* Invalid RXFIFO first */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_CLR_RXF; + writel(reg, base + XSPI_MCR); + /* Wait for the CLR_RXF clear */ + ret = readl_poll_timeout(base + XSPI_MCR, reg, + !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US); + WARN_ON(ret); + + return ret; +} + +static int nxp_xspi_do_op(struct nxp_xspi *xspi, const struct spi_mem_op *op) +{ + void __iomem *base = xspi->iobase; + int watermark, err = 0; + u32 reg, len; + + len = op->data.nbytes; + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) { + /* Clear the TX FIFO. */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_CLR_TXF; + writel(reg, base + XSPI_MCR); + /* Wait for the CLR_TXF clear */ + err = readl_poll_timeout(base + XSPI_MCR, reg, + !(reg & XSPI_MCR_CLR_TXF), 1, POLL_TOUT_US); + if (err) { + WARN_ON(1); + return err; + } + + /* Cover the no 4bytes alignment data length */ + watermark = (xspi->devtype_data->txfifo - ALIGN(op->data.nbytes, 4)) / 4 + 1; + reg = FIELD_PREP(XSPI_TBCT_WMRK_MASK, watermark); + writel(reg, base + XSPI_TBCT); + /* + * According to the RM, for TBDR register, a write transaction on the + * flash memory with data size of less than 32 bits leads to the removal + * of one data entry from the TX buffer. The valid bits are used and the + * rest of the bits are discarded. + * But for data size large than 32 bits, according to test, for no 4bytes + * alignment data, the last 1~3 bytes will lost, because TX buffer use + * 4 bytes entries.
+ * So here adjust the transfer data length to make it 4bytes alignment. + * then will meet the upper watermark setting, trigger the 4bytes entries + * pop out. + * Will use extra 0xff to append, refer to nxp_xspi_fill_txfifo(). + */ + if (len > 4) + len = ALIGN(op->data.nbytes, 4); + + } else if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) { + /* Invalid RXFIFO first */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_CLR_RXF; + writel(reg, base + XSPI_MCR); + /* Wait for the CLR_RXF clear */ + err = readl_poll_timeout(base + XSPI_MCR, reg, + !(reg & XSPI_MCR_CLR_RXF), 1, POLL_TOUT_US); + if (err) { + WARN_ON(1); + return err; + } + + reg = FIELD_PREP(XSPI_RBCT_WMRK_MASK, 31); + writel(reg, base + XSPI_RBCT); + } + + init_completion(&xspi->c); + + /* Config the data address */ + writel(op->addr.val + xspi->memmap_phy, base + XSPI_SFP_TG_SFAR); + + /* Config the data size and lut id, trigger the transfer */ + reg = FIELD_PREP(XSPI_SFP_TG_IPCR_SEQID_MASK, XSPI_SEQID_LUT) | + FIELD_PREP(XSPI_SFP_TG_IPCR_IDATSZ_MASK, len); + writel(reg, base + XSPI_SFP_TG_IPCR); + + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) { + err = nxp_xspi_fill_txfifo(xspi, op); + if (err) + return err; + } + + /* Wait for the interrupt. */ + if (!wait_for_completion_timeout(&xspi->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + /* Invoke IP data read. */ + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + err = nxp_xspi_read_rxfifo(xspi, op); + + return err; +} + +static int nxp_xspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller); + void __iomem *base = xspi->iobase; + u32 reg; + int err; + + guard(mutex)(&xspi->lock); + + PM_RUNTIME_ACQUIRE_AUTOSUSPEND(xspi->dev, pm); + err = PM_RUNTIME_ACQUIRE_ERR(&pm); + if (err) + return err; + + /* Wait for controller being ready. 
*/ + err = readl_poll_timeout(base + XSPI_SR, reg, + !(reg & XSPI_SR_BUSY), 1, POLL_TOUT_US); + if (err) { + dev_err(xspi->dev, "SR keeps in BUSY!"); + return err; + } + + nxp_xspi_select_mem(xspi, mem->spi, op); + + nxp_xspi_prepare_lut(xspi, op); + + /* + * For read: + * the address in AHB mapped range will use AHB read. + * the address out of AHB mapped range will use IP read. + * For write: + * all use IP write. + */ + if ((op->data.dir == SPI_MEM_DATA_IN) && !needs_ip_only(xspi) + && ((op->addr.val + op->data.nbytes) <= xspi->memmap_phy_size)) + err = nxp_xspi_ahb_read(xspi, op); + else + err = nxp_xspi_do_op(xspi, op); + + nxp_xspi_sw_reset(xspi); + + return err; +} + +static int nxp_xspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > xspi->devtype_data->txfifo) + op->data.nbytes = xspi->devtype_data->txfifo; + } else { + /* Limit data bytes to RX FIFO in case of IP read only */ + if (needs_ip_only(xspi) && (op->data.nbytes > xspi->devtype_data->rxfifo)) + op->data.nbytes = xspi->devtype_data->rxfifo; + + /* Address in AHB mapped range prefer to use AHB read. 
*/ + if (!needs_ip_only(xspi) && (op->addr.val < xspi->memmap_phy_size) + && ((op->addr.val + op->data.nbytes) > xspi->memmap_phy_size)) + op->data.nbytes = xspi->memmap_phy_size - op->addr.val; + } + + return 0; +} + +static void nxp_xspi_config_ahb_buffer(struct nxp_xspi *xspi) +{ + void __iomem *base = xspi->iobase; + u32 ahb_data_trans_size; + u32 reg; + + writel(0xA, base + XSPI_BUF0CR); + writel(0x2, base + XSPI_BUF1CR); + writel(0xD, base + XSPI_BUF2CR); + + /* Configure buffer3 for All Master Access */ + reg = FIELD_PREP(XSPI_BUF3CR_MSTRID_MASK, 0x06) | + XSPI_BUF3CR_ALLMST; + + ahb_data_trans_size = xspi->devtype_data->ahb_buf_size / 8; + reg |= FIELD_PREP(XSPI_BUF3CR_ADATSZ_MASK, ahb_data_trans_size); + writel(reg, base + XSPI_BUF3CR); + + /* Only the buffer3 is used */ + writel(0, base + XSPI_BUF0IND); + writel(0, base + XSPI_BUF1IND); + writel(0, base + XSPI_BUF2IND); + + /* AHB only use ID=15 for read */ + reg = FIELD_PREP(XSPI_BFGENCR_SEQID_MASK, XSPI_SEQID_LUT); + reg |= XSPI_BFGENCR_WR_FLUSH_EN; + /* No limit for align */ + reg |= FIELD_PREP(XSPI_BFGENCR_ALIGN_MASK, 0); + writel(reg, base + XSPI_BFGENCR); +} + +static int nxp_xspi_default_setup(struct nxp_xspi *xspi) +{ + void __iomem *base = xspi->iobase; + u32 reg; + + /* Bypass SFP check, clear MGC_GVLD, MGC_GVLDMDAD, MGC_GVLDFRAD */ + writel(0, base + XSPI_MGC); + + /* Enable the EENV0 SFP check */ + reg = readl(base + XSPI_TG0MDAD); + reg |= XSPI_TG0MDAD_VLD; + writel(reg, base + XSPI_TG0MDAD); + + /* Give read/write access right to EENV0 */ + reg = readl(base + XSPI_FRAD0_WORD2); + reg &= ~XSPI_FRAD0_WORD2_MD0ACP_MASK; + reg |= FIELD_PREP(XSPI_FRAD0_WORD2_MD0ACP_MASK, 0x03); + writel(reg, base + XSPI_FRAD0_WORD2); + + /* Enable the FRAD check for EENV0 */ + reg = readl(base + XSPI_FRAD0_WORD3); + reg |= XSPI_FRAD0_WORD3_VLD; + writel(reg, base + XSPI_FRAD0_WORD3); + + /* + * Config the timeout to max value, this timeout will affect the + * TBDR and RBDRn access right after IP cmd triggered. 
+ */ + writel(0xFFFFFFFF, base + XSPI_MTO); + + /* Disable module */ + reg = readl(base + XSPI_MCR); + reg |= XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + + nxp_xspi_sw_reset(xspi); + + reg = readl(base + XSPI_MCR); + reg &= ~(XSPI_MCR_CKN_FA_EN | XSPI_MCR_DQS_FA_SEL_MASK | + XSPI_MCR_DOZE | XSPI_MCR_VAR_LAT_EN | + XSPI_MCR_DDR_EN | XSPI_MCR_DQS_OUT_EN); + reg |= XSPI_MCR_DQS_EN; + reg |= XSPI_MCR_ISD3FA | XSPI_MCR_ISD2FA; + writel(reg, base + XSPI_MCR); + + reg = readl(base + XSPI_SFACR); + reg &= ~(XSPI_SFACR_FORCE_A10 | XSPI_SFACR_WA_4B_EN | + XSPI_SFACR_BYTE_SWAP | XSPI_SFACR_WA | + XSPI_SFACR_CAS_MASK); + reg |= XSPI_SFACR_FORCE_A10; + writel(reg, base + XSPI_SFACR); + + nxp_xspi_config_ahb_buffer(xspi); + + reg = FIELD_PREP(XSPI_FLSHCR_TCSH_MASK, 0x03) | + FIELD_PREP(XSPI_FLSHCR_TCSS_MASK, 0x03); + writel(reg, base + XSPI_FLSHCR); + + /* Enable module */ + reg = readl(base + XSPI_MCR); + reg &= ~XSPI_MCR_MDIS; + writel(reg, base + XSPI_MCR); + + xspi->selected = -1; + + /* Enable the interrupt */ + writel(XSPI_RSER_TFIE, base + XSPI_RSER); + + return 0; +} + +static const char *nxp_xspi_get_name(struct spi_mem *mem) +{ + struct nxp_xspi *xspi = spi_controller_get_devdata(mem->spi->controller); + struct device *dev = &mem->spi->dev; + const char *name; + + /* Set custom name derived from the platform_device of the controller. 
*/ + if (of_get_available_child_count(xspi->dev->of_node) == 1) + return dev_name(xspi->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(xspi->dev), + spi_get_chipselect(mem->spi, 0)); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops nxp_xspi_mem_ops = { + .adjust_op_size = nxp_xspi_adjust_op_size, + .supports_op = nxp_xspi_supports_op, + .exec_op = nxp_xspi_exec_op, + .get_name = nxp_xspi_get_name, +}; + +static const struct spi_controller_mem_caps nxp_xspi_mem_caps = { + .dtr = true, + .per_op_freq = true, + .swap16 = true, +}; + +static void nxp_xspi_cleanup(void *data) +{ + struct nxp_xspi *xspi = data; + u32 reg; + + pm_runtime_get_sync(xspi->dev); + + /* Disable interrupt */ + writel(0, xspi->iobase + XSPI_RSER); + /* Clear all the internal logic flags */ + writel(0xFFFFFFFF, xspi->iobase + XSPI_FR); + /* Disable the hardware */ + reg = readl(xspi->iobase + XSPI_MCR); + reg |= XSPI_MCR_MDIS; + writel(reg, xspi->iobase + XSPI_MCR); + + pm_runtime_put_sync(xspi->dev); + + if (xspi->ahb_addr) + iounmap(xspi->ahb_addr); +} + +static int nxp_xspi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *ctlr; + struct nxp_xspi *xspi; + struct resource *res; + int ret, irq; + + ctlr = devm_spi_alloc_host(dev, sizeof(*xspi)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | + SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL; + + xspi = spi_controller_get_devdata(ctlr); + xspi->dev = dev; + xspi->devtype_data = device_get_match_data(dev); + if (!xspi->devtype_data) + return -ENODEV; + + platform_set_drvdata(pdev, xspi); + + /* Find the resources - configuration register address space */ + xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "base"); + if (IS_ERR(xspi->iobase)) + return PTR_ERR(xspi->iobase); + + /* Find the resources - 
controller memory mapped space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmap"); + if (!res) + return -ENODEV; + + /* Assign memory mapped starting address and mapped size. */ + xspi->memmap_phy = res->start; + xspi->memmap_phy_size = resource_size(res); + + /* Find the clocks */ + xspi->clk = devm_clk_get(dev, "per"); + if (IS_ERR(xspi->clk)) + return PTR_ERR(xspi->clk); + + /* Find the irq */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "Failed to get irq source"); + + pm_runtime_set_autosuspend_delay(dev, XSPI_RPM_TIMEOUT_MS); + pm_runtime_use_autosuspend(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) + return ret; + + PM_RUNTIME_ACQUIRE_AUTOSUSPEND(dev, pm); + ret = PM_RUNTIME_ACQUIRE_ERR(&pm); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable clock"); + + /* Clear potential interrupt by write xspi errstat */ + writel(0xFFFFFFFF, xspi->iobase + XSPI_ERRSTAT); + writel(0xFFFFFFFF, xspi->iobase + XSPI_FR); + + nxp_xspi_default_setup(xspi); + + ret = devm_request_irq(dev, irq, + nxp_xspi_irq_handler, 0, pdev->name, xspi); + if (ret) + return dev_err_probe(dev, ret, "failed to request irq"); + + ret = devm_mutex_init(dev, &xspi->lock); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, nxp_xspi_cleanup, xspi); + if (ret) + return ret; + + ctlr->bus_num = -1; + ctlr->num_chipselect = NXP_XSPI_MAX_CHIPSELECT; + ctlr->mem_ops = &nxp_xspi_mem_ops; + ctlr->mem_caps = &nxp_xspi_mem_caps; + + return devm_spi_register_controller(dev, ctlr); +} + +static int nxp_xspi_runtime_suspend(struct device *dev) +{ + struct nxp_xspi *xspi = dev_get_drvdata(dev); + u32 reg; + + reg = readl(xspi->iobase + XSPI_MCR); + reg |= XSPI_MCR_MDIS; + writel(reg, xspi->iobase + XSPI_MCR); + + clk_disable_unprepare(xspi->clk); + + return 0; +} + +static int nxp_xspi_runtime_resume(struct device *dev) +{ + struct nxp_xspi *xspi = dev_get_drvdata(dev); + u32 reg; + int ret; + + ret = 
clk_prepare_enable(xspi->clk); + if (ret) + return ret; + + reg = readl(xspi->iobase + XSPI_MCR); + reg &= ~XSPI_MCR_MDIS; + writel(reg, xspi->iobase + XSPI_MCR); + + return 0; +} + +static int nxp_xspi_suspend(struct device *dev) +{ + int ret; + + ret = pinctrl_pm_select_sleep_state(dev); + if (ret) { + dev_err(dev, "select flexspi sleep pinctrl failed!\n"); + return ret; + } + + return pm_runtime_force_suspend(dev); +} + +static int nxp_xspi_resume(struct device *dev) +{ + struct nxp_xspi *xspi = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret) + return ret; + + nxp_xspi_default_setup(xspi); + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + dev_err(dev, "select flexspi default pinctrl failed!\n"); + + return ret; +} + + +static const struct dev_pm_ops nxp_xspi_pm_ops = { + RUNTIME_PM_OPS(nxp_xspi_runtime_suspend, nxp_xspi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(nxp_xspi_suspend, nxp_xspi_resume) +}; + +static const struct of_device_id nxp_xspi_dt_ids[] = { + { .compatible = "nxp,imx94-xspi", .data = (void *)&imx94_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nxp_xspi_dt_ids); + +static struct platform_driver nxp_xspi_driver = { + .driver = { + .name = "nxp-xspi", + .of_match_table = nxp_xspi_dt_ids, + .pm = pm_ptr(&nxp_xspi_pm_ops), + }, + .probe = nxp_xspi_probe, +}; +module_platform_driver(nxp_xspi_driver); + +MODULE_DESCRIPTION("NXP xSPI Controller Driver"); +MODULE_AUTHOR("NXP Semiconductor"); +MODULE_AUTHOR("Haibo Chen "); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index cba229920357..29333b1f82d7 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c @@ -192,7 +192,6 @@ static int tiny_spi_of_probe(struct platform_device *pdev) if (!np) return 0; - hw->bitbang.ctlr->dev.of_node = pdev->dev.of_node; if (!of_property_read_u32(np, "clock-frequency", &val)) hw->freq = val; if (!of_property_read_u32(np, "baud-width", &val)) diff --git 
a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 43bd9f21137f..7a2186b51b4c 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -780,7 +780,6 @@ static int orion_spi_probe(struct platform_device *pdev) if (status < 0) goto out_rel_pm; - host->dev.of_node = pdev->dev.of_node; status = spi_register_controller(host); if (status < 0) goto out_rel_pm; diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 9e56e8774614..c32a1fba31ef 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1893,7 +1893,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) host->handle_err = pl022_handle_err; host->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware; host->rt = platform_info->rt; - host->dev.of_node = dev->of_node; host->use_gpio_descriptors = true; /* diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index ec7117a94d5f..78c399e95ef2 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -1290,8 +1290,6 @@ int pxa2xx_spi_probe(struct device *dev, struct ssp_device *ssp, drv_data->controller_info = platform_info; drv_data->ssp = ssp; - device_set_node(&controller->dev, dev_fwnode(dev)); - /* The spi->mode bits understood by this driver: */ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c index 3aeddada58e1..7e39038160e0 100644 --- a/drivers/spi/spi-qcom-qspi.c +++ b/drivers/spi/spi-qcom-qspi.c @@ -763,7 +763,6 @@ static int qcom_qspi_probe(struct platform_device *pdev) host->dma_alignment = QSPI_ALIGN_REQ; host->num_chipselect = QSPI_NUM_CS; host->bus_num = -1; - host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_MODE_0 | SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c index 7681a91d67d5..d7fef48f20ef 100644 --- a/drivers/spi/spi-qpic-snand.c +++ 
b/drivers/spi/spi-qpic-snand.c @@ -850,8 +850,6 @@ static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); snandc->regs->exec = cpu_to_le32(1); - qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); - qcom_clear_bam_transaction(snandc); qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); @@ -941,8 +939,6 @@ static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); snandc->regs->exec = cpu_to_le32(1); - qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); - qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, @@ -1587,7 +1583,6 @@ static int qcom_spi_probe(struct platform_device *pdev) ctlr->num_chipselect = QPIC_QSPI_NUM_CS; ctlr->mem_ops = &qcom_spi_mem_ops; ctlr->mem_caps = &qcom_spi_mem_caps; - ctlr->dev.of_node = pdev->dev.of_node; ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD; diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 7d647edf6bc3..6cbdcd060e8c 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1091,7 +1091,6 @@ static int spi_qup_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); host->max_speed_hz = max_freq; host->transfer_one = spi_qup_transfer_one; - host->dev.of_node = pdev->dev.of_node; host->auto_runtime_pm = true; host->dma_alignment = dma_get_cache_alignment(); host->max_dma_len = SPI_MAX_XFER; diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c index 22b86fc89132..7b944f2819ec 100644 --- a/drivers/spi/spi-rb4xx.c +++ b/drivers/spi/spi-rb4xx.c @@ -160,7 +160,6 @@ static int rb4xx_spi_probe(struct platform_device *pdev) if (IS_ERR(ahb_clk)) return PTR_ERR(ahb_clk); - host->dev.of_node = pdev->dev.of_node; host->bus_num = 
0; host->num_chipselect = 3; host->mode_bits = SPI_TX_DUAL; diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c index 741cf2af3e91..7d5853d202c6 100644 --- a/drivers/spi/spi-realtek-rtl-snand.c +++ b/drivers/spi/spi-realtek-rtl-snand.c @@ -400,7 +400,6 @@ static int rtl_snand_probe(struct platform_device *pdev) ctrl->mem_ops = &rtl_snand_mem_ops; ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; - device_set_node(&ctrl->dev, dev_fwnode(dev)); return devm_spi_register_controller(dev, ctrl); } diff --git a/drivers/spi/spi-realtek-rtl.c b/drivers/spi/spi-realtek-rtl.c index 0b0123e20b54..d7acc02105ca 100644 --- a/drivers/spi/spi-realtek-rtl.c +++ b/drivers/spi/spi-realtek-rtl.c @@ -169,7 +169,6 @@ static int realtek_rtl_spi_probe(struct platform_device *pdev) init_hw(rtspi); - ctrl->dev.of_node = pdev->dev.of_node; ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX; ctrl->set_cs = rt_set_cs; ctrl->transfer_one = transfer_one; diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index b3c2b03b1153..2990bf85ee47 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -622,7 +622,6 @@ static int rockchip_sfc_probe(struct platform_device *pdev) host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->mem_ops = &rockchip_sfc_mem_ops; host->mem_caps = &rockchip_sfc_mem_caps; - host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL; host->max_speed_hz = SFC_MAX_SPEED; host->num_chipselect = SFC_MAX_CHIPSELECT_NUM; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 1a6381de6f33..fd2ebef4903f 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -805,8 +805,8 @@ static int rockchip_spi_probe(struct platform_device *pdev) if (ret < 0) goto err_put_ctlr; - ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL, - IRQF_ONESHOT, 
dev_name(&pdev->dev), ctlr); + ret = devm_request_irq(&pdev->dev, ret, rockchip_spi_isr, 0, + dev_name(&pdev->dev), ctlr); if (ret) goto err_put_ctlr; @@ -858,7 +858,6 @@ static int rockchip_spi_probe(struct platform_device *pdev) ctlr->num_chipselect = num_cs; ctlr->use_gpio_descriptors = true; } - ctlr->dev.of_node = pdev->dev.of_node; ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4); ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX; ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 8e1d911b88b5..c739c1998b4c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -1338,7 +1338,6 @@ static int rspi_probe(struct platform_device *pdev) ctlr->min_speed_hz = DIV_ROUND_UP(clksrc, ops->max_div); ctlr->max_speed_hz = DIV_ROUND_UP(clksrc, ops->min_div); ctlr->flags = ops->flags; - ctlr->dev.of_node = pdev->dev.of_node; ctlr->use_gpio_descriptors = true; ctlr->max_native_cs = rspi->ops->num_hw_ss; diff --git a/drivers/spi/spi-rzv2h-rspi.c b/drivers/spi/spi-rzv2h-rspi.c index 1db7e4e5d64e..23f0e92ae208 100644 --- a/drivers/spi/spi-rzv2h-rspi.c +++ b/drivers/spi/spi-rzv2h-rspi.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -21,6 +22,8 @@ #include #include +#include "internals.h" + /* Registers */ #define RSPI_SPDR 0x00 #define RSPI_SPCR 0x08 @@ -37,6 +40,7 @@ /* Register SPCR */ #define RSPI_SPCR_BPEN BIT(31) #define RSPI_SPCR_MSTR BIT(30) +#define RSPI_SPCR_SPTIE BIT(20) #define RSPI_SPCR_SPRIE BIT(17) #define RSPI_SPCR_SCKASE BIT(12) #define RSPI_SPCR_SPE BIT(0) @@ -93,31 +97,29 @@ struct rzv2h_rspi_info { }; struct rzv2h_rspi_priv { - struct reset_control_bulk_data resets[RSPI_RESET_NUM]; struct spi_controller *controller; const struct rzv2h_rspi_info *info; + struct platform_device *pdev; void __iomem *base; struct clk *tclk; struct clk *pclk; wait_queue_head_t wait; unsigned int bytes_per_word; + int 
irq_rx; u32 last_speed_hz; u32 freq; u16 status; u8 spr; u8 brdv; bool use_pclk; + bool dma_callbacked; }; #define RZV2H_RSPI_TX(func, type) \ static inline void rzv2h_rspi_tx_##type(struct rzv2h_rspi_priv *rspi, \ const void *txbuf, \ unsigned int index) { \ - type buf = 0; \ - \ - if (txbuf) \ - buf = ((type *)txbuf)[index]; \ - \ + type buf = ((type *)txbuf)[index]; \ func(buf, rspi->base + RSPI_SPDR); \ } @@ -126,9 +128,7 @@ static inline void rzv2h_rspi_rx_##type(struct rzv2h_rspi_priv *rspi, \ void *rxbuf, \ unsigned int index) { \ type buf = func(rspi->base + RSPI_SPDR); \ - \ - if (rxbuf) \ - ((type *)rxbuf)[index] = buf; \ + ((type *)rxbuf)[index] = buf; \ } RZV2H_RSPI_TX(writel, u32) @@ -224,16 +224,27 @@ static int rzv2h_rspi_receive(struct rzv2h_rspi_priv *rspi, void *rxbuf, return 0; } -static int rzv2h_rspi_transfer_one(struct spi_controller *controller, - struct spi_device *spi, - struct spi_transfer *transfer) +static bool rzv2h_rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, + struct spi_transfer *xfer) { - struct rzv2h_rspi_priv *rspi = spi_controller_get_devdata(controller); - unsigned int words_to_transfer, i; - int ret = 0; + struct rzv2h_rspi_priv *rspi = spi_controller_get_devdata(ctlr); - transfer->effective_speed_hz = rspi->freq; - words_to_transfer = transfer->len / rspi->bytes_per_word; + if (ctlr->fallback) + return false; + + if (!ctlr->dma_tx || !ctlr->dma_rx) + return false; + + return xfer->len > rspi->info->fifo_size; +} + +static int rzv2h_rspi_transfer_pio(struct rzv2h_rspi_priv *rspi, + struct spi_device *spi, + struct spi_transfer *transfer, + unsigned int words_to_transfer) +{ + unsigned int i; + int ret = 0; for (i = 0; i < words_to_transfer; i++) { rzv2h_rspi_clear_all_irqs(rspi); @@ -245,12 +256,151 @@ static int rzv2h_rspi_transfer_one(struct spi_controller *controller, break; } + return ret; +} + +static void rzv2h_rspi_dma_complete(void *arg) +{ + struct rzv2h_rspi_priv *rspi = arg; + + 
rspi->dma_callbacked = 1; + wake_up_interruptible(&rspi->wait); +} + +static struct dma_async_tx_descriptor * +rzv2h_rspi_setup_dma_channel(struct rzv2h_rspi_priv *rspi, + struct dma_chan *chan, struct sg_table *sg, + enum dma_slave_buswidth width, + enum dma_transfer_direction direction) +{ + struct dma_slave_config config = { + .dst_addr = rspi->pdev->resource->start + RSPI_SPDR, + .src_addr = rspi->pdev->resource->start + RSPI_SPDR, + .dst_addr_width = width, + .src_addr_width = width, + .direction = direction, + }; + struct dma_async_tx_descriptor *desc; + int ret; + + ret = dmaengine_slave_config(chan, &config); + if (ret) + return ERR_PTR(ret); + + desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return ERR_PTR(-EAGAIN); + + if (direction == DMA_DEV_TO_MEM) { + desc->callback = rzv2h_rspi_dma_complete; + desc->callback_param = rspi; + } + + return desc; +} + +static enum dma_slave_buswidth +rzv2h_rspi_dma_width(struct rzv2h_rspi_priv *rspi) +{ + switch (rspi->bytes_per_word) { + case 4: + return DMA_SLAVE_BUSWIDTH_4_BYTES; + case 2: + return DMA_SLAVE_BUSWIDTH_2_BYTES; + case 1: + return DMA_SLAVE_BUSWIDTH_1_BYTE; + default: + return DMA_SLAVE_BUSWIDTH_UNDEFINED; + } +} + +static int rzv2h_rspi_transfer_dma(struct rzv2h_rspi_priv *rspi, + struct spi_device *spi, + struct spi_transfer *transfer, + unsigned int words_to_transfer) +{ + struct dma_async_tx_descriptor *tx_desc = NULL, *rx_desc = NULL; + enum dma_slave_buswidth width; + dma_cookie_t cookie; + int ret; + + width = rzv2h_rspi_dma_width(rspi); + if (width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + return -EINVAL; + + rx_desc = rzv2h_rspi_setup_dma_channel(rspi, rspi->controller->dma_rx, + &transfer->rx_sg, width, + DMA_DEV_TO_MEM); + if (IS_ERR(rx_desc)) + return PTR_ERR(rx_desc); + + tx_desc = rzv2h_rspi_setup_dma_channel(rspi, rspi->controller->dma_tx, + &transfer->tx_sg, width, + DMA_MEM_TO_DEV); + if (IS_ERR(tx_desc)) + return 
PTR_ERR(tx_desc); + + cookie = dmaengine_submit(rx_desc); + if (dma_submit_error(cookie)) + return cookie; + + cookie = dmaengine_submit(tx_desc); + if (dma_submit_error(cookie)) { + dmaengine_terminate_sync(rspi->controller->dma_rx); + return cookie; + } + + /* + * DMA transfer does not need IRQs to be enabled. + * For PIO, we only use RX IRQ, so disable that. + */ + disable_irq(rspi->irq_rx); + + rspi->dma_callbacked = 0; + + dma_async_issue_pending(rspi->controller->dma_rx); + dma_async_issue_pending(rspi->controller->dma_tx); rzv2h_rspi_clear_all_irqs(rspi); - if (ret) - transfer->error = SPI_TRANS_FAIL_IO; + ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); + if (ret) { + dmaengine_synchronize(rspi->controller->dma_tx); + dmaengine_synchronize(rspi->controller->dma_rx); + ret = 0; + } else { + dmaengine_terminate_sync(rspi->controller->dma_tx); + dmaengine_terminate_sync(rspi->controller->dma_rx); + ret = -ETIMEDOUT; + } - spi_finalize_current_transfer(controller); + enable_irq(rspi->irq_rx); + + return ret; +} + +static int rzv2h_rspi_transfer_one(struct spi_controller *controller, + struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct rzv2h_rspi_priv *rspi = spi_controller_get_devdata(controller); + bool is_dma = spi_xfer_is_dma_mapped(controller, spi, transfer); + unsigned int words_to_transfer; + int ret; + + transfer->effective_speed_hz = rspi->freq; + words_to_transfer = transfer->len / rspi->bytes_per_word; + + if (is_dma) + ret = rzv2h_rspi_transfer_dma(rspi, spi, transfer, words_to_transfer); + else + ret = rzv2h_rspi_transfer_pio(rspi, spi, transfer, words_to_transfer); + + rzv2h_rspi_clear_all_irqs(rspi); + + if (is_dma && ret == -EAGAIN) + /* Retry with PIO */ + transfer->error = SPI_TRANS_FAIL_NO_START; return ret; } @@ -485,6 +635,9 @@ static int rzv2h_rspi_prepare_message(struct spi_controller *ctlr, /* SPI receive buffer full interrupt enable */ conf32 |= RSPI_SPCR_SPRIE; + /* SPI transmit buffer 
empty interrupt enable */ + conf32 |= RSPI_SPCR_SPTIE; + /* Bypass synchronization circuit */ conf32 |= FIELD_PREP(RSPI_SPCR_BPEN, rspi->use_pclk); @@ -512,7 +665,7 @@ static int rzv2h_rspi_prepare_message(struct spi_controller *ctlr, writeb(0, rspi->base + RSPI_SSLP); /* Setup FIFO thresholds */ - conf16 = FIELD_PREP(RSPI_SPDCR2_TTRG, rspi->info->fifo_size - 1); + conf16 = FIELD_PREP(RSPI_SPDCR2_TTRG, 0); conf16 |= FIELD_PREP(RSPI_SPDCR2_RTRG, 0); writew(conf16, rspi->base + RSPI_SPDCR2); @@ -538,9 +691,10 @@ static int rzv2h_rspi_probe(struct platform_device *pdev) struct spi_controller *controller; struct device *dev = &pdev->dev; struct rzv2h_rspi_priv *rspi; + struct reset_control *reset; struct clk_bulk_data *clks; - int irq_rx, ret, i; long tclk_rate; + int ret, i; controller = devm_spi_alloc_host(dev, sizeof(*rspi)); if (!controller) @@ -550,6 +704,7 @@ static int rzv2h_rspi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, rspi); rspi->controller = controller; + rspi->pdev = pdev; rspi->info = device_get_match_data(dev); @@ -573,83 +728,82 @@ static int rzv2h_rspi_probe(struct platform_device *pdev) if (!rspi->tclk) return dev_err_probe(dev, -EINVAL, "Failed to get tclk\n"); - rspi->resets[0].id = "presetn"; - rspi->resets[1].id = "tresetn"; - ret = devm_reset_control_bulk_get_optional_exclusive(dev, RSPI_RESET_NUM, - rspi->resets); - if (ret) - return dev_err_probe(dev, ret, "cannot get resets\n"); + reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, + "presetn"); + if (IS_ERR(reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(reset), + "cannot get presetn reset\n"); - irq_rx = platform_get_irq_byname(pdev, "rx"); - if (irq_rx < 0) - return dev_err_probe(dev, irq_rx, "cannot get IRQ 'rx'\n"); + reset = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, + "tresetn"); + if (IS_ERR(reset)) + return dev_err_probe(&pdev->dev, PTR_ERR(reset), + "cannot get tresetn reset\n"); - ret = 
reset_control_bulk_deassert(RSPI_RESET_NUM, rspi->resets); - if (ret) - return dev_err_probe(dev, ret, "failed to deassert resets\n"); + rspi->irq_rx = platform_get_irq_byname(pdev, "rx"); + if (rspi->irq_rx < 0) + return dev_err_probe(dev, rspi->irq_rx, "cannot get IRQ 'rx'\n"); init_waitqueue_head(&rspi->wait); - ret = devm_request_irq(dev, irq_rx, rzv2h_rx_irq_handler, 0, + ret = devm_request_irq(dev, rspi->irq_rx, rzv2h_rx_irq_handler, 0, dev_name(dev), rspi); if (ret) { dev_err(dev, "cannot request `rx` IRQ\n"); - goto quit_resets; + return ret; } controller->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | SPI_LOOP; + controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); controller->prepare_message = rzv2h_rspi_prepare_message; controller->unprepare_message = rzv2h_rspi_unprepare_message; controller->num_chipselect = 4; controller->transfer_one = rzv2h_rspi_transfer_one; + controller->can_dma = rzv2h_rspi_can_dma; tclk_rate = clk_round_rate(rspi->tclk, 0); - if (tclk_rate < 0) { - ret = tclk_rate; - goto quit_resets; - } + if (tclk_rate < 0) + return tclk_rate; controller->min_speed_hz = rzv2h_rspi_calc_bitrate(tclk_rate, RSPI_SPBR_SPR_MAX, RSPI_SPCMD_BRDV_MAX); tclk_rate = clk_round_rate(rspi->tclk, ULONG_MAX); - if (tclk_rate < 0) { - ret = tclk_rate; - goto quit_resets; - } + if (tclk_rate < 0) + return tclk_rate; controller->max_speed_hz = rzv2h_rspi_calc_bitrate(tclk_rate, RSPI_SPBR_SPR_MIN, RSPI_SPCMD_BRDV_MIN); - device_set_node(&controller->dev, dev_fwnode(dev)); - - ret = spi_register_controller(controller); - if (ret) { - dev_err(dev, "register controller failed\n"); - goto quit_resets; + controller->dma_tx = devm_dma_request_chan(dev, "tx"); + if (IS_ERR(controller->dma_tx)) { + ret = dev_warn_probe(dev, PTR_ERR(controller->dma_tx), + "failed to request TX DMA channel\n"); + if (ret == -EPROBE_DEFER) + return ret; + controller->dma_tx = NULL; } - return 0; + 
controller->dma_rx = devm_dma_request_chan(dev, "rx"); + if (IS_ERR(controller->dma_rx)) { + ret = dev_warn_probe(dev, PTR_ERR(controller->dma_rx), + "failed to request RX DMA channel\n"); + if (ret == -EPROBE_DEFER) + return ret; + controller->dma_rx = NULL; + } -quit_resets: - reset_control_bulk_assert(RSPI_RESET_NUM, rspi->resets); + ret = devm_spi_register_controller(dev, controller); + if (ret) + dev_err(dev, "register controller failed\n"); return ret; } -static void rzv2h_rspi_remove(struct platform_device *pdev) -{ - struct rzv2h_rspi_priv *rspi = platform_get_drvdata(pdev); - - spi_unregister_controller(rspi->controller); - - reset_control_bulk_assert(RSPI_RESET_NUM, rspi->resets); -} - static const struct rzv2h_rspi_info rzv2h_info = { .find_tclk_rate = rzv2h_rspi_find_rate_fixed, .tclk_name = "tclk", @@ -674,7 +828,6 @@ MODULE_DEVICE_TABLE(of, rzv2h_rspi_match); static struct platform_driver rzv2h_rspi_drv = { .probe = rzv2h_rspi_probe, - .remove = rzv2h_rspi_remove, .driver = { .name = "rzv2h_rspi", .of_match_table = rzv2h_rspi_match, diff --git a/drivers/spi/spi-rzv2m-csi.c b/drivers/spi/spi-rzv2m-csi.c index 7c0442883ac0..5d80939dddb5 100644 --- a/drivers/spi/spi-rzv2m-csi.c +++ b/drivers/spi/spi-rzv2m-csi.c @@ -634,8 +634,6 @@ static int rzv2m_csi_probe(struct platform_device *pdev) controller->use_gpio_descriptors = true; controller->target_abort = rzv2m_csi_target_abort; - device_set_node(&controller->dev, dev_fwnode(dev)); - ret = devm_request_irq(dev, irq, rzv2m_csi_irq_handler, 0, dev_name(dev), csi); if (ret) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index aab36c779c06..4fbefd85d2e2 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -1295,7 +1295,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) sdd->tx_dma.direction = DMA_MEM_TO_DEV; sdd->rx_dma.direction = DMA_DEV_TO_MEM; - host->dev.of_node = pdev->dev.of_node; host->bus_num = -1; host->setup = s3c64xx_spi_setup; host->cleanup = 
s3c64xx_spi_cleanup; diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index 1627aa66c965..78c558e7228e 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c @@ -251,8 +251,6 @@ static int sc18is602_probe(struct i2c_client *client) if (!host) return -ENOMEM; - device_set_node(&host->dev, dev_fwnode(dev)); - hw = spi_controller_get_devdata(host); /* assert reset and then release */ diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c index 37f1cfe10be4..f3bcb8a1b92b 100644 --- a/drivers/spi/spi-sg2044-nor.c +++ b/drivers/spi/spi-sg2044-nor.c @@ -455,7 +455,6 @@ static int sg2044_spifmc_probe(struct platform_device *pdev) return PTR_ERR(spifmc->io_base); ctrl->num_chipselect = 1; - ctrl->dev.of_node = pdev->dev.of_node; ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ctrl->auto_runtime_pm = false; ctrl->mem_ops = &sg2044_spifmc_mem_ops; diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index 93017faeb7b5..e03eaca1b1a7 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c @@ -253,7 +253,6 @@ static int hspi_probe(struct platform_device *pdev) ctlr->bus_num = pdev->id; ctlr->mode_bits = SPI_CPOL | SPI_CPHA; - ctlr->dev.of_node = pdev->dev.of_node; ctlr->auto_runtime_pm = true; ctlr->transfer_one_message = hspi_transfer_one_message; ctlr->bits_per_word_mask = SPI_BPW_MASK(8); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index b695870fae8c..7f3e08810560 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1276,7 +1276,6 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ctlr->flags = chipdata->ctlr_flags; ctlr->bus_num = pdev->id; ctlr->num_chipselect = p->info->num_chipselect; - ctlr->dev.of_node = dev->of_node; ctlr->setup = sh_msiof_spi_setup; ctlr->prepare_message = sh_msiof_prepare_message; ctlr->target_abort = sh_msiof_target_abort; diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c index 
87bde2a207a3..6c7aba8befa0 100644 --- a/drivers/spi/spi-sifive.c +++ b/drivers/spi/spi-sifive.c @@ -368,7 +368,6 @@ static int sifive_spi_probe(struct platform_device *pdev) } /* Define our host */ - host->dev.of_node = pdev->dev.of_node; host->bus_num = pdev->id; host->num_chipselect = num_cs; host->mode_bits = SPI_CPHA | SPI_CPOL diff --git a/drivers/spi/spi-slave-mt27xx.c b/drivers/spi/spi-slave-mt27xx.c index e331df967385..ce889cb33228 100644 --- a/drivers/spi/spi-slave-mt27xx.c +++ b/drivers/spi/spi-slave-mt27xx.c @@ -395,7 +395,6 @@ static int mtk_spi_slave_probe(struct platform_device *pdev) } ctlr->auto_runtime_pm = true; - ctlr->dev.of_node = pdev->dev.of_node; ctlr->mode_bits = SPI_CPOL | SPI_CPHA; ctlr->mode_bits |= SPI_LSB_FIRST; diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c index c4969f66a0ba..bfcc140df810 100644 --- a/drivers/spi/spi-sn-f-ospi.c +++ b/drivers/spi/spi-sn-f-ospi.c @@ -628,7 +628,6 @@ static int f_ospi_probe(struct platform_device *pdev) return -ENOMEM; } ctlr->num_chipselect = num_cs; - ctlr->dev.of_node = dev->of_node; ospi = spi_controller_get_devdata(ctlr); ospi->dev = dev; diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c index f25b34a91756..e7d83c16b46c 100644 --- a/drivers/spi/spi-sprd-adi.c +++ b/drivers/spi/spi-sprd-adi.c @@ -566,7 +566,6 @@ static int sprd_adi_probe(struct platform_device *pdev) if (sadi->data->wdg_rst) sadi->data->wdg_rst(sadi); - ctlr->dev.of_node = pdev->dev.of_node; ctlr->bus_num = pdev->id; ctlr->num_chipselect = num_chipselect; ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index ad75f5f0f2bf..0f9fc320363c 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -936,7 +936,6 @@ static int sprd_spi_probe(struct platform_device *pdev) ss->phy_base = res->start; ss->dev = &pdev->dev; - sctlr->dev.of_node = pdev->dev.of_node; sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL; 
sctlr->bus_num = pdev->id; sctlr->set_cs = sprd_spi_chipselect; diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c index c07c61dc4938..b173ef70d77e 100644 --- a/drivers/spi/spi-st-ssc4.c +++ b/drivers/spi/spi-st-ssc4.c @@ -403,7 +403,7 @@ static int spi_st_runtime_resume(struct device *dev) return ret; } -static int __maybe_unused spi_st_suspend(struct device *dev) +static int spi_st_suspend(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); int ret; @@ -415,7 +415,7 @@ static int __maybe_unused spi_st_suspend(struct device *dev) return pm_runtime_force_suspend(dev); } -static int __maybe_unused spi_st_resume(struct device *dev) +static int spi_st_resume(struct device *dev) { struct spi_controller *host = dev_get_drvdata(dev); int ret; diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c index f36fd36da269..c98afe02a1b6 100644 --- a/drivers/spi/spi-stm32-ospi.c +++ b/drivers/spi/spi-stm32-ospi.c @@ -34,8 +34,6 @@ #define CR_ABORT BIT(1) #define CR_DMAEN BIT(2) #define CR_FTHRES_SHIFT 8 -#define CR_TEIE BIT(16) -#define CR_TCIE BIT(17) #define CR_SMIE BIT(19) #define CR_APMS BIT(22) #define CR_CSSEL BIT(24) @@ -106,7 +104,7 @@ #define STM32_ABT_TIMEOUT_US 100000 #define STM32_COMP_TIMEOUT_MS 5000 #define STM32_BUSY_TIMEOUT_US 100000 - +#define STM32_WAIT_CMD_TIMEOUT_US 5000 #define STM32_AUTOSUSPEND_DELAY -1 @@ -116,7 +114,6 @@ struct stm32_ospi { struct clk *clk; struct reset_control *rstc; - struct completion data_completion; struct completion match_completion; struct dma_chan *dma_chtx; @@ -142,14 +139,32 @@ struct stm32_ospi { struct mutex lock; }; -static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr) +static void stm32_ospi_read_fifo(void *val, void __iomem *addr, u8 len) { - *val = readb_relaxed(addr); + switch (len) { + case sizeof(u32): + *((u32 *)val) = readl_relaxed(addr); + break; + case sizeof(u16): + *((u16 *)val) = readw_relaxed(addr); + break; + case sizeof(u8): + *((u8 *)val) = 
readb_relaxed(addr); + } } -static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr) +static void stm32_ospi_write_fifo(void *val, void __iomem *addr, u8 len) { - writeb_relaxed(*val, addr); + switch (len) { + case sizeof(u32): + writel_relaxed(*((u32 *)val), addr); + break; + case sizeof(u16): + writew_relaxed(*((u16 *)val), addr); + break; + case sizeof(u8): + writeb_relaxed(*((u8 *)val), addr); + } } static int stm32_ospi_abort(struct stm32_ospi *ospi) @@ -172,19 +187,20 @@ static int stm32_ospi_abort(struct stm32_ospi *ospi) return timeout; } -static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read) +static int stm32_ospi_poll(struct stm32_ospi *ospi, void *buf, u32 len, bool read) { void __iomem *regs_base = ospi->regs_base; - void (*fifo)(u8 *val, void __iomem *addr); + void (*fifo)(void *val, void __iomem *addr, u8 len); u32 sr; int ret; + u8 step; if (read) fifo = stm32_ospi_read_fifo; else fifo = stm32_ospi_write_fifo; - while (len--) { + while (len) { ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR, sr, sr & SR_FTF, 1, STM32_FIFO_TIMEOUT_US); @@ -193,7 +209,17 @@ static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read) len, sr); return ret; } - fifo(buf++, regs_base + OSPI_DR); + + if (len >= sizeof(u32)) + step = sizeof(u32); + else if (len >= sizeof(u16)) + step = sizeof(u16); + else + step = sizeof(u8); + + fifo(buf, regs_base + OSPI_DR, step); + len -= step; + buf += step; } return 0; @@ -211,22 +237,16 @@ static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi) static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi) { void __iomem *regs_base = ospi->regs_base; - u32 cr, sr; + u32 sr; int err = 0; - if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) || - ospi->fmode == CR_FMODE_APM) + if (ospi->fmode == CR_FMODE_APM) goto out; - reinit_completion(&ospi->data_completion); - cr = readl_relaxed(regs_base + OSPI_CR); - writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR); + err = 
readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR, sr, + (sr & (SR_TEF | SR_TCF)), 1, + STM32_WAIT_CMD_TIMEOUT_US); - if (!wait_for_completion_timeout(&ospi->data_completion, - msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) - err = -ETIMEDOUT; - - sr = readl_relaxed(regs_base + OSPI_SR); if (sr & SR_TCF) /* avoid false timeout */ err = 0; @@ -259,29 +279,29 @@ static irqreturn_t stm32_ospi_irq(int irq, void *dev_id) cr = readl_relaxed(regs_base + OSPI_CR); sr = readl_relaxed(regs_base + OSPI_SR); - if (cr & CR_SMIE && sr & SR_SMF) { + if (sr & SR_SMF) { /* disable irq */ cr &= ~CR_SMIE; writel_relaxed(cr, regs_base + OSPI_CR); complete(&ospi->match_completion); - - return IRQ_HANDLED; - } - - if (sr & (SR_TEF | SR_TCF)) { - /* disable irq */ - cr &= ~CR_TCIE & ~CR_TEIE; - writel_relaxed(cr, regs_base + OSPI_CR); - complete(&ospi->data_completion); } return IRQ_HANDLED; } -static void stm32_ospi_dma_setup(struct stm32_ospi *ospi, - struct dma_slave_config *dma_cfg) +static int stm32_ospi_dma_setup(struct stm32_ospi *ospi, + struct dma_slave_config *dma_cfg) { + struct dma_slave_caps caps; + int ret = 0; + if (dma_cfg && ospi->dma_chrx) { + ret = dma_get_slave_caps(ospi->dma_chrx, &caps); + if (ret) + return ret; + + dma_cfg->src_maxburst = caps.max_burst / dma_cfg->src_addr_width; + if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) { dev_err(ospi->dev, "dma rx config failed\n"); dma_release_channel(ospi->dma_chrx); @@ -290,6 +310,12 @@ static void stm32_ospi_dma_setup(struct stm32_ospi *ospi, } if (dma_cfg && ospi->dma_chtx) { + ret = dma_get_slave_caps(ospi->dma_chtx, &caps); + if (ret) + return ret; + + dma_cfg->dst_maxburst = caps.max_burst / dma_cfg->dst_addr_width; + if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) { dev_err(ospi->dev, "dma tx config failed\n"); dma_release_channel(ospi->dma_chtx); @@ -298,6 +324,8 @@ static void stm32_ospi_dma_setup(struct stm32_ospi *ospi, } init_completion(&ospi->dma_completion); + + return ret; } static int 
stm32_ospi_tx_mm(struct stm32_ospi *ospi, @@ -391,7 +419,7 @@ static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op) if (op->data.dir == SPI_MEM_DATA_IN) buf = op->data.buf.in; else - buf = (u8 *)op->data.buf.out; + buf = (void *)op->data.buf.out; return stm32_ospi_poll(ospi, buf, op->data.nbytes, op->data.dir == SPI_MEM_DATA_IN); @@ -838,7 +866,6 @@ static int stm32_ospi_get_resources(struct platform_device *pdev) dev_info(dev, "No memory-map region found\n"); } - init_completion(&ospi->data_completion); init_completion(&ospi->match_completion); return 0; @@ -899,9 +926,9 @@ static int stm32_ospi_probe(struct platform_device *pdev) dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR; dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR; - dma_cfg.src_maxburst = 4; - dma_cfg.dst_maxburst = 4; - stm32_ospi_dma_setup(ospi, &dma_cfg); + ret = stm32_ospi_dma_setup(ospi, &dma_cfg); + if (ret) + return ret; mutex_init(&ospi->lock); @@ -915,7 +942,6 @@ static int stm32_ospi_probe(struct platform_device *pdev) ctrl->use_gpio_descriptors = true; ctrl->transfer_one_message = stm32_ospi_transfer_one_message; ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP; - ctrl->dev.of_node = dev->of_node; pm_runtime_enable(ospi->dev); pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY); @@ -985,7 +1011,7 @@ static void stm32_ospi_remove(struct platform_device *pdev) pm_runtime_force_suspend(ospi->dev); } -static int __maybe_unused stm32_ospi_suspend(struct device *dev) +static int stm32_ospi_suspend(struct device *dev) { struct stm32_ospi *ospi = dev_get_drvdata(dev); @@ -996,7 +1022,7 @@ static int __maybe_unused stm32_ospi_suspend(struct device *dev) return pm_runtime_force_suspend(ospi->dev); } -static int __maybe_unused stm32_ospi_resume(struct device *dev) +static int stm32_ospi_resume(struct device *dev) { struct stm32_ospi *ospi = dev_get_drvdata(dev); void __iomem *regs_base = ospi->regs_base; 
@@ -1025,7 +1051,7 @@ static int __maybe_unused stm32_ospi_resume(struct device *dev) return 0; } -static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev) +static int stm32_ospi_runtime_suspend(struct device *dev) { struct stm32_ospi *ospi = dev_get_drvdata(dev); @@ -1034,7 +1060,7 @@ static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev) +static int stm32_ospi_runtime_resume(struct device *dev) { struct stm32_ospi *ospi = dev_get_drvdata(dev); @@ -1042,9 +1068,8 @@ static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev) } static const struct dev_pm_ops stm32_ospi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume) - SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend, - stm32_ospi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume) + RUNTIME_PM_OPS(stm32_ospi_runtime_suspend, stm32_ospi_runtime_resume, NULL) }; static const struct of_device_id stm32_ospi_of_match[] = { @@ -1058,7 +1083,7 @@ static struct platform_driver stm32_ospi_driver = { .remove = stm32_ospi_remove, .driver = { .name = "stm32-ospi", - .pm = &stm32_ospi_pm_ops, + .pm = pm_ptr(&stm32_ospi_pm_ops), .of_match_table = stm32_ospi_of_match, }, }; diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index f2d19f1c5ab1..df1bbacec90a 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -31,8 +31,6 @@ #define CR_DFM BIT(6) #define CR_FSEL BIT(7) #define CR_FTHRES_SHIFT 8 -#define CR_TEIE BIT(16) -#define CR_TCIE BIT(17) #define CR_FTIE BIT(18) #define CR_SMIE BIT(19) #define CR_TOIE BIT(20) @@ -86,11 +84,12 @@ #define STM32_QSPI_MAX_MMAP_SZ SZ_256M #define STM32_QSPI_MAX_NORCHIP 2 -#define STM32_FIFO_TIMEOUT_US 30000 -#define STM32_BUSY_TIMEOUT_US 100000 -#define STM32_ABT_TIMEOUT_US 100000 -#define STM32_COMP_TIMEOUT_MS 1000 -#define STM32_AUTOSUSPEND_DELAY -1 +#define 
STM32_FIFO_TIMEOUT_US 30000 +#define STM32_BUSY_TIMEOUT_US 100000 +#define STM32_ABT_TIMEOUT_US 100000 +#define STM32_WAIT_CMD_TIMEOUT_US 5000 +#define STM32_COMP_TIMEOUT_MS 1000 +#define STM32_AUTOSUSPEND_DELAY -1 struct stm32_qspi_flash { u32 cs; @@ -107,7 +106,6 @@ struct stm32_qspi { struct clk *clk; u32 clk_rate; struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP]; - struct completion data_completion; struct completion match_completion; u32 fmode; @@ -134,53 +132,63 @@ static irqreturn_t stm32_qspi_irq(int irq, void *dev_id) cr = readl_relaxed(qspi->io_base + QSPI_CR); sr = readl_relaxed(qspi->io_base + QSPI_SR); - if (cr & CR_SMIE && sr & SR_SMF) { + if (sr & SR_SMF) { /* disable irq */ cr &= ~CR_SMIE; writel_relaxed(cr, qspi->io_base + QSPI_CR); complete(&qspi->match_completion); - - return IRQ_HANDLED; - } - - if (sr & (SR_TEF | SR_TCF)) { - /* disable irq */ - cr &= ~CR_TCIE & ~CR_TEIE; - writel_relaxed(cr, qspi->io_base + QSPI_CR); - complete(&qspi->data_completion); } return IRQ_HANDLED; } -static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr) +static void stm32_qspi_read_fifo(void *val, void __iomem *addr, u8 len) { - *val = readb_relaxed(addr); + switch (len) { + case sizeof(u32): + *((u32 *)val) = readl_relaxed(addr); + break; + case sizeof(u16): + *((u16 *)val) = readw_relaxed(addr); + break; + case sizeof(u8): + *((u8 *)val) = readb_relaxed(addr); + } } -static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr) +static void stm32_qspi_write_fifo(void *val, void __iomem *addr, u8 len) { - writeb_relaxed(*val, addr); + switch (len) { + case sizeof(u32): + writel_relaxed(*((u32 *)val), addr); + break; + case sizeof(u16): + writew_relaxed(*((u16 *)val), addr); + break; + case sizeof(u8): + writeb_relaxed(*((u8 *)val), addr); + } } static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, const struct spi_mem_op *op) { - void (*tx_fifo)(u8 *val, void __iomem *addr); + void (*fifo)(void *val, void __iomem *addr, u8 len); u32 len = 
op->data.nbytes, sr; - u8 *buf; + void *buf; int ret; + u8 step; if (op->data.dir == SPI_MEM_DATA_IN) { - tx_fifo = stm32_qspi_read_fifo; + fifo = stm32_qspi_read_fifo; buf = op->data.buf.in; } else { - tx_fifo = stm32_qspi_write_fifo; - buf = (u8 *)op->data.buf.out; + fifo = stm32_qspi_write_fifo; + buf = (void *)op->data.buf.out; } - while (len--) { + while (len) { ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr, (sr & SR_FTF), 1, STM32_FIFO_TIMEOUT_US); @@ -189,7 +197,17 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, len, sr); return ret; } - tx_fifo(buf++, qspi->io_base + QSPI_DR); + + if (len >= sizeof(u32)) + step = sizeof(u32); + else if (len >= sizeof(u16)) + step = sizeof(u16); + else + step = sizeof(u8); + + fifo(buf, qspi->io_base + QSPI_DR, step); + len -= step; + buf += step; } return 0; @@ -301,25 +319,18 @@ static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi) static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi) { - u32 cr, sr; + u32 sr; int err = 0; - if ((readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF) || - qspi->fmode == CCR_FMODE_APM) + if (qspi->fmode == CCR_FMODE_APM) goto out; - reinit_completion(&qspi->data_completion); - cr = readl_relaxed(qspi->io_base + QSPI_CR); - writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR); + err = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr, + (sr & (SR_TEF | SR_TCF)), 1, + STM32_WAIT_CMD_TIMEOUT_US); - if (!wait_for_completion_timeout(&qspi->data_completion, - msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) { - err = -ETIMEDOUT; - } else { - sr = readl_relaxed(qspi->io_base + QSPI_SR); - if (sr & SR_TEF) - err = -EIO; - } + if (sr & SR_TEF) + err = -EIO; out: /* clear flags */ @@ -689,6 +700,7 @@ static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) { struct dma_slave_config dma_cfg; struct device *dev = qspi->dev; + struct dma_slave_caps caps; int ret = 0; memset(&dma_cfg, 0, sizeof(dma_cfg)); @@ -697,8 +709,6 @@ static int 
stm32_qspi_dma_setup(struct stm32_qspi *qspi) dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_cfg.src_addr = qspi->phys_base + QSPI_DR; dma_cfg.dst_addr = qspi->phys_base + QSPI_DR; - dma_cfg.src_maxburst = 4; - dma_cfg.dst_maxburst = 4; qspi->dma_chrx = dma_request_chan(dev, "rx"); if (IS_ERR(qspi->dma_chrx)) { @@ -707,6 +717,11 @@ static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) if (ret == -EPROBE_DEFER) goto out; } else { + ret = dma_get_slave_caps(qspi->dma_chrx, &caps); + if (ret) + return ret; + + dma_cfg.src_maxburst = caps.max_burst / dma_cfg.src_addr_width; if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) { dev_err(dev, "dma rx config failed\n"); dma_release_channel(qspi->dma_chrx); @@ -719,6 +734,11 @@ static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) ret = PTR_ERR(qspi->dma_chtx); qspi->dma_chtx = NULL; } else { + ret = dma_get_slave_caps(qspi->dma_chtx, &caps); + if (ret) + return ret; + + dma_cfg.dst_maxburst = caps.max_burst / dma_cfg.dst_addr_width; if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) { dev_err(dev, "dma tx config failed\n"); dma_release_channel(qspi->dma_chtx); @@ -797,7 +817,6 @@ static int stm32_qspi_probe(struct platform_device *pdev) return ret; } - init_completion(&qspi->data_completion); init_completion(&qspi->match_completion); qspi->clk = devm_clk_get(dev, NULL); @@ -841,7 +860,6 @@ static int stm32_qspi_probe(struct platform_device *pdev) ctrl->use_gpio_descriptors = true; ctrl->transfer_one_message = stm32_qspi_transfer_one_message; ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP; - ctrl->dev.of_node = dev->of_node; pm_runtime_set_autosuspend_delay(dev, STM32_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); @@ -891,7 +909,7 @@ static void stm32_qspi_remove(struct platform_device *pdev) clk_disable_unprepare(qspi->clk); } -static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev) +static int stm32_qspi_runtime_suspend(struct device *dev) { struct stm32_qspi *qspi = 
dev_get_drvdata(dev); @@ -900,21 +918,21 @@ static int __maybe_unused stm32_qspi_runtime_suspend(struct device *dev) return 0; } -static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev) +static int stm32_qspi_runtime_resume(struct device *dev) { struct stm32_qspi *qspi = dev_get_drvdata(dev); return clk_prepare_enable(qspi->clk); } -static int __maybe_unused stm32_qspi_suspend(struct device *dev) +static int stm32_qspi_suspend(struct device *dev) { pinctrl_pm_select_sleep_state(dev); return pm_runtime_force_suspend(dev); } -static int __maybe_unused stm32_qspi_resume(struct device *dev) +static int stm32_qspi_resume(struct device *dev) { struct stm32_qspi *qspi = dev_get_drvdata(dev); int ret; @@ -938,9 +956,8 @@ static int __maybe_unused stm32_qspi_resume(struct device *dev) } static const struct dev_pm_ops stm32_qspi_pm_ops = { - SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend, - stm32_qspi_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume) + RUNTIME_PM_OPS(stm32_qspi_runtime_suspend, stm32_qspi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume) }; static const struct of_device_id stm32_qspi_match[] = { @@ -955,7 +972,7 @@ static struct platform_driver stm32_qspi_driver = { .driver = { .name = "stm32-qspi", .of_match_table = stm32_qspi_match, - .pm = &stm32_qspi_pm_ops, + .pm = pm_ptr(&stm32_qspi_pm_ops), }, }; module_platform_driver(stm32_qspi_driver); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 2c804c1aef98..b99de8c4cc99 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -202,6 +202,10 @@ #define STM32_SPI_HOST_MODE(stm32_spi) (!(stm32_spi)->device_mode) #define STM32_SPI_DEVICE_MODE(stm32_spi) ((stm32_spi)->device_mode) +static unsigned int polling_limit_us = 30; +module_param(polling_limit_us, uint, 0664); +MODULE_PARM_DESC(polling_limit_us, "maximum time in us to run a transfer in polling mode\n"); + /** * struct stm32_spi_reg - stm32 
SPI register & bitfield desc * @reg: register offset @@ -266,6 +270,7 @@ struct stm32_spi; * @dma_rx_cb: routine to call after DMA RX channel operation is complete * @dma_tx_cb: routine to call after DMA TX channel operation is complete * @transfer_one_irq: routine to configure interrupts for driver + * @transfer_one_poll: routine to perform a transfer via register polling * @irq_handler_event: Interrupt handler for SPI controller events * @irq_handler_thread: thread of interrupt handler for SPI controller * @baud_rate_div_min: minimum baud rate divisor @@ -291,6 +296,7 @@ struct stm32_spi_cfg { void (*dma_rx_cb)(void *data); void (*dma_tx_cb)(void *data); int (*transfer_one_irq)(struct stm32_spi *spi); + int (*transfer_one_poll)(struct stm32_spi *spi); irqreturn_t (*irq_handler_event)(int irq, void *dev_id); irqreturn_t (*irq_handler_thread)(int irq, void *dev_id); unsigned int baud_rate_div_min; @@ -1355,6 +1361,55 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi) return 1; } +/** + * stm32h7_spi_transfer_one_poll - transfer a single spi_transfer by direct + * register access without interrupt usage + * @spi: pointer to the spi controller data structure + * + * It must returns 0 if the transfer is finished or 1 if the transfer is still + * in progress. 
+ */ +static int stm32h7_spi_transfer_one_poll(struct stm32_spi *spi) +{ + unsigned long flags; + u32 sr; + + spin_lock_irqsave(&spi->lock, flags); + + stm32_spi_enable(spi); + + /* Be sure to have data in fifo before starting data transfer */ + if (spi->tx_buf) + stm32h7_spi_write_txfifo(spi); + + if (STM32_SPI_HOST_MODE(spi)) + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); + + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + /* Keep writing / reading while waiting for the end of transfer */ + while (spi->tx_len || spi->rx_len || !(sr & STM32H7_SPI_SR_EOT)) { + if (spi->rx_len && (sr & (STM32H7_SPI_SR_RXP | STM32H7_SPI_SR_RXWNE | + STM32H7_SPI_SR_RXPLVL))) + stm32h7_spi_read_rxfifo(spi); + + if (spi->tx_len && (sr & STM32H7_SPI_SR_TXP)) + stm32h7_spi_write_txfifo(spi); + + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + + /* Clear suspension bit if necessary */ + if (sr & STM32H7_SPI_SR_SUSP) + writel_relaxed(sr & STM32H7_SPI_SR_SUSP, spi->base + STM32H7_SPI_IFCR); + } + + spin_unlock_irqrestore(&spi->lock, flags); + + stm32h7_spi_disable(spi); + spi_finalize_current_transfer(spi->ctrl); + + return 0; +} + /** * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using * interrupts @@ -1906,11 +1961,12 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, struct spi_transfer cfg2_clrb |= STM32H7_SPI_CFG2_MIDI; if ((len > 1) && (spi->cur_midi > 0)) { u32 sck_period_ns = DIV_ROUND_UP(NSEC_PER_SEC, spi->cur_speed); - u32 midi = min_t(u32, - DIV_ROUND_UP(spi->cur_midi, sck_period_ns), - FIELD_GET(STM32H7_SPI_CFG2_MIDI, - STM32H7_SPI_CFG2_MIDI)); + u32 midi = DIV_ROUND_UP(spi->cur_midi, sck_period_ns); + if ((spi->cur_bpw + midi) < 8) + midi = 8 - spi->cur_bpw; + + midi = min_t(u32, midi, FIELD_MAX(STM32H7_SPI_CFG2_MIDI)); dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n", sck_period_ns, midi, midi * sck_period_ns); @@ -2025,6 +2081,24 @@ out: return ret; } +/** + * stm32_spi_can_poll - detect if poll based transfer is 
appropriate + * @spi: pointer to the spi controller data structure + * + * Returns true is poll is more appropriate, false otherwise. + */ +static bool stm32_spi_can_poll(struct stm32_spi *spi) +{ + unsigned long hz_per_byte, byte_limit; + + /* Evaluate the transfer time and use polling if applicable */ + hz_per_byte = polling_limit_us ? + DIV_ROUND_UP(8 * USEC_PER_SEC, polling_limit_us) : 0; + byte_limit = hz_per_byte ? spi->cur_speed / hz_per_byte : 1; + + return (spi->cur_xferlen < byte_limit) ? true : false; +} + /** * stm32_spi_transfer_one - transfer a single spi_transfer * @ctrl: controller interface @@ -2057,6 +2131,8 @@ static int stm32_spi_transfer_one(struct spi_controller *ctrl, if (spi->cur_usedma) return stm32_spi_transfer_one_dma(spi, transfer); + else if (spi->cfg->transfer_one_poll && stm32_spi_can_poll(spi)) + return spi->cfg->transfer_one_poll(spi); else return spi->cfg->transfer_one_irq(spi); } @@ -2215,6 +2291,7 @@ static const struct stm32_spi_cfg stm32h7_spi_cfg = { * SPI access hence handling is performed within the SPI interrupt */ .transfer_one_irq = stm32h7_spi_transfer_one_irq, + .transfer_one_poll = stm32h7_spi_transfer_one_poll, .irq_handler_thread = stm32h7_spi_irq_thread, .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, @@ -2244,6 +2321,7 @@ static const struct stm32_spi_cfg stm32mp25_spi_cfg = { * SPI access hence handling is performed within the SPI interrupt */ .transfer_one_irq = stm32h7_spi_transfer_one_irq, + .transfer_one_poll = stm32h7_spi_transfer_one_poll, .irq_handler_thread = stm32h7_spi_irq_thread, .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, @@ -2386,7 +2464,6 @@ static int stm32_spi_probe(struct platform_device *pdev) goto err_clk_disable; } - ctrl->dev.of_node = pdev->dev.of_node; ctrl->auto_runtime_pm = true; ctrl->bus_num = pdev->id; ctrl->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | @@ -2406,11 +2483,13 @@ 
static int stm32_spi_probe(struct platform_device *pdev) spi->dma_tx = dma_request_chan(spi->dev, "tx"); if (IS_ERR(spi->dma_tx)) { ret = PTR_ERR(spi->dma_tx); - spi->dma_tx = NULL; - if (ret == -EPROBE_DEFER) + if (ret == -ENODEV) { + dev_info(&pdev->dev, "tx dma disabled\n"); + spi->dma_tx = NULL; + } else { + dev_err_probe(&pdev->dev, ret, "failed to request tx dma channel\n"); goto err_clk_disable; - - dev_warn(&pdev->dev, "failed to request tx dma channel\n"); + } } else { ctrl->dma_tx = spi->dma_tx; } @@ -2418,11 +2497,13 @@ static int stm32_spi_probe(struct platform_device *pdev) spi->dma_rx = dma_request_chan(spi->dev, "rx"); if (IS_ERR(spi->dma_rx)) { ret = PTR_ERR(spi->dma_rx); - spi->dma_rx = NULL; - if (ret == -EPROBE_DEFER) + if (ret == -ENODEV) { + dev_info(&pdev->dev, "rx dma disabled\n"); + spi->dma_rx = NULL; + } else { + dev_err_probe(&pdev->dev, ret, "failed to request rx dma channel\n"); goto err_dma_release; - - dev_warn(&pdev->dev, "failed to request rx dma channel\n"); + } } else { ctrl->dma_rx = spi->dma_rx; } @@ -2532,7 +2613,7 @@ static void stm32_spi_remove(struct platform_device *pdev) pinctrl_pm_select_sleep_state(&pdev->dev); } -static int __maybe_unused stm32_spi_runtime_suspend(struct device *dev) +static int stm32_spi_runtime_suspend(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct stm32_spi *spi = spi_controller_get_devdata(ctrl); @@ -2542,7 +2623,7 @@ static int __maybe_unused stm32_spi_runtime_suspend(struct device *dev) return pinctrl_pm_select_sleep_state(dev); } -static int __maybe_unused stm32_spi_runtime_resume(struct device *dev) +static int stm32_spi_runtime_resume(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct stm32_spi *spi = spi_controller_get_devdata(ctrl); @@ -2555,7 +2636,7 @@ static int __maybe_unused stm32_spi_runtime_resume(struct device *dev) return clk_prepare_enable(spi->clk); } -static int __maybe_unused stm32_spi_suspend(struct device *dev) 
+static int stm32_spi_suspend(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); int ret; @@ -2567,7 +2648,7 @@ static int __maybe_unused stm32_spi_suspend(struct device *dev) return pm_runtime_force_suspend(dev); } -static int __maybe_unused stm32_spi_resume(struct device *dev) +static int stm32_spi_resume(struct device *dev) { struct spi_controller *ctrl = dev_get_drvdata(dev); struct stm32_spi *spi = spi_controller_get_devdata(ctrl); @@ -2597,9 +2678,8 @@ static int __maybe_unused stm32_spi_resume(struct device *dev) } static const struct dev_pm_ops stm32_spi_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume) - SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend, - stm32_spi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume) + RUNTIME_PM_OPS(stm32_spi_runtime_suspend, stm32_spi_runtime_resume, NULL) }; static struct platform_driver stm32_spi_driver = { @@ -2607,7 +2687,7 @@ static struct platform_driver stm32_spi_driver = { .remove = stm32_spi_remove, .driver = { .name = DRIVER_NAME, - .pm = &stm32_spi_pm_ops, + .pm = pm_ptr(&stm32_spi_pm_ops), .of_match_table = stm32_spi_of_match, }, }; diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index aa92fd5a35a9..bfdf419a583c 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -471,7 +471,6 @@ static int sun4i_spi_probe(struct platform_device *pdev) host->num_chipselect = 4; host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; host->bits_per_word_mask = SPI_BPW_MASK(8); - host->dev.of_node = pdev->dev.of_node; host->auto_runtime_pm = true; host->max_transfer_size = sun4i_spi_max_transfer_size; diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index d1de6c99e762..240e46f84f7b 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -673,7 +673,6 @@ static int sun6i_spi_probe(struct platform_device *pdev) host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | 
sspi->cfg->mode_bits; host->bits_per_word_mask = SPI_BPW_MASK(8); - host->dev.of_node = pdev->dev.of_node; host->auto_runtime_pm = true; host->max_transfer_size = sun6i_spi_max_transfer_size; diff --git a/drivers/spi/spi-sunplus-sp7021.c b/drivers/spi/spi-sunplus-sp7021.c index 256ae07db6be..789b092fe8c0 100644 --- a/drivers/spi/spi-sunplus-sp7021.c +++ b/drivers/spi/spi-sunplus-sp7021.c @@ -419,7 +419,6 @@ static int sp7021_spi_controller_probe(struct platform_device *pdev) ctlr = devm_spi_alloc_host(dev, sizeof(*pspim)); if (!ctlr) return -ENOMEM; - device_set_node(&ctlr->dev, dev_fwnode(dev)); ctlr->bus_num = pdev->id; ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; ctlr->auto_runtime_pm = true; diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c index eaf560487591..d0a875249910 100644 --- a/drivers/spi/spi-synquacer.c +++ b/drivers/spi/spi-synquacer.c @@ -600,7 +600,6 @@ static irqreturn_t sq_spi_tx_handler(int irq, void *priv) static int synquacer_spi_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; struct spi_controller *host; struct synquacer_spi *sspi; int ret; @@ -699,8 +698,6 @@ static int synquacer_spi_probe(struct platform_device *pdev) goto disable_clk; } - host->dev.of_node = np; - host->dev.fwnode = pdev->dev.fwnode; host->auto_runtime_pm = true; host->bus_num = pdev->id; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 48fb11fea55f..d805da250a10 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -1415,7 +1415,6 @@ static int tegra_spi_probe(struct platform_device *pdev) goto exit_pm_disable; } - host->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_controller(&pdev->dev, host); if (ret < 0) { dev_err(&pdev->dev, "can not register to host err %d\n", ret); diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index d5c8ee20b8e5..d9d536d7f7b6 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ 
b/drivers/spi/spi-tegra20-sflash.c @@ -505,7 +505,6 @@ static int tegra_sflash_probe(struct platform_device *pdev) tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND); pm_runtime_put(&pdev->dev); - host->dev.of_node = pdev->dev.of_node; ret = devm_spi_register_controller(&pdev->dev, host); if (ret < 0) { dev_err(&pdev->dev, "can not register to host err %d\n", ret); diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 709669610840..8c608abd6076 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c @@ -1105,7 +1105,6 @@ static int tegra_slink_probe(struct platform_device *pdev) tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); - host->dev.of_node = pdev->dev.of_node; ret = spi_register_controller(host); if (ret < 0) { dev_err(&pdev->dev, "can not register to host err %d\n", ret); diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index f425d62e0c27..7ea5aa993596 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -1791,7 +1791,6 @@ static int tegra_qspi_probe(struct platform_device *pdev) goto exit_pm_disable; } - host->dev.of_node = pdev->dev.of_node; ret = spi_register_controller(host); if (ret < 0) { dev_err(&pdev->dev, "failed to register host: %d\n", ret); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 0b7eaccbc797..d1d880a8ed7d 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -775,7 +775,6 @@ static int ti_qspi_probe(struct platform_device *pdev) host->setup = ti_qspi_setup; host->auto_runtime_pm = true; host->transfer_one_message = ti_qspi_start_transfer_one; - host->dev.of_node = pdev->dev.of_node; host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); host->mem_ops = &ti_qspi_mem_ops; diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 
ff2142f87277..9e1d364a6198 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -697,7 +697,6 @@ static int uniphier_spi_probe(struct platform_device *pdev) host->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER); host->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER); host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST; - host->dev.of_node = pdev->dev.of_node; host->bus_num = pdev->id; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); diff --git a/drivers/spi/spi-virtio.c b/drivers/spi/spi-virtio.c index 2acb929b2c69..9e66c917fb75 100644 --- a/drivers/spi/spi-virtio.c +++ b/drivers/spi/spi-virtio.c @@ -150,7 +150,6 @@ static int virtio_spi_transfer_one(struct spi_controller *ctrl, struct spi_transfer *xfer) { struct virtio_spi_priv *priv = spi_controller_get_devdata(ctrl); - struct virtio_spi_req *spi_req __free(kfree) = NULL; struct spi_transfer_head *th; struct scatterlist sg_out_head, sg_out_payload; struct scatterlist sg_in_result, sg_in_payload; @@ -159,7 +158,8 @@ static int virtio_spi_transfer_one(struct spi_controller *ctrl, unsigned int incnt = 0; int ret; - spi_req = kzalloc(sizeof(*spi_req), GFP_KERNEL); + struct virtio_spi_req *spi_req __free(kfree) = kzalloc(sizeof(*spi_req), + GFP_KERNEL); if (!spi_req) return -ENOMEM; @@ -344,8 +344,6 @@ static int virtio_spi_probe(struct virtio_device *vdev) priv->vdev = vdev; vdev->priv = priv; - device_set_node(&ctrl->dev, dev_fwnode(&vdev->dev)); - dev_set_drvdata(&vdev->dev, ctrl); virtio_spi_read_config(vdev); diff --git a/drivers/spi/spi-wpcm-fiu.c b/drivers/spi/spi-wpcm-fiu.c index a9aee2a6c7dc..0e3ee5516587 100644 --- a/drivers/spi/spi-wpcm-fiu.c +++ b/drivers/spi/spi-wpcm-fiu.c @@ -471,7 +471,6 @@ static int wpcm_fiu_probe(struct platform_device *pdev) ctrl->bus_num = -1; ctrl->mem_ops = &wpcm_fiu_mem_ops; ctrl->num_chipselect = 4; - ctrl->dev.of_node = dev->of_node; /* * The FIU doesn't include a clock divider, the clock is entirely diff --git 
a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c index 33b78c537520..130a3d716dd4 100644 --- a/drivers/spi/spi-xcomm.c +++ b/drivers/spi/spi-xcomm.c @@ -260,7 +260,6 @@ static int spi_xcomm_probe(struct i2c_client *i2c) host->bits_per_word_mask = SPI_BPW_MASK(8); host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->transfer_one_message = spi_xcomm_transfer_one; - host->dev.of_node = i2c->dev.of_node; ret = devm_spi_register_controller(&i2c->dev, host); if (ret < 0) diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c index c86dc56f38b4..9f065d4e27d1 100644 --- a/drivers/spi/spi-xilinx.c +++ b/drivers/spi/spi-xilinx.c @@ -405,11 +405,11 @@ static int xilinx_spi_probe(struct platform_device *pdev) bits_per_word = pdata->bits_per_word; force_irq = pdata->force_irq; } else { - of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits", - &num_cs); - ret = of_property_read_u32(pdev->dev.of_node, - "xlnx,num-transfer-bits", - &bits_per_word); + device_property_read_u32(&pdev->dev, "xlnx,num-ss-bits", + &num_cs); + ret = device_property_read_u32(&pdev->dev, + "xlnx,num-transfer-bits", + &bits_per_word); if (ret) bits_per_word = 8; } @@ -447,7 +447,6 @@ static int xilinx_spi_probe(struct platform_device *pdev) host->bus_num = pdev->id; host->num_chipselect = num_cs; - host->dev.of_node = pdev->dev.of_node; /* * Detect endianess on the IP via loop bit in CR. 
Detection @@ -471,7 +470,7 @@ static int xilinx_spi_probe(struct platform_device *pdev) xspi->bytes_per_word = bits_per_word / 8; xspi->buffer_size = xilinx_spi_find_buffer_size(xspi); - xspi->irq = platform_get_irq(pdev, 0); + xspi->irq = platform_get_irq_optional(pdev, 0); if (xspi->irq < 0 && xspi->irq != -ENXIO) { return xspi->irq; } else if (xspi->irq >= 0) { diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c index 2fec18b68449..be8bbe1cbba3 100644 --- a/drivers/spi/spi-xlp.c +++ b/drivers/spi/spi-xlp.c @@ -409,7 +409,6 @@ static int xlp_spi_probe(struct platform_device *pdev) host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; host->setup = xlp_spi_setup; host->transfer_one = xlp_spi_transfer_one; - host->dev.of_node = pdev->dev.of_node; init_completion(&xspi->done); spi_controller_set_devdata(host, xspi); diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c index 1b54d8f9f5ec..71f0f176cfd9 100644 --- a/drivers/spi/spi-xtensa-xtfpga.c +++ b/drivers/spi/spi-xtensa-xtfpga.c @@ -90,7 +90,6 @@ static int xtfpga_spi_probe(struct platform_device *pdev) host->flags = SPI_CONTROLLER_NO_RX; host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); host->bus_num = pdev->dev.id; - host->dev.of_node = pdev->dev.of_node; xspi = spi_controller_get_devdata(host); xspi->bitbang.ctlr = host; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e25df9990f82..a24de5c98399 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2354,8 +2354,8 @@ static void of_spi_parse_dt_cs_delay(struct device_node *nc, static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, struct device_node *nc) { - u32 value, cs[SPI_DEVICE_CS_CNT_MAX]; - int rc, idx; + u32 value, cs[SPI_DEVICE_CS_CNT_MAX], map[SPI_DEVICE_DATA_LANE_CNT_MAX]; + int rc, idx, max_num_data_lanes; /* Mode (clock phase/polarity/etc.) 
*/ if (of_property_read_bool(nc, "spi-cpha")) @@ -2370,7 +2370,65 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, spi->mode |= SPI_CS_HIGH; /* Device DUAL/QUAD mode */ - if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { + + rc = of_property_read_variable_u32_array(nc, "spi-tx-lane-map", map, 1, + ARRAY_SIZE(map)); + if (rc >= 0) { + max_num_data_lanes = rc; + for (idx = 0; idx < max_num_data_lanes; idx++) + spi->tx_lane_map[idx] = map[idx]; + } else if (rc == -EINVAL) { + /* Default lane map is identity mapping. */ + max_num_data_lanes = ARRAY_SIZE(spi->tx_lane_map); + for (idx = 0; idx < max_num_data_lanes; idx++) + spi->tx_lane_map[idx] = idx; + } else { + dev_err(&ctlr->dev, + "failed to read spi-tx-lane-map property: %d\n", rc); + return rc; + } + + rc = of_property_count_u32_elems(nc, "spi-tx-bus-width"); + if (rc < 0 && rc != -EINVAL) { + dev_err(&ctlr->dev, + "failed to read spi-tx-bus-width property: %d\n", rc); + return rc; + } + if (rc > max_num_data_lanes) { + dev_err(&ctlr->dev, + "spi-tx-bus-width has more elements (%d) than spi-tx-lane-map (%d)\n", + rc, max_num_data_lanes); + return -EINVAL; + } + + if (rc == -EINVAL) { + /* Default when property is not present. */ + spi->num_tx_lanes = 1; + } else { + u32 first_value; + + spi->num_tx_lanes = rc; + + for (idx = 0; idx < spi->num_tx_lanes; idx++) { + rc = of_property_read_u32_index(nc, "spi-tx-bus-width", + idx, &value); + if (rc) + return rc; + + /* + * For now, we only support all lanes having the same + * width so we can keep using the existing mode flags. 
+ */ + if (!idx) + first_value = value; + else if (first_value != value) { + dev_err(&ctlr->dev, + "spi-tx-bus-width has inconsistent values: first %d vs later %d\n", + first_value, value); + return -EINVAL; + } + } + switch (value) { case 0: spi->mode |= SPI_NO_TX; @@ -2394,7 +2452,74 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, } } - if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { + for (idx = 0; idx < spi->num_tx_lanes; idx++) { + if (spi->tx_lane_map[idx] >= spi->controller->num_data_lanes) { + dev_err(&ctlr->dev, + "spi-tx-lane-map has invalid value %d (num_data_lanes=%d)\n", + spi->tx_lane_map[idx], + spi->controller->num_data_lanes); + return -EINVAL; + } + } + + rc = of_property_read_variable_u32_array(nc, "spi-rx-lane-map", map, 1, + ARRAY_SIZE(map)); + if (rc >= 0) { + max_num_data_lanes = rc; + for (idx = 0; idx < max_num_data_lanes; idx++) + spi->rx_lane_map[idx] = map[idx]; + } else if (rc == -EINVAL) { + /* Default lane map is identity mapping. */ + max_num_data_lanes = ARRAY_SIZE(spi->rx_lane_map); + for (idx = 0; idx < max_num_data_lanes; idx++) + spi->rx_lane_map[idx] = idx; + } else { + dev_err(&ctlr->dev, + "failed to read spi-rx-lane-map property: %d\n", rc); + return rc; + } + + rc = of_property_count_u32_elems(nc, "spi-rx-bus-width"); + if (rc < 0 && rc != -EINVAL) { + dev_err(&ctlr->dev, + "failed to read spi-rx-bus-width property: %d\n", rc); + return rc; + } + if (rc > max_num_data_lanes) { + dev_err(&ctlr->dev, + "spi-rx-bus-width has more elements (%d) than spi-rx-lane-map (%d)\n", + rc, max_num_data_lanes); + return -EINVAL; + } + + if (rc == -EINVAL) { + /* Default when property is not present. 
*/ + spi->num_rx_lanes = 1; + } else { + u32 first_value; + + spi->num_rx_lanes = rc; + + for (idx = 0; idx < spi->num_rx_lanes; idx++) { + rc = of_property_read_u32_index(nc, "spi-rx-bus-width", + idx, &value); + if (rc) + return rc; + + /* + * For now, we only support all lanes having the same + * width so we can keep using the existing mode flags. + */ + if (!idx) + first_value = value; + else if (first_value != value) { + dev_err(&ctlr->dev, + "spi-rx-bus-width has inconsistent values: first %d vs later %d\n", + first_value, value); + return -EINVAL; + } + } + switch (value) { case 0: spi->mode |= SPI_NO_RX; @@ -2418,6 +2543,16 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, } } + for (idx = 0; idx < spi->num_rx_lanes; idx++) { + if (spi->rx_lane_map[idx] >= spi->controller->num_data_lanes) { + dev_err(&ctlr->dev, + "spi-rx-lane-map has invalid value %d (num_data_lanes=%d)\n", + spi->rx_lane_map[idx], + spi->controller->num_data_lanes); + return -EINVAL; + } + } + if (spi_controller_is_target(ctlr)) { if (!of_node_name_eq(nc, "slave")) { dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", @@ -3066,12 +3201,16 @@ struct spi_controller *__spi_alloc_controller(struct device *dev, mutex_init(&ctlr->add_lock); ctlr->bus_num = -1; ctlr->num_chipselect = 1; + ctlr->num_data_lanes = 1; ctlr->target = target; if (IS_ENABLED(CONFIG_SPI_SLAVE) && target) ctlr->dev.class = &spi_target_class; else ctlr->dev.class = &spi_controller_class; ctlr->dev.parent = dev; + + device_set_node(&ctlr->dev, dev_fwnode(dev)); + pm_suspend_ignore_children(&ctlr->dev, true); spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); @@ -3079,9 +3218,9 @@ struct spi_controller *__spi_alloc_controller(struct device *dev, } EXPORT_SYMBOL_GPL(__spi_alloc_controller); -static void devm_spi_release_controller(struct device *dev, void *ctlr) +static void devm_spi_release_controller(void *ctlr) { - spi_controller_put(*(struct spi_controller **)ctlr); + 
spi_controller_put(ctlr); } /** @@ -3103,21 +3242,18 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, bool target) { - struct spi_controller **ptr, *ctlr; - - ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), - GFP_KERNEL); - if (!ptr) - return NULL; + struct spi_controller *ctlr; + int ret; ctlr = __spi_alloc_controller(dev, size, target); - if (ctlr) { - ctlr->devm_allocated = true; - *ptr = ctlr; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } + if (!ctlr) + return NULL; + + ret = devm_add_action_or_reset(dev, devm_spi_release_controller, ctlr); + if (ret) + return NULL; + + ctlr->devm_allocated = true; return ctlr; } @@ -3378,9 +3514,9 @@ free_bus_id: } EXPORT_SYMBOL_GPL(spi_register_controller); -static void devm_spi_unregister(struct device *dev, void *res) +static void devm_spi_unregister_controller(void *ctlr) { - spi_unregister_controller(*(struct spi_controller **)res); + spi_unregister_controller(ctlr); } /** @@ -3398,22 +3534,14 @@ static void devm_spi_unregister(struct device *dev, void *res) int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr) { - struct spi_controller **ptr; int ret; - ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return -ENOMEM; - ret = spi_register_controller(ctlr); - if (!ret) { - *ptr = ctlr; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } + if (ret) + return ret; + + return devm_add_action_or_reset(dev, devm_spi_unregister_controller, ctlr); - return ret; } EXPORT_SYMBOL_GPL(devm_spi_register_controller); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 82390712794c..e4db0924898c 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -20,10 +20,10 @@ .opcode = __opcode, \ } -#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \ +#define SPI_MEM_DTR_OP_RPT_CMD(__opcode, __buswidth) \ { \ - .nbytes = 1, \ - .opcode = __opcode, \ + .nbytes = 2, 
\ + .opcode = __opcode | __opcode << 8, \ .buswidth = __buswidth, \ .dtr = true, \ } @@ -43,6 +43,14 @@ .dtr = true, \ } +#define SPI_MEM_DTR_OP_RPT_ADDR(__val, __buswidth) \ + { \ + .nbytes = 2, \ + .val = __val | __val << 8, \ + .buswidth = __buswidth, \ + .dtr = true, \ + } + #define SPI_MEM_OP_NO_ADDR { } #define SPI_MEM_OP_DUMMY(__nbytes, __buswidth) \ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index cb2c2df31089..39681f7e063b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -23,6 +23,9 @@ /* Max no. of CS supported per spi device */ #define SPI_DEVICE_CS_CNT_MAX 4 +/* Max no. of data lanes supported per spi device */ +#define SPI_DEVICE_DATA_LANE_CNT_MAX 8 + struct dma_chan; struct software_node; struct ptp_system_timestamp; @@ -174,6 +177,10 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg, * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines * (optional, NULL when not using a GPIO line) + * @tx_lane_map: Map of peripheral lanes (index) to controller lanes (value). + * @num_tx_lanes: Number of transmit lanes wired up. + * @rx_lane_map: Map of peripheral lanes (index) to controller lanes (value). + * @num_rx_lanes: Number of receive lanes wired up. * * A @spi_device is used to interchange data between an SPI target device * (usually a discrete chip) and CPU memory. @@ -242,6 +249,12 @@ struct spi_device { struct gpio_desc *cs_gpiod[SPI_DEVICE_CS_CNT_MAX]; /* Chip select gpio desc */ + /* Multi-lane SPI controller support. 
*/ + u8 tx_lane_map[SPI_DEVICE_DATA_LANE_CNT_MAX]; + u8 num_tx_lanes; + u8 rx_lane_map[SPI_DEVICE_DATA_LANE_CNT_MAX]; + u8 num_rx_lanes; + /* * Likely need more hooks for more protocol options affecting how * the controller talks to each chip, like: @@ -401,6 +414,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * SPI targets, and are numbered from zero to num_chipselects. * each target has a chipselect signal, but it's common that not * every chipselect is connected to a target. + * @num_data_lanes: Number of data lanes supported by this controller. Default is 1. * @dma_alignment: SPI controller constraint on DMA buffers alignment. * @mode_bits: flags understood by this controller driver * @buswidth_override_bits: flags to override for this controller driver @@ -576,6 +590,14 @@ struct spi_controller { */ u16 num_chipselect; + /* + * Some specialized SPI controllers can have more than one physical + * data lane interface per controller (each having its own serializer). + * This specifies the number of data lanes in that case. Other + * controllers do not need to set this (defaults to 1). + */ + u16 num_data_lanes; + /* Some SPI controllers pose alignment requirements on DMAable * buffers; let protocol drivers know about these requirements. */ @@ -959,6 +981,8 @@ struct spi_res { * (SPI_NBITS_SINGLE) is used. * @rx_nbits: number of bits used for reading. If 0 the default * (SPI_NBITS_SINGLE) is used. + * @multi_lane_mode: How to serialize data on multiple lanes. One of the + * SPI_MULTI_LANE_MODE_* values. * @len: size of rx and tx buffers (in bytes) * @speed_hz: Select a speed other than the device default for this * transfer. If 0 the default (from @spi_device) is used. 
@@ -1095,6 +1119,12 @@ struct spi_transfer { unsigned cs_change:1; unsigned tx_nbits:4; unsigned rx_nbits:4; + +#define SPI_MULTI_LANE_MODE_SINGLE 0 /* only use single lane */ +#define SPI_MULTI_LANE_MODE_STRIPE 1 /* one data word per lane */ +#define SPI_MULTI_LANE_MODE_MIRROR 2 /* same word sent on all lanes */ + unsigned multi_lane_mode: 2; + unsigned timestamped:1; bool dtr_mode; #define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */ diff --git a/tools/spi/.gitignore b/tools/spi/.gitignore index 14ddba3d2195..038261b34ed8 100644 --- a/tools/spi/.gitignore +++ b/tools/spi/.gitignore @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only spidev_fdx spidev_test +include/