mediatek: 5.15: add missing patch suffix
[openwrt/staging/wigyori.git] / target / linux / mediatek / patches-5.15 / 213-spi-mediatek-add-mt7986-spi-support.patch
1 From 7d99750f96fc6904d54affebdc8c9b0bfae1e9e8 Mon Sep 17 00:00:00 2001
2 From: Sam Shih <sam.shih@mediatek.com>
3 Date: Sun, 17 Apr 2022 11:40:22 +0800
4 Subject: [PATCH] spi: mediatek: backport document and driver to support mt7986
5 spi design
6
7 this patch adds the support of ipm design and upgrades the devicetree binding
8
9 The patch is coming from the following threads
10 - https://lore.kernel.org/all/20220315032411.2826-1-leilk.liu@mediatek.com/
11 - https://lore.kernel.org/all/20220401071616.8874-1-leilk.liu@mediatek.com/
12
13 Signed-off-by: Sam Shih <sam.shih@mediatek.com>
14 ---
15 .../bindings/spi/mediatek,spi-mt65xx.yaml | 111 ++++
16 drivers/spi/spi-mt65xx.c | 509 ++++++++++++++++--
17 2 files changed, 572 insertions(+), 48 deletions(-)
18 create mode 100644 Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
19
20 --- /dev/null
21 +++ b/Documentation/devicetree/bindings/spi/mediatek,spi-mt65xx.yaml
22 @@ -0,0 +1,111 @@
23 +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
24 +%YAML 1.2
25 +---
26 +$id: http://devicetree.org/schemas/spi/mediatek,spi-mt65xx.yaml#
27 +$schema: http://devicetree.org/meta-schemas/core.yaml#
28 +
29 +title: SPI Bus controller for MediaTek ARM SoCs
30 +
31 +maintainers:
32 + - Leilk Liu <leilk.liu@mediatek.com>
33 +
34 +allOf:
35 + - $ref: "/schemas/spi/spi-controller.yaml#"
36 +
37 +properties:
38 + compatible:
39 + oneOf:
40 + - items:
41 + - enum:
42 + - mediatek,mt7629-spi
43 + - const: mediatek,mt7622-spi
44 + - items:
45 + - enum:
46 + - mediatek,mt8516-spi
47 + - const: mediatek,mt2712-spi
48 + - items:
49 + - enum:
50 + - mediatek,mt6779-spi
51 + - mediatek,mt8186-spi
52 + - mediatek,mt8192-spi
53 + - mediatek,mt8195-spi
54 + - const: mediatek,mt6765-spi
55 + - items:
56 + - enum:
57 + - mediatek,mt7986-spi-ipm
58 + - const: mediatek,spi-ipm
59 + - items:
60 + - enum:
61 + - mediatek,mt2701-spi
62 + - mediatek,mt2712-spi
63 + - mediatek,mt6589-spi
64 + - mediatek,mt6765-spi
65 + - mediatek,mt6893-spi
66 + - mediatek,mt7622-spi
67 + - mediatek,mt8135-spi
68 + - mediatek,mt8173-spi
69 + - mediatek,mt8183-spi
70 +
71 + reg:
72 + maxItems: 1
73 +
74 + interrupts:
75 + maxItems: 1
76 +
77 + clocks:
78 + minItems: 3
79 + items:
80 + - description: clock used for the parent clock
81 + - description: clock used for the muxes clock
82 + - description: clock used for the clock gate
83 + - description: clock used for the AHB bus, this clock is optional
84 +
85 + clock-names:
86 + minItems: 3
87 + items:
88 + - const: parent-clk
89 + - const: sel-clk
90 + - const: spi-clk
91 + - const: hclk
92 +
93 + mediatek,pad-select:
94 + $ref: /schemas/types.yaml#/definitions/uint32-array
95 + minItems: 1
96 + maxItems: 4
97 + items:
98 + enum: [0, 1, 2, 3]
99 + description:
100 + specify which pins group(ck/mi/mo/cs) spi controller used.
101 + This is an array.
102 +
103 +required:
104 + - compatible
105 + - reg
106 + - interrupts
107 + - clocks
108 + - clock-names
109 + - '#address-cells'
110 + - '#size-cells'
111 +
112 +unevaluatedProperties: false
113 +
114 +examples:
115 + - |
116 + #include <dt-bindings/clock/mt8173-clk.h>
117 + #include <dt-bindings/gpio/gpio.h>
118 + #include <dt-bindings/interrupt-controller/arm-gic.h>
119 + #include <dt-bindings/interrupt-controller/irq.h>
120 +
121 + spi@1100a000 {
122 + compatible = "mediatek,mt8173-spi";
123 + #address-cells = <1>;
124 + #size-cells = <0>;
125 + reg = <0x1100a000 0x1000>;
126 + interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
127 + clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
128 + <&topckgen CLK_TOP_SPI_SEL>,
129 + <&pericfg CLK_PERI_SPI0>;
130 + clock-names = "parent-clk", "sel-clk", "spi-clk";
131 + cs-gpios = <&pio 105 GPIO_ACTIVE_LOW>, <&pio 72 GPIO_ACTIVE_LOW>;
132 + mediatek,pad-select = <1>, <0>;
133 + };
134 --- a/drivers/spi/spi-mt65xx.c
135 +++ b/drivers/spi/spi-mt65xx.c
136 @@ -12,11 +12,12 @@
137 #include <linux/ioport.h>
138 #include <linux/module.h>
139 #include <linux/of.h>
140 -#include <linux/of_gpio.h>
141 +#include <linux/gpio/consumer.h>
142 #include <linux/platform_device.h>
143 #include <linux/platform_data/spi-mt65xx.h>
144 #include <linux/pm_runtime.h>
145 #include <linux/spi/spi.h>
146 +#include <linux/spi/spi-mem.h>
147 #include <linux/dma-mapping.h>
148
149 #define SPI_CFG0_REG 0x0000
150 @@ -31,6 +32,7 @@
151 #define SPI_CFG2_REG 0x0028
152 #define SPI_TX_SRC_REG_64 0x002c
153 #define SPI_RX_DST_REG_64 0x0030
154 +#define SPI_CFG3_IPM_REG 0x0040
155
156 #define SPI_CFG0_SCK_HIGH_OFFSET 0
157 #define SPI_CFG0_SCK_LOW_OFFSET 8
158 @@ -51,6 +53,7 @@
159 #define SPI_CFG1_CS_IDLE_MASK 0xff
160 #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
161 #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
162 +#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
163 #define SPI_CFG2_SCK_HIGH_OFFSET 0
164 #define SPI_CFG2_SCK_LOW_OFFSET 16
165
166 @@ -71,6 +74,24 @@
167 #define SPI_CMD_TX_ENDIAN BIT(15)
168 #define SPI_CMD_FINISH_IE BIT(16)
169 #define SPI_CMD_PAUSE_IE BIT(17)
170 +#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
171 +#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
172 +#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
173 +
174 +#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
175 +
176 +#define PIN_MODE_CFG(x) ((x) / 2)
177 +
178 +#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
179 +#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
180 +#define SPI_CFG3_IPM_XMODE_EN BIT(4)
181 +#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
182 +#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
183 +#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
184 +
185 +#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
186 +#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
187 +#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
188
189 #define MT8173_SPI_MAX_PAD_SEL 3
190
191 @@ -81,6 +102,9 @@
192
193 #define MTK_SPI_MAX_FIFO_SIZE 32U
194 #define MTK_SPI_PACKET_SIZE 1024
195 +#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
196 +#define MTK_SPI_IPM_PACKET_LOOP SZ_256
197 +
198 #define MTK_SPI_32BITS_MASK (0xffffffff)
199
200 #define DMA_ADDR_EXT_BITS (36)
201 @@ -96,6 +120,8 @@ struct mtk_spi_compatible {
202 bool dma_ext;
203 /* some IC no need unprepare SPI clk */
204 bool no_need_unprepare;
205 + /* IPM design adjust and extend register to support more features */
206 + bool ipm_design;
207 };
208
209 struct mtk_spi {
210 @@ -103,7 +129,7 @@ struct mtk_spi {
211 u32 state;
212 int pad_num;
213 u32 *pad_sel;
214 - struct clk *parent_clk, *sel_clk, *spi_clk;
215 + struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
216 struct spi_transfer *cur_transfer;
217 u32 xfer_len;
218 u32 num_xfered;
219 @@ -111,6 +137,11 @@ struct mtk_spi {
220 u32 tx_sgl_len, rx_sgl_len;
221 const struct mtk_spi_compatible *dev_comp;
222 u32 spi_clk_hz;
223 + struct completion spimem_done;
224 + bool use_spimem;
225 + struct device *dev;
226 + dma_addr_t tx_dma;
227 + dma_addr_t rx_dma;
228 };
229
230 static const struct mtk_spi_compatible mtk_common_compat;
231 @@ -119,6 +150,12 @@ static const struct mtk_spi_compatible m
232 .must_tx = true,
233 };
234
235 +static const struct mtk_spi_compatible mtk_ipm_compat = {
236 + .enhance_timing = true,
237 + .dma_ext = true,
238 + .ipm_design = true,
239 +};
240 +
241 static const struct mtk_spi_compatible mt6765_compat = {
242 .need_pad_sel = true,
243 .must_tx = true,
244 @@ -160,6 +197,9 @@ static const struct mtk_chip_config mtk_
245 };
246
247 static const struct of_device_id mtk_spi_of_match[] = {
248 + { .compatible = "mediatek,spi-ipm",
249 + .data = (void *)&mtk_ipm_compat,
250 + },
251 { .compatible = "mediatek,mt2701-spi",
252 .data = (void *)&mtk_common_compat,
253 },
254 @@ -278,12 +318,11 @@ static int mtk_spi_set_hw_cs_timing(stru
255 return 0;
256 }
257
258 -static int mtk_spi_prepare_message(struct spi_master *master,
259 - struct spi_message *msg)
260 +static int mtk_spi_hw_init(struct spi_master *master,
261 + struct spi_device *spi)
262 {
263 u16 cpha, cpol;
264 u32 reg_val;
265 - struct spi_device *spi = msg->spi;
266 struct mtk_chip_config *chip_config = spi->controller_data;
267 struct mtk_spi *mdata = spi_master_get_devdata(master);
268
269 @@ -291,6 +330,15 @@ static int mtk_spi_prepare_message(struc
270 cpol = spi->mode & SPI_CPOL ? 1 : 0;
271
272 reg_val = readl(mdata->base + SPI_CMD_REG);
273 + if (mdata->dev_comp->ipm_design) {
274 + /* SPI transfer without idle time until packet length done */
275 + reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
276 + if (spi->mode & SPI_LOOP)
277 + reg_val |= SPI_CMD_IPM_SPIM_LOOP;
278 + else
279 + reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
280 + }
281 +
282 if (cpha)
283 reg_val |= SPI_CMD_CPHA;
284 else
285 @@ -348,23 +396,39 @@ static int mtk_spi_prepare_message(struc
286 mdata->base + SPI_PAD_SEL_REG);
287
288 /* tick delay */
289 - reg_val = readl(mdata->base + SPI_CFG1_REG);
290 if (mdata->dev_comp->enhance_timing) {
291 - reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
292 - reg_val |= ((chip_config->tick_delay & 0x7)
293 - << SPI_CFG1_GET_TICK_DLY_OFFSET);
294 + if (mdata->dev_comp->ipm_design) {
295 + reg_val = readl(mdata->base + SPI_CMD_REG);
296 + reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
297 + reg_val |= ((chip_config->tick_delay & 0x7)
298 + << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
299 + writel(reg_val, mdata->base + SPI_CMD_REG);
300 + } else {
301 + reg_val = readl(mdata->base + SPI_CFG1_REG);
302 + reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
303 + reg_val |= ((chip_config->tick_delay & 0x7)
304 + << SPI_CFG1_GET_TICK_DLY_OFFSET);
305 + writel(reg_val, mdata->base + SPI_CFG1_REG);
306 + }
307 } else {
308 + reg_val = readl(mdata->base + SPI_CFG1_REG);
309 reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
310 reg_val |= ((chip_config->tick_delay & 0x3)
311 << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
312 + writel(reg_val, mdata->base + SPI_CFG1_REG);
313 }
314 - writel(reg_val, mdata->base + SPI_CFG1_REG);
315
316 /* set hw cs timing */
317 mtk_spi_set_hw_cs_timing(spi);
318 return 0;
319 }
320
321 +static int mtk_spi_prepare_message(struct spi_master *master,
322 + struct spi_message *msg)
323 +{
324 + return mtk_spi_hw_init(master, msg->spi);
325 +}
326 +
327 static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
328 {
329 u32 reg_val;
330 @@ -386,13 +450,13 @@ static void mtk_spi_set_cs(struct spi_de
331 }
332
333 static void mtk_spi_prepare_transfer(struct spi_master *master,
334 - struct spi_transfer *xfer)
335 + u32 speed_hz)
336 {
337 u32 div, sck_time, reg_val;
338 struct mtk_spi *mdata = spi_master_get_devdata(master);
339
340 - if (xfer->speed_hz < mdata->spi_clk_hz / 2)
341 - div = DIV_ROUND_UP(mdata->spi_clk_hz, xfer->speed_hz);
342 + if (speed_hz < mdata->spi_clk_hz / 2)
343 + div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
344 else
345 div = 1;
346
347 @@ -423,12 +487,24 @@ static void mtk_spi_setup_packet(struct
348 u32 packet_size, packet_loop, reg_val;
349 struct mtk_spi *mdata = spi_master_get_devdata(master);
350
351 - packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
352 + if (mdata->dev_comp->ipm_design)
353 + packet_size = min_t(u32,
354 + mdata->xfer_len,
355 + MTK_SPI_IPM_PACKET_SIZE);
356 + else
357 + packet_size = min_t(u32,
358 + mdata->xfer_len,
359 + MTK_SPI_PACKET_SIZE);
360 +
361 packet_loop = mdata->xfer_len / packet_size;
362
363 reg_val = readl(mdata->base + SPI_CFG1_REG);
364 - reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
365 + if (mdata->dev_comp->ipm_design)
366 + reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
367 + else
368 + reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
369 reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
370 + reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
371 reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
372 writel(reg_val, mdata->base + SPI_CFG1_REG);
373 }
374 @@ -523,7 +599,7 @@ static int mtk_spi_fifo_transfer(struct
375 mdata->cur_transfer = xfer;
376 mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
377 mdata->num_xfered = 0;
378 - mtk_spi_prepare_transfer(master, xfer);
379 + mtk_spi_prepare_transfer(master, xfer->speed_hz);
380 mtk_spi_setup_packet(master);
381
382 if (xfer->tx_buf) {
383 @@ -556,7 +632,7 @@ static int mtk_spi_dma_transfer(struct s
384 mdata->cur_transfer = xfer;
385 mdata->num_xfered = 0;
386
387 - mtk_spi_prepare_transfer(master, xfer);
388 + mtk_spi_prepare_transfer(master, xfer->speed_hz);
389
390 cmd = readl(mdata->base + SPI_CMD_REG);
391 if (xfer->tx_buf)
392 @@ -591,6 +667,19 @@ static int mtk_spi_transfer_one(struct s
393 struct spi_device *spi,
394 struct spi_transfer *xfer)
395 {
396 + struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
397 + u32 reg_val = 0;
398 +
399 + /* prepare xfer direction and duplex mode */
400 + if (mdata->dev_comp->ipm_design) {
401 + if (!xfer->tx_buf || !xfer->rx_buf) {
402 + reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
403 + if (xfer->rx_buf)
404 + reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
405 + }
406 + writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
407 + }
408 +
409 if (master->can_dma(master, spi, xfer))
410 return mtk_spi_dma_transfer(master, spi, xfer);
411 else
412 @@ -614,8 +703,9 @@ static int mtk_spi_setup(struct spi_devi
413 if (!spi->controller_data)
414 spi->controller_data = (void *)&mtk_default_chip_info;
415
416 - if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
417 - gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
418 + if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
419 + /* CS de-asserted, gpiolib will handle inversion */
420 + gpiod_direction_output(spi->cs_gpiod, 0);
421
422 return 0;
423 }
424 @@ -633,6 +723,12 @@ static irqreturn_t mtk_spi_interrupt(int
425 else
426 mdata->state = MTK_SPI_IDLE;
427
428 + /* SPI-MEM ops */
429 + if (mdata->use_spimem) {
430 + complete(&mdata->spimem_done);
431 + return IRQ_HANDLED;
432 + }
433 +
434 if (!master->can_dma(master, NULL, trans)) {
435 if (trans->rx_buf) {
436 cnt = mdata->xfer_len / 4;
437 @@ -716,6 +812,274 @@ static irqreturn_t mtk_spi_interrupt(int
438 return IRQ_HANDLED;
439 }
440
441 +static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
442 + struct spi_mem_op *op)
443 +{
444 + int opcode_len;
445 +
446 + if (op->data.dir != SPI_MEM_NO_DATA) {
447 + opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
448 + if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
449 + op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
450 + /* force data buffer dma-aligned. */
451 + op->data.nbytes -= op->data.nbytes % 4;
452 + }
453 + }
454 +
455 + return 0;
456 +}
457 +
458 +static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
459 + const struct spi_mem_op *op)
460 +{
461 + if (!spi_mem_default_supports_op(mem, op))
462 + return false;
463 +
464 + if (op->addr.nbytes && op->dummy.nbytes &&
465 + op->addr.buswidth != op->dummy.buswidth)
466 + return false;
467 +
468 + if (op->addr.nbytes + op->dummy.nbytes > 16)
469 + return false;
470 +
471 + if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
472 + if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
473 + MTK_SPI_IPM_PACKET_LOOP ||
474 + op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
475 + return false;
476 + }
477 +
478 + return true;
479 +}
480 +
481 +static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
482 + const struct spi_mem_op *op)
483 +{
484 + struct mtk_spi *mdata = spi_master_get_devdata(master);
485 +
486 + writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
487 + mdata->base + SPI_TX_SRC_REG);
488 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
489 + if (mdata->dev_comp->dma_ext)
490 + writel((u32)(mdata->tx_dma >> 32),
491 + mdata->base + SPI_TX_SRC_REG_64);
492 +#endif
493 +
494 + if (op->data.dir == SPI_MEM_DATA_IN) {
495 + writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
496 + mdata->base + SPI_RX_DST_REG);
497 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
498 + if (mdata->dev_comp->dma_ext)
499 + writel((u32)(mdata->rx_dma >> 32),
500 + mdata->base + SPI_RX_DST_REG_64);
501 +#endif
502 + }
503 +}
504 +
505 +static int mtk_spi_transfer_wait(struct spi_mem *mem,
506 + const struct spi_mem_op *op)
507 +{
508 + struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
509 + /*
510 + * For each byte we wait for 8 cycles of the SPI clock.
511 + * Since speed is defined in Hz and we want milliseconds,
512 + * so it should be 8 * 1000.
513 + */
514 + u64 ms = 8000LL;
515 +
516 + if (op->data.dir == SPI_MEM_NO_DATA)
517 + ms *= 32; /* prevent we may get 0 for short transfers. */
518 + else
519 + ms *= op->data.nbytes;
520 + ms = div_u64(ms, mem->spi->max_speed_hz);
521 + ms += ms + 1000; /* 1s tolerance */
522 +
523 + if (ms > UINT_MAX)
524 + ms = UINT_MAX;
525 +
526 + if (!wait_for_completion_timeout(&mdata->spimem_done,
527 + msecs_to_jiffies(ms))) {
528 + dev_err(mdata->dev, "spi-mem transfer timeout\n");
529 + return -ETIMEDOUT;
530 + }
531 +
532 + return 0;
533 +}
534 +
535 +static int mtk_spi_mem_exec_op(struct spi_mem *mem,
536 + const struct spi_mem_op *op)
537 +{
538 + struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
539 + u32 reg_val, nio, tx_size;
540 + char *tx_tmp_buf, *rx_tmp_buf;
541 + int ret = 0;
542 +
543 + mdata->use_spimem = true;
544 + reinit_completion(&mdata->spimem_done);
545 +
546 + mtk_spi_reset(mdata);
547 + mtk_spi_hw_init(mem->spi->master, mem->spi);
548 + mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
549 +
550 + reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
551 + /* opcode byte len */
552 + reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
553 + reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
554 +
555 + /* addr & dummy byte len */
556 + reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
557 + if (op->addr.nbytes || op->dummy.nbytes)
558 + reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
559 + SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
560 +
561 + /* data byte len */
562 + if (op->data.dir == SPI_MEM_NO_DATA) {
563 + reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
564 + writel(0, mdata->base + SPI_CFG1_REG);
565 + } else {
566 + reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
567 + mdata->xfer_len = op->data.nbytes;
568 + mtk_spi_setup_packet(mem->spi->master);
569 + }
570 +
571 + if (op->addr.nbytes || op->dummy.nbytes) {
572 + if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
573 + reg_val |= SPI_CFG3_IPM_XMODE_EN;
574 + else
575 + reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
576 + }
577 +
578 + if (op->addr.buswidth == 2 ||
579 + op->dummy.buswidth == 2 ||
580 + op->data.buswidth == 2)
581 + nio = 2;
582 + else if (op->addr.buswidth == 4 ||
583 + op->dummy.buswidth == 4 ||
584 + op->data.buswidth == 4)
585 + nio = 4;
586 + else
587 + nio = 1;
588 +
589 + reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
590 + reg_val |= PIN_MODE_CFG(nio);
591 +
592 + reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
593 + if (op->data.dir == SPI_MEM_DATA_IN)
594 + reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
595 + else
596 + reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
597 + writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
598 +
599 + tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
600 + if (op->data.dir == SPI_MEM_DATA_OUT)
601 + tx_size += op->data.nbytes;
602 +
603 + tx_size = max_t(u32, tx_size, 32);
604 +
605 + tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
606 + if (!tx_tmp_buf) {
607 + mdata->use_spimem = false;
608 + return -ENOMEM;
609 + }
610 +
611 + tx_tmp_buf[0] = op->cmd.opcode;
612 +
613 + if (op->addr.nbytes) {
614 + int i;
615 +
616 + for (i = 0; i < op->addr.nbytes; i++)
617 + tx_tmp_buf[i + 1] = op->addr.val >>
618 + (8 * (op->addr.nbytes - i - 1));
619 + }
620 +
621 + if (op->dummy.nbytes)
622 + memset(tx_tmp_buf + op->addr.nbytes + 1,
623 + 0xff,
624 + op->dummy.nbytes);
625 +
626 + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
627 + memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
628 + op->data.buf.out,
629 + op->data.nbytes);
630 +
631 + mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
632 + tx_size, DMA_TO_DEVICE);
633 + if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
634 + ret = -ENOMEM;
635 + goto err_exit;
636 + }
637 +
638 + if (op->data.dir == SPI_MEM_DATA_IN) {
639 + if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
640 + rx_tmp_buf = kzalloc(op->data.nbytes,
641 + GFP_KERNEL | GFP_DMA);
642 + if (!rx_tmp_buf) {
643 + ret = -ENOMEM;
644 + goto unmap_tx_dma;
645 + }
646 + } else {
647 + rx_tmp_buf = op->data.buf.in;
648 + }
649 +
650 + mdata->rx_dma = dma_map_single(mdata->dev,
651 + rx_tmp_buf,
652 + op->data.nbytes,
653 + DMA_FROM_DEVICE);
654 + if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
655 + ret = -ENOMEM;
656 + goto kfree_rx_tmp_buf;
657 + }
658 + }
659 +
660 + reg_val = readl(mdata->base + SPI_CMD_REG);
661 + reg_val |= SPI_CMD_TX_DMA;
662 + if (op->data.dir == SPI_MEM_DATA_IN)
663 + reg_val |= SPI_CMD_RX_DMA;
664 + writel(reg_val, mdata->base + SPI_CMD_REG);
665 +
666 + mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
667 +
668 + mtk_spi_enable_transfer(mem->spi->master);
669 +
670 + /* Wait for the interrupt. */
671 + ret = mtk_spi_transfer_wait(mem, op);
672 + if (ret)
673 + goto unmap_rx_dma;
674 +
675 + /* spi disable dma */
676 + reg_val = readl(mdata->base + SPI_CMD_REG);
677 + reg_val &= ~SPI_CMD_TX_DMA;
678 + if (op->data.dir == SPI_MEM_DATA_IN)
679 + reg_val &= ~SPI_CMD_RX_DMA;
680 + writel(reg_val, mdata->base + SPI_CMD_REG);
681 +
682 +unmap_rx_dma:
683 + if (op->data.dir == SPI_MEM_DATA_IN) {
684 + dma_unmap_single(mdata->dev, mdata->rx_dma,
685 + op->data.nbytes, DMA_FROM_DEVICE);
686 + if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
687 + memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
688 + }
689 +kfree_rx_tmp_buf:
690 + if (op->data.dir == SPI_MEM_DATA_IN &&
691 + !IS_ALIGNED((size_t)op->data.buf.in, 4))
692 + kfree(rx_tmp_buf);
693 +unmap_tx_dma:
694 + dma_unmap_single(mdata->dev, mdata->tx_dma,
695 + tx_size, DMA_TO_DEVICE);
696 +err_exit:
697 + kfree(tx_tmp_buf);
698 + mdata->use_spimem = false;
699 +
700 + return ret;
701 +}
702 +
703 +static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
704 + .adjust_op_size = mtk_spi_mem_adjust_op_size,
705 + .supports_op = mtk_spi_mem_supports_op,
706 + .exec_op = mtk_spi_mem_exec_op,
707 +};
708 +
709 static int mtk_spi_probe(struct platform_device *pdev)
710 {
711 struct spi_master *master;
712 @@ -739,6 +1103,7 @@ static int mtk_spi_probe(struct platform
713 master->can_dma = mtk_spi_can_dma;
714 master->setup = mtk_spi_setup;
715 master->set_cs_timing = mtk_spi_set_hw_cs_timing;
716 + master->use_gpio_descriptors = true;
717
718 of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
719 if (!of_id) {
720 @@ -755,6 +1120,14 @@ static int mtk_spi_probe(struct platform
721
722 if (mdata->dev_comp->must_tx)
723 master->flags = SPI_MASTER_MUST_TX;
724 + if (mdata->dev_comp->ipm_design)
725 + master->mode_bits |= SPI_LOOP;
726 +
727 + if (mdata->dev_comp->ipm_design) {
728 + mdata->dev = &pdev->dev;
729 + master->mem_ops = &mtk_spi_mem_ops;
730 + init_completion(&mdata->spimem_done);
731 + }
732
733 if (mdata->dev_comp->need_pad_sel) {
734 mdata->pad_num = of_property_count_u32_elems(
735 @@ -831,25 +1204,40 @@ static int mtk_spi_probe(struct platform
736 goto err_put_master;
737 }
738
739 + mdata->spi_hclk = devm_clk_get_optional(&pdev->dev, "hclk");
740 + if (IS_ERR(mdata->spi_hclk)) {
741 + ret = PTR_ERR(mdata->spi_hclk);
742 + dev_err(&pdev->dev, "failed to get hclk: %d\n", ret);
743 + goto err_put_master;
744 + }
745 +
746 + ret = clk_prepare_enable(mdata->spi_hclk);
747 + if (ret < 0) {
748 + dev_err(&pdev->dev, "failed to enable hclk (%d)\n", ret);
749 + goto err_put_master;
750 + }
751 +
752 ret = clk_prepare_enable(mdata->spi_clk);
753 if (ret < 0) {
754 dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
755 - goto err_put_master;
756 + goto err_disable_spi_hclk;
757 }
758
759 ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
760 if (ret < 0) {
761 dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
762 - clk_disable_unprepare(mdata->spi_clk);
763 - goto err_put_master;
764 + goto err_disable_spi_clk;
765 }
766
767 mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
768
769 - if (mdata->dev_comp->no_need_unprepare)
770 + if (mdata->dev_comp->no_need_unprepare) {
771 clk_disable(mdata->spi_clk);
772 - else
773 + clk_disable(mdata->spi_hclk);
774 + } else {
775 clk_disable_unprepare(mdata->spi_clk);
776 + clk_disable_unprepare(mdata->spi_hclk);
777 + }
778
779 pm_runtime_enable(&pdev->dev);
780
781 @@ -862,25 +1250,12 @@ static int mtk_spi_probe(struct platform
782 goto err_disable_runtime_pm;
783 }
784
785 - if (!master->cs_gpios && master->num_chipselect > 1) {
786 + if (!master->cs_gpiods && master->num_chipselect > 1) {
787 dev_err(&pdev->dev,
788 "cs_gpios not specified and num_chipselect > 1\n");
789 ret = -EINVAL;
790 goto err_disable_runtime_pm;
791 }
792 -
793 - if (master->cs_gpios) {
794 - for (i = 0; i < master->num_chipselect; i++) {
795 - ret = devm_gpio_request(&pdev->dev,
796 - master->cs_gpios[i],
797 - dev_name(&pdev->dev));
798 - if (ret) {
799 - dev_err(&pdev->dev,
800 - "can't get CS GPIO %i\n", i);
801 - goto err_disable_runtime_pm;
802 - }
803 - }
804 - }
805 }
806
807 if (mdata->dev_comp->dma_ext)
808 @@ -902,6 +1277,10 @@ static int mtk_spi_probe(struct platform
809
810 err_disable_runtime_pm:
811 pm_runtime_disable(&pdev->dev);
812 +err_disable_spi_clk:
813 + clk_disable_unprepare(mdata->spi_clk);
814 +err_disable_spi_hclk:
815 + clk_disable_unprepare(mdata->spi_hclk);
816 err_put_master:
817 spi_master_put(master);
818
819 @@ -917,8 +1296,10 @@ static int mtk_spi_remove(struct platfor
820
821 mtk_spi_reset(mdata);
822
823 - if (mdata->dev_comp->no_need_unprepare)
824 + if (mdata->dev_comp->no_need_unprepare) {
825 clk_unprepare(mdata->spi_clk);
826 + clk_unprepare(mdata->spi_hclk);
827 + }
828
829 return 0;
830 }
831 @@ -934,8 +1315,10 @@ static int mtk_spi_suspend(struct device
832 if (ret)
833 return ret;
834
835 - if (!pm_runtime_suspended(dev))
836 + if (!pm_runtime_suspended(dev)) {
837 clk_disable_unprepare(mdata->spi_clk);
838 + clk_disable_unprepare(mdata->spi_hclk);
839 + }
840
841 return ret;
842 }
843 @@ -952,11 +1335,20 @@ static int mtk_spi_resume(struct device
844 dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
845 return ret;
846 }
847 +
848 + ret = clk_prepare_enable(mdata->spi_hclk);
849 + if (ret < 0) {
850 + dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
851 + clk_disable_unprepare(mdata->spi_clk);
852 + return ret;
853 + }
854 }
855
856 ret = spi_master_resume(master);
857 - if (ret < 0)
858 + if (ret < 0) {
859 clk_disable_unprepare(mdata->spi_clk);
860 + clk_disable_unprepare(mdata->spi_hclk);
861 + }
862
863 return ret;
864 }
865 @@ -968,10 +1360,13 @@ static int mtk_spi_runtime_suspend(struc
866 struct spi_master *master = dev_get_drvdata(dev);
867 struct mtk_spi *mdata = spi_master_get_devdata(master);
868
869 - if (mdata->dev_comp->no_need_unprepare)
870 + if (mdata->dev_comp->no_need_unprepare) {
871 clk_disable(mdata->spi_clk);
872 - else
873 + clk_disable(mdata->spi_hclk);
874 + } else {
875 clk_disable_unprepare(mdata->spi_clk);
876 + clk_disable_unprepare(mdata->spi_hclk);
877 + }
878
879 return 0;
880 }
881 @@ -982,13 +1377,31 @@ static int mtk_spi_runtime_resume(struct
882 struct mtk_spi *mdata = spi_master_get_devdata(master);
883 int ret;
884
885 - if (mdata->dev_comp->no_need_unprepare)
886 + if (mdata->dev_comp->no_need_unprepare) {
887 ret = clk_enable(mdata->spi_clk);
888 - else
889 + if (ret < 0) {
890 + dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
891 + return ret;
892 + }
893 + ret = clk_enable(mdata->spi_hclk);
894 + if (ret < 0) {
895 + dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
896 + clk_disable(mdata->spi_clk);
897 + return ret;
898 + }
899 + } else {
900 ret = clk_prepare_enable(mdata->spi_clk);
901 - if (ret < 0) {
902 - dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
903 - return ret;
904 + if (ret < 0) {
905 + dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
906 + return ret;
907 + }
908 +
909 + ret = clk_prepare_enable(mdata->spi_hclk);
910 + if (ret < 0) {
911 + dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
912 + clk_disable_unprepare(mdata->spi_clk);
913 + return ret;
914 + }
915 }
916
917 return 0;