uboot-mediatek: replace patches with updated versions
[openwrt/staging/dedeckeh.git] / package / boot / uboot-mediatek / patches / 002-0016-spi-add-support-for-MediaTek-spi-mem-controller.patch
1 From e6b225ff8990635dc2d6d8dbd72e78dec1f36c62 Mon Sep 17 00:00:00 2001
2 From: Weijie Gao <weijie.gao@mediatek.com>
3 Date: Wed, 31 Aug 2022 19:04:45 +0800
4 Subject: [PATCH 16/32] spi: add support for MediaTek spi-mem controller
5
6 This patch adds support for spi-mem controller found on newer MediaTek SoCs
7 This controller supports Single/Dual/Quad SPI mode.
8
9 Reviewed-by: Simon Glass <sjg@chromium.org>
10 Signed-off-by: SkyLake.Huang <skylake.huang@mediatek.com>
11 ---
12 drivers/spi/Kconfig | 8 +
13 drivers/spi/Makefile | 1 +
14 drivers/spi/mtk_spim.c | 701 +++++++++++++++++++++++++++++++++++++++++
15 3 files changed, 710 insertions(+)
16 create mode 100644 drivers/spi/mtk_spim.c
17
18 --- a/drivers/spi/Kconfig
19 +++ b/drivers/spi/Kconfig
20 @@ -262,6 +262,14 @@ config MTK_SNFI_SPI
21 used to access SPI memory devices like SPI-NOR or SPI-NAND on
22 platforms embedding this IP core, like MT7622/M7629.
23
24 +config MTK_SPIM
25 + bool "MediaTek SPI-MEM master controller driver"
26 + depends on SPI_MEM
27 + help
28 + Enable MediaTek SPI-MEM master controller driver. This driver mainly
29 + supports SPI flashes. You can use single, dual or quad mode
30 + transmission on this controller.
31 +
32 config MVEBU_A3700_SPI
33 bool "Marvell Armada 3700 SPI driver"
34 select CLK_ARMADA_3720
35 --- a/drivers/spi/Makefile
36 +++ b/drivers/spi/Makefile
37 @@ -41,6 +41,7 @@ obj-$(CONFIG_MPC8XX_SPI) += mpc8xx_spi.o
38 obj-$(CONFIG_MPC8XXX_SPI) += mpc8xxx_spi.o
39 obj-$(CONFIG_MTK_SNFI_SPI) += mtk_snfi_spi.o
40 obj-$(CONFIG_MTK_SNOR) += mtk_snor.o
41 +obj-$(CONFIG_MTK_SPIM) += mtk_spim.o
42 obj-$(CONFIG_MT7620_SPI) += mt7620_spi.o
43 obj-$(CONFIG_MT7621_SPI) += mt7621_spi.o
44 obj-$(CONFIG_MSCC_BB_SPI) += mscc_bb_spi.o
45 --- /dev/null
46 +++ b/drivers/spi/mtk_spim.c
47 @@ -0,0 +1,701 @@
48 +// SPDX-License-Identifier: GPL-2.0
49 +/*
50 + * Copyright (C) 2022 MediaTek Inc. All Rights Reserved.
51 + *
52 + * Author: SkyLake.Huang <skylake.huang@mediatek.com>
53 + */
54 +
55 +#include <clk.h>
56 +#include <cpu_func.h>
57 +#include <div64.h>
58 +#include <dm.h>
59 +#include <spi.h>
60 +#include <spi-mem.h>
61 +#include <stdbool.h>
62 +#include <watchdog.h>
63 +#include <dm/device.h>
64 +#include <dm/device_compat.h>
65 +#include <dm/devres.h>
66 +#include <dm/pinctrl.h>
67 +#include <linux/bitops.h>
68 +#include <linux/completion.h>
69 +#include <linux/dma-mapping.h>
70 +#include <linux/io.h>
71 +#include <linux/iopoll.h>
72 +
73 +#define SPI_CFG0_REG 0x0000
74 +#define SPI_CFG1_REG 0x0004
75 +#define SPI_TX_SRC_REG 0x0008
76 +#define SPI_RX_DST_REG 0x000c
77 +#define SPI_TX_DATA_REG 0x0010
78 +#define SPI_RX_DATA_REG 0x0014
79 +#define SPI_CMD_REG 0x0018
80 +#define SPI_IRQ_REG 0x001c
81 +#define SPI_STATUS_REG 0x0020
82 +#define SPI_PAD_SEL_REG 0x0024
83 +#define SPI_CFG2_REG 0x0028
84 +#define SPI_TX_SRC_REG_64 0x002c
85 +#define SPI_RX_DST_REG_64 0x0030
86 +#define SPI_CFG3_IPM_REG 0x0040
87 +
88 +#define SPI_CFG0_SCK_HIGH_OFFSET 0
89 +#define SPI_CFG0_SCK_LOW_OFFSET 8
90 +#define SPI_CFG0_CS_HOLD_OFFSET 16
91 +#define SPI_CFG0_CS_SETUP_OFFSET 24
92 +#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
93 +#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
94 +
95 +#define SPI_CFG1_CS_IDLE_OFFSET 0
96 +#define SPI_CFG1_PACKET_LOOP_OFFSET 8
97 +#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
98 +#define SPI_CFG1_GET_TICKDLY_OFFSET 29
99 +
100 +#define SPI_CFG1_GET_TICKDLY_MASK GENMASK(31, 29)
101 +#define SPI_CFG1_CS_IDLE_MASK 0xff
102 +#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
103 +#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
104 +#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
105 +#define SPI_CFG2_SCK_HIGH_OFFSET 0
106 +#define SPI_CFG2_SCK_LOW_OFFSET 16
107 +#define SPI_CFG2_SCK_HIGH_MASK GENMASK(15, 0)
108 +#define SPI_CFG2_SCK_LOW_MASK GENMASK(31, 16)
109 +
110 +#define SPI_CMD_ACT BIT(0)
111 +#define SPI_CMD_RESUME BIT(1)
112 +#define SPI_CMD_RST BIT(2)
113 +#define SPI_CMD_PAUSE_EN BIT(4)
114 +#define SPI_CMD_DEASSERT BIT(5)
115 +#define SPI_CMD_SAMPLE_SEL BIT(6)
116 +#define SPI_CMD_CS_POL BIT(7)
117 +#define SPI_CMD_CPHA BIT(8)
118 +#define SPI_CMD_CPOL BIT(9)
119 +#define SPI_CMD_RX_DMA BIT(10)
120 +#define SPI_CMD_TX_DMA BIT(11)
121 +#define SPI_CMD_TXMSBF BIT(12)
122 +#define SPI_CMD_RXMSBF BIT(13)
123 +#define SPI_CMD_RX_ENDIAN BIT(14)
124 +#define SPI_CMD_TX_ENDIAN BIT(15)
125 +#define SPI_CMD_FINISH_IE BIT(16)
126 +#define SPI_CMD_PAUSE_IE BIT(17)
127 +#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
128 +#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
129 +#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
130 +
131 +#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
132 +
133 +#define PIN_MODE_CFG(x) ((x) / 2)
134 +
135 +#define SPI_CFG3_IPM_PIN_MODE_OFFSET 0
136 +#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
137 +#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
138 +#define SPI_CFG3_IPM_XMODE_EN BIT(4)
139 +#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
140 +#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
141 +#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
142 +#define SPI_CFG3_IPM_DUMMY_BYTELEN_OFFSET 16
143 +
144 +#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
145 +#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
146 +#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
147 +#define SPI_CFG3_IPM_DUMMY_BYTELEN_MASK GENMASK(19, 16)
148 +
149 +#define MT8173_SPI_MAX_PAD_SEL 3
150 +
151 +#define MTK_SPI_PAUSE_INT_STATUS 0x2
152 +
153 +#define MTK_SPI_IDLE 0
154 +#define MTK_SPI_PAUSED 1
155 +
156 +#define MTK_SPI_MAX_FIFO_SIZE 32U
157 +#define MTK_SPI_PACKET_SIZE 1024
158 +#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
159 +#define MTK_SPI_IPM_PACKET_LOOP SZ_256
160 +
161 +#define MTK_SPI_32BITS_MASK 0xffffffff
162 +
163 +#define DMA_ADDR_EXT_BITS 36
164 +#define DMA_ADDR_DEF_BITS 32
165 +
166 +#define CLK_TO_US(freq, clkcnt) DIV_ROUND_UP((clkcnt), (freq) / 1000000)
167 +
168 +/* struct mtk_spim_capability
169 + * @enhance_timing: Some IC designs adjust the cfg register for better timing accuracy
170 + * @dma_ext: Some IC support DMA addr extension
171 + * @ipm_design: The IPM IP design improves some features, and supports dual/quad mode
172 + * @support_quad: Whether quad mode is supported
173 + */
174 +struct mtk_spim_capability {
175 + bool enhance_timing;
176 + bool dma_ext;
177 + bool ipm_design;
178 + bool support_quad;
179 +};
180 +
181 +/* struct mtk_spim_priv
182 + * @base: Base address of the spi controller
183 + * @state: Controller state
184 + * @sel_clk: Pad clock
185 + * @spi_clk: Core clock
186 + * @xfer_len: Current length of data for transfer
187 + * @hw_cap: Controller capabilities
188 + * @tick_dly: Used to postpone SPI sampling time
189 + * @sample_sel: Sample edge of MISO
190 + * @dev: udevice of this spi controller
191 + * @tx_dma: Tx DMA address
192 + * @rx_dma: Rx DMA address
193 + */
194 +struct mtk_spim_priv {
195 + void __iomem *base;
196 + u32 state;
197 + struct clk sel_clk, spi_clk;
198 + u32 xfer_len;
199 + struct mtk_spim_capability hw_cap;
200 + u32 tick_dly;
201 + u32 sample_sel;
202 +
203 + struct device *dev;
204 + dma_addr_t tx_dma;
205 + dma_addr_t rx_dma;
206 +};
207 +
208 +static void mtk_spim_reset(struct mtk_spim_priv *priv)
209 +{
210 + /* set the software reset bit in SPI_CMD_REG. */
211 + setbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
212 + clrbits_le32(priv->base + SPI_CMD_REG, SPI_CMD_RST);
213 +}
214 +
215 +static int mtk_spim_hw_init(struct spi_slave *slave)
216 +{
217 + struct udevice *bus = dev_get_parent(slave->dev);
218 + struct mtk_spim_priv *priv = dev_get_priv(bus);
219 + u16 cpha, cpol;
220 + u32 reg_val;
221 +
222 + cpha = slave->mode & SPI_CPHA ? 1 : 0;
223 + cpol = slave->mode & SPI_CPOL ? 1 : 0;
224 +
225 + if (priv->hw_cap.enhance_timing) {
226 + if (priv->hw_cap.ipm_design) {
227 + /* CFG3 reg only used for spi-mem,
228 + * here write to default value
229 + */
230 + writel(0x0, priv->base + SPI_CFG3_IPM_REG);
231 + clrsetbits_le32(priv->base + SPI_CMD_REG,
232 + SPI_CMD_IPM_GET_TICKDLY_MASK,
233 + priv->tick_dly <<
234 + SPI_CMD_IPM_GET_TICKDLY_OFFSET);
235 + } else {
236 + clrsetbits_le32(priv->base + SPI_CFG1_REG,
237 + SPI_CFG1_GET_TICKDLY_MASK,
238 + priv->tick_dly <<
239 + SPI_CFG1_GET_TICKDLY_OFFSET);
240 + }
241 + }
242 +
243 + reg_val = readl(priv->base + SPI_CMD_REG);
244 + if (priv->hw_cap.ipm_design) {
245 + /* SPI transfer without idle time until packet length done */
246 + reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
247 + if (slave->mode & SPI_LOOP)
248 + reg_val |= SPI_CMD_IPM_SPIM_LOOP;
249 + else
250 + reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
251 + }
252 +
253 + if (cpha)
254 + reg_val |= SPI_CMD_CPHA;
255 + else
256 + reg_val &= ~SPI_CMD_CPHA;
257 + if (cpol)
258 + reg_val |= SPI_CMD_CPOL;
259 + else
260 + reg_val &= ~SPI_CMD_CPOL;
261 +
262 + /* set the mlsbx and mlsbtx */
263 + if (slave->mode & SPI_LSB_FIRST) {
264 + reg_val &= ~SPI_CMD_TXMSBF;
265 + reg_val &= ~SPI_CMD_RXMSBF;
266 + } else {
267 + reg_val |= SPI_CMD_TXMSBF;
268 + reg_val |= SPI_CMD_RXMSBF;
269 + }
270 +
271 + /* do not reverse tx/rx endian */
272 + reg_val &= ~SPI_CMD_TX_ENDIAN;
273 + reg_val &= ~SPI_CMD_RX_ENDIAN;
274 +
275 + if (priv->hw_cap.enhance_timing) {
276 + /* set CS polarity */
277 + if (slave->mode & SPI_CS_HIGH)
278 + reg_val |= SPI_CMD_CS_POL;
279 + else
280 + reg_val &= ~SPI_CMD_CS_POL;
281 +
282 + if (priv->sample_sel)
283 + reg_val |= SPI_CMD_SAMPLE_SEL;
284 + else
285 + reg_val &= ~SPI_CMD_SAMPLE_SEL;
286 + }
287 +
288 + /* disable dma mode */
289 + reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
290 +
291 + /* disable deassert mode */
292 + reg_val &= ~SPI_CMD_DEASSERT;
293 +
294 + writel(reg_val, priv->base + SPI_CMD_REG);
295 +
296 + return 0;
297 +}
298 +
299 +static void mtk_spim_prepare_transfer(struct mtk_spim_priv *priv,
300 + u32 speed_hz)
301 +{
302 + u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
303 +
304 + spi_clk_hz = clk_get_rate(&priv->spi_clk);
305 + if (speed_hz <= spi_clk_hz / 4)
306 + div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
307 + else
308 + div = 4;
309 +
310 + sck_time = (div + 1) / 2;
311 + cs_time = sck_time * 2;
312 +
313 + if (priv->hw_cap.enhance_timing) {
314 + reg_val = ((sck_time - 1) & 0xffff)
315 + << SPI_CFG2_SCK_HIGH_OFFSET;
316 + reg_val |= ((sck_time - 1) & 0xffff)
317 + << SPI_CFG2_SCK_LOW_OFFSET;
318 + writel(reg_val, priv->base + SPI_CFG2_REG);
319 +
320 + reg_val = ((cs_time - 1) & 0xffff)
321 + << SPI_ADJUST_CFG0_CS_HOLD_OFFSET;
322 + reg_val |= ((cs_time - 1) & 0xffff)
323 + << SPI_ADJUST_CFG0_CS_SETUP_OFFSET;
324 + writel(reg_val, priv->base + SPI_CFG0_REG);
325 + } else {
326 + reg_val = ((sck_time - 1) & 0xff)
327 + << SPI_CFG0_SCK_HIGH_OFFSET;
328 + reg_val |= ((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET;
329 + reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET;
330 + reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET;
331 + writel(reg_val, priv->base + SPI_CFG0_REG);
332 + }
333 +
334 + reg_val = readl(priv->base + SPI_CFG1_REG);
335 + reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
336 + reg_val |= ((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET;
337 + writel(reg_val, priv->base + SPI_CFG1_REG);
338 +}
339 +
340 +/**
341 + * mtk_spim_setup_packet() - setup packet format.
342 + * @priv: controller priv
343 + *
344 + * This controller sends/receives data in packets. The packet size is
345 + * configurable.
346 + *
347 + * This function calculates the maximum packet size available for current
348 + * data, and calculates the number of packets required to send/receive data
349 + * as much as possible.
350 + */
351 +static void mtk_spim_setup_packet(struct mtk_spim_priv *priv)
352 +{
353 + u32 packet_size, packet_loop, reg_val;
354 +
355 + /* Calculate maximum packet size */
356 + if (priv->hw_cap.ipm_design)
357 + packet_size = min_t(u32,
358 + priv->xfer_len,
359 + MTK_SPI_IPM_PACKET_SIZE);
360 + else
361 + packet_size = min_t(u32,
362 + priv->xfer_len,
363 + MTK_SPI_PACKET_SIZE);
364 +
365 + /* Calculate the number of packets to send/receive */
366 + packet_loop = priv->xfer_len / packet_size;
367 +
368 + reg_val = readl(priv->base + SPI_CFG1_REG);
369 + if (priv->hw_cap.ipm_design)
370 + reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
371 + else
372 + reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
373 +
374 + reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
375 +
376 + reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
377 +
378 + reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
379 +
380 + writel(reg_val, priv->base + SPI_CFG1_REG);
381 +}
382 +
383 +static void mtk_spim_enable_transfer(struct mtk_spim_priv *priv)
384 +{
385 + u32 cmd;
386 +
387 + cmd = readl(priv->base + SPI_CMD_REG);
388 + if (priv->state == MTK_SPI_IDLE)
389 + cmd |= SPI_CMD_ACT;
390 + else
391 + cmd |= SPI_CMD_RESUME;
392 + writel(cmd, priv->base + SPI_CMD_REG);
393 +}
394 +
395 +static bool mtk_spim_supports_op(struct spi_slave *slave,
396 + const struct spi_mem_op *op)
397 +{
398 + struct udevice *bus = dev_get_parent(slave->dev);
399 + struct mtk_spim_priv *priv = dev_get_priv(bus);
400 +
401 + if (op->cmd.buswidth == 0 || op->cmd.buswidth > 4 ||
402 + op->addr.buswidth > 4 || op->dummy.buswidth > 4 ||
403 + op->data.buswidth > 4)
404 + return false;
405 +
406 + if (!priv->hw_cap.support_quad && (op->cmd.buswidth > 2 ||
407 + op->addr.buswidth > 2 || op->dummy.buswidth > 2 ||
408 + op->data.buswidth > 2))
409 + return false;
410 +
411 + if (op->addr.nbytes && op->dummy.nbytes &&
412 + op->addr.buswidth != op->dummy.buswidth)
413 + return false;
414 +
415 + if (op->addr.nbytes + op->dummy.nbytes > 16)
416 + return false;
417 +
418 + if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
419 + if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
420 + MTK_SPI_IPM_PACKET_LOOP ||
421 + op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
422 + return false;
423 + }
424 +
425 + return true;
426 +}
427 +
428 +static void mtk_spim_setup_dma_xfer(struct mtk_spim_priv *priv,
429 + const struct spi_mem_op *op)
430 +{
431 + writel((u32)(priv->tx_dma & MTK_SPI_32BITS_MASK),
432 + priv->base + SPI_TX_SRC_REG);
433 +
434 + if (priv->hw_cap.dma_ext)
435 + writel((u32)(priv->tx_dma >> 32),
436 + priv->base + SPI_TX_SRC_REG_64);
437 +
438 + if (op->data.dir == SPI_MEM_DATA_IN) {
439 + writel((u32)(priv->rx_dma & MTK_SPI_32BITS_MASK),
440 + priv->base + SPI_RX_DST_REG);
441 +
442 + if (priv->hw_cap.dma_ext)
443 + writel((u32)(priv->rx_dma >> 32),
444 + priv->base + SPI_RX_DST_REG_64);
445 + }
446 +}
447 +
448 +static int mtk_spim_transfer_wait(struct spi_slave *slave,
449 + const struct spi_mem_op *op)
450 +{
451 + struct udevice *bus = dev_get_parent(slave->dev);
452 + struct mtk_spim_priv *priv = dev_get_priv(bus);
453 + u32 sck_l, sck_h, spi_bus_clk, clk_count, reg;
454 + ulong us = 1;
455 + int ret = 0;
456 +
457 + if (op->data.dir == SPI_MEM_NO_DATA)
458 + clk_count = 32;
459 + else
460 + clk_count = op->data.nbytes;
461 +
462 + spi_bus_clk = clk_get_rate(&priv->spi_clk);
463 + sck_l = readl(priv->base + SPI_CFG2_REG) >> SPI_CFG2_SCK_LOW_OFFSET;
464 + sck_h = readl(priv->base + SPI_CFG2_REG) & SPI_CFG2_SCK_HIGH_MASK;
465 + do_div(spi_bus_clk, sck_l + sck_h + 2);
466 +
467 + us = CLK_TO_US(spi_bus_clk, clk_count * 8);
468 + us += 1000 * 1000; /* 1s tolerance */
469 +
470 + if (us > UINT_MAX)
471 + us = UINT_MAX;
472 +
473 + ret = readl_poll_timeout(priv->base + SPI_STATUS_REG, reg,
474 + reg & 0x1, us);
475 + if (ret < 0) {
476 + dev_err(priv->dev, "transfer timeout, val: 0x%lx\n", us);
477 + return -ETIMEDOUT;
478 + }
479 +
480 + return 0;
481 +}
482 +
483 +static int mtk_spim_exec_op(struct spi_slave *slave,
484 + const struct spi_mem_op *op)
485 +{
486 + struct udevice *bus = dev_get_parent(slave->dev);
487 + struct mtk_spim_priv *priv = dev_get_priv(bus);
488 + u32 reg_val, nio = 1, tx_size;
489 + char *tx_tmp_buf;
490 + char *rx_tmp_buf;
491 + int i, ret = 0;
492 +
493 + mtk_spim_reset(priv);
494 + mtk_spim_hw_init(slave);
495 + mtk_spim_prepare_transfer(priv, slave->max_hz);
496 +
497 + reg_val = readl(priv->base + SPI_CFG3_IPM_REG);
498 + /* opcode byte len */
499 + reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
500 + reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
501 +
502 + /* addr & dummy byte len */
503 + if (op->addr.nbytes || op->dummy.nbytes)
504 + reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
505 + SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
506 +
507 + /* data byte len */
508 + if (!op->data.nbytes) {
509 + reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
510 + writel(0, priv->base + SPI_CFG1_REG);
511 + } else {
512 + reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
513 + priv->xfer_len = op->data.nbytes;
514 + mtk_spim_setup_packet(priv);
515 + }
516 +
517 + if (op->addr.nbytes || op->dummy.nbytes) {
518 + if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
519 + reg_val |= SPI_CFG3_IPM_XMODE_EN;
520 + else
521 + reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
522 + }
523 +
524 + if (op->addr.buswidth == 2 ||
525 + op->dummy.buswidth == 2 ||
526 + op->data.buswidth == 2)
527 + nio = 2;
528 + else if (op->addr.buswidth == 4 ||
529 + op->dummy.buswidth == 4 ||
530 + op->data.buswidth == 4)
531 + nio = 4;
532 +
533 + reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
534 + reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
535 +
536 + reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
537 + if (op->data.dir == SPI_MEM_DATA_IN)
538 + reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
539 + else
540 + reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
541 + writel(reg_val, priv->base + SPI_CFG3_IPM_REG);
542 +
543 + tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
544 + if (op->data.dir == SPI_MEM_DATA_OUT)
545 + tx_size += op->data.nbytes;
546 +
547 + tx_size = max(tx_size, (u32)32);
548 +
549 + /* Fill up tx data */
550 + tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL);
551 + if (!tx_tmp_buf) {
552 + ret = -ENOMEM;
553 + goto exit;
554 + }
555 +
556 + tx_tmp_buf[0] = op->cmd.opcode;
557 +
558 + if (op->addr.nbytes) {
559 + for (i = 0; i < op->addr.nbytes; i++)
560 + tx_tmp_buf[i + 1] = op->addr.val >>
561 + (8 * (op->addr.nbytes - i - 1));
562 + }
563 +
564 + if (op->dummy.nbytes)
565 + memset(tx_tmp_buf + op->addr.nbytes + 1, 0xff,
566 + op->dummy.nbytes);
567 +
568 + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
569 + memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
570 + op->data.buf.out, op->data.nbytes);
571 + /* Finish filling up tx data */
572 +
573 + priv->tx_dma = dma_map_single(tx_tmp_buf, tx_size, DMA_TO_DEVICE);
574 + if (dma_mapping_error(priv->dev, priv->tx_dma)) {
575 + ret = -ENOMEM;
576 + goto tx_free;
577 + }
578 +
579 + if (op->data.dir == SPI_MEM_DATA_IN) {
580 + if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
581 + rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL);
582 + if (!rx_tmp_buf) {
583 + ret = -ENOMEM;
584 + goto tx_unmap;
585 + }
586 + } else {
587 + rx_tmp_buf = op->data.buf.in;
588 + }
589 +
590 + priv->rx_dma = dma_map_single(rx_tmp_buf, op->data.nbytes,
591 + DMA_FROM_DEVICE);
592 + if (dma_mapping_error(priv->dev, priv->rx_dma)) {
593 + ret = -ENOMEM;
594 + goto rx_free;
595 + }
596 + }
597 +
598 + reg_val = readl(priv->base + SPI_CMD_REG);
599 + reg_val |= SPI_CMD_TX_DMA;
600 + if (op->data.dir == SPI_MEM_DATA_IN)
601 + reg_val |= SPI_CMD_RX_DMA;
602 +
603 + writel(reg_val, priv->base + SPI_CMD_REG);
604 +
605 + mtk_spim_setup_dma_xfer(priv, op);
606 +
607 + mtk_spim_enable_transfer(priv);
608 +
609 + /* Wait for the interrupt. */
610 + ret = mtk_spim_transfer_wait(slave, op);
611 + if (ret)
612 + goto rx_unmap;
613 +
614 + if (op->data.dir == SPI_MEM_DATA_IN &&
615 + !IS_ALIGNED((size_t)op->data.buf.in, 4))
616 + memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
617 +
618 +rx_unmap:
619 + /* spi disable dma */
620 + reg_val = readl(priv->base + SPI_CMD_REG);
621 + reg_val &= ~SPI_CMD_TX_DMA;
622 + if (op->data.dir == SPI_MEM_DATA_IN)
623 + reg_val &= ~SPI_CMD_RX_DMA;
624 + writel(reg_val, priv->base + SPI_CMD_REG);
625 +
626 + writel(0, priv->base + SPI_TX_SRC_REG);
627 + writel(0, priv->base + SPI_RX_DST_REG);
628 +
629 + if (op->data.dir == SPI_MEM_DATA_IN)
630 + dma_unmap_single(priv->rx_dma,
631 + op->data.nbytes, DMA_FROM_DEVICE);
632 +rx_free:
633 + if (op->data.dir == SPI_MEM_DATA_IN &&
634 + !IS_ALIGNED((size_t)op->data.buf.in, 4))
635 + kfree(rx_tmp_buf);
636 +tx_unmap:
637 + dma_unmap_single(priv->tx_dma,
638 + tx_size, DMA_TO_DEVICE);
639 +tx_free:
640 + kfree(tx_tmp_buf);
641 +exit:
642 + return ret;
643 +}
644 +
645 +static int mtk_spim_adjust_op_size(struct spi_slave *slave,
646 + struct spi_mem_op *op)
647 +{
648 + int opcode_len;
649 +
650 + if (!op->data.nbytes)
651 + return 0;
652 +
653 + if (op->data.dir != SPI_MEM_NO_DATA) {
654 + opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
655 + if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
656 + op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
657 + /* force data buffer dma-aligned. */
658 + op->data.nbytes -= op->data.nbytes % 4;
659 + }
660 + }
661 +
662 + return 0;
663 +}
664 +
665 +static int mtk_spim_get_attr(struct mtk_spim_priv *priv, struct udevice *dev)
666 +{
667 + int ret;
668 +
669 + priv->hw_cap.enhance_timing = dev_read_bool(dev, "enhance_timing");
670 + priv->hw_cap.dma_ext = dev_read_bool(dev, "dma_ext");
671 + priv->hw_cap.ipm_design = dev_read_bool(dev, "ipm_design");
672 + priv->hw_cap.support_quad = dev_read_bool(dev, "support_quad");
673 +
674 + ret = dev_read_u32(dev, "tick_dly", &priv->tick_dly);
675 + if (ret < 0)
676 + dev_err(priv->dev, "tick dly not set.\n");
677 +
678 + ret = dev_read_u32(dev, "sample_sel", &priv->sample_sel);
679 + if (ret < 0)
680 + dev_err(priv->dev, "sample sel not set.\n");
681 +
682 + return ret;
683 +}
684 +
685 +static int mtk_spim_probe(struct udevice *dev)
686 +{
687 + struct mtk_spim_priv *priv = dev_get_priv(dev);
688 + int ret;
689 +
690 + priv->base = (void __iomem *)devfdt_get_addr(dev);
691 + if (!priv->base)
692 + return -EINVAL;
693 +
694 + mtk_spim_get_attr(priv, dev);
695 +
696 + ret = clk_get_by_name(dev, "sel-clk", &priv->sel_clk);
697 + if (ret < 0) {
698 + dev_err(dev, "failed to get sel-clk\n");
699 + return ret;
700 + }
701 +
702 + ret = clk_get_by_name(dev, "spi-clk", &priv->spi_clk);
703 + if (ret < 0) {
704 + dev_err(dev, "failed to get spi-clk\n");
705 + return ret;
706 + }
707 +
708 + clk_enable(&priv->sel_clk);
709 + clk_enable(&priv->spi_clk);
710 +
711 + return 0;
712 +}
713 +
714 +static int mtk_spim_set_speed(struct udevice *dev, uint speed)
715 +{
716 + return 0;
717 +}
718 +
719 +static int mtk_spim_set_mode(struct udevice *dev, uint mode)
720 +{
721 + return 0;
722 +}
723 +
724 +static const struct spi_controller_mem_ops mtk_spim_mem_ops = {
725 + .adjust_op_size = mtk_spim_adjust_op_size,
726 + .supports_op = mtk_spim_supports_op,
727 + .exec_op = mtk_spim_exec_op
728 +};
729 +
730 +static const struct dm_spi_ops mtk_spim_ops = {
731 + .mem_ops = &mtk_spim_mem_ops,
732 + .set_speed = mtk_spim_set_speed,
733 + .set_mode = mtk_spim_set_mode,
734 +};
735 +
736 +static const struct udevice_id mtk_spim_ids[] = {
737 + { .compatible = "mediatek,ipm-spi" },
738 + {}
739 +};
740 +
741 +U_BOOT_DRIVER(mtk_spim) = {
742 + .name = "mtk_spim",
743 + .id = UCLASS_SPI,
744 + .of_match = mtk_spim_ids,
745 + .ops = &mtk_spim_ops,
746 + .priv_auto = sizeof(struct mtk_spim_priv),
747 + .probe = mtk_spim_probe,
748 +};