mediatek: add a new spi-nand driver for kernel 5.10
[openwrt/openwrt.git] target/linux/mediatek/files-5.10/drivers/mtd/mtk-snand/mtk-snand.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"

/* NFI registers */
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)

/* SNFI registers */
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

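/* Timeout for register polling and on-flash status waits, in microseconds */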
#define SNFI_POLL_INTERVAL 1000000

static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const struct mtk_snand_soc_data mtk_snand_socs[__SNAND_SOC_MAX] = {
        [SNAND_SOC_MT7622] = {
                .sector_size = 512,
                .max_sectors = 8,
                .fdm_size = 8,
                .fdm_ecc_size = 1,
                .fifo_size = 32,
                .bbm_swap = false,
                .empty_page_check = false,
                .mastersta_mask = NFI_MASTERSTA_MASK_7622,
                .spare_sizes = mt7622_spare_sizes,
                .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
        },
        [SNAND_SOC_MT7629] = {
                .sector_size = 512,
                .max_sectors = 8,
                .fdm_size = 8,
                .fdm_ecc_size = 1,
                .fifo_size = 32,
                .bbm_swap = true,
                .empty_page_check = false,
                .mastersta_mask = NFI_MASTERSTA_MASK_7622,
                .spare_sizes = mt7622_spare_sizes,
                .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
        },
};

static inline uint32_t nfi_read32(struct mtk_snand *snf, uint32_t reg)
{
        return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, uint32_t reg,
                               uint32_t val)
{
        writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, uint32_t reg,
                               uint16_t val)
{
        writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, uint32_t reg, uint32_t clr,
                             uint32_t set)
{
        uint32_t val;

        val = readl(snf->nfi_base + reg);
        val &= ~clr;
        val |= set;
        writel(val, snf->nfi_base + reg);
}

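/*
 * The SNFI GPRAM is word-addressable only: pack/unpack byte streams into
 * aligned 32-bit accesses. The write helper assumes 'reg' is 4-byte aligned
 * (true for SNF_GPRAM); an unaligned start would zero-fill the lower byte
 * lanes of the first word written.
 */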
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
                           const uint8_t *data, uint32_t len)
{
        uint32_t i, val = 0, es = sizeof(uint32_t);

        for (i = reg; i < reg + len; i++) {
                val |= ((uint32_t)*data++) << (8 * (i % es));

                if (i % es == es - 1 || i == reg + len - 1) {
                        nfi_write32(snf, i & ~(es - 1), val);
                        val = 0;
                }
        }
}

static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
                          uint32_t len)
{
        uint32_t i, val = 0, es = sizeof(uint32_t);

        for (i = reg; i < reg + len; i++) {
                if (i == reg || i % es == 0)
                        val = nfi_read32(snf, i & ~(es - 1));

                *data++ = (uint8_t)(val >> (8 * (i % es)));
        }
}

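/*
 * The NFI raw page layout interleaves main data and spare bytes per sector,
 * so the byte that lands at the standard bad-block-marker position (the
 * first physical spare byte, offset 'writesize') actually belongs to the
 * last sector's main data. On SoCs with bbm_swap set, the helpers below
 * exchange the markers so the factory BBM stays detectable at the standard
 * position in both the raw and the formatted view.
 */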
static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
        uint8_t tmp = *bm1;
        *bm1 = *bm2;
        *bm2 = tmp;
}

static void mtk_snand_bm_swap_raw(struct mtk_snand *snf)
{
        uint32_t fdm_bbm_pos;

        if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
                return;

        fdm_bbm_pos = (snf->ecc_steps - 1) * snf->raw_sector_size +
                      snf->nfi_soc->sector_size;
        do_bm_swap(&snf->page_cache[fdm_bbm_pos],
                   &snf->page_cache[snf->writesize]);
}

static void mtk_snand_bm_swap(struct mtk_snand *snf)
{
        uint32_t buf_bbm_pos, fdm_bbm_pos;

        if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
                return;

        buf_bbm_pos = snf->writesize -
                      (snf->ecc_steps - 1) * snf->spare_per_sector;
        fdm_bbm_pos = snf->writesize +
                      (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
        do_bm_swap(&snf->page_cache[fdm_bbm_pos],
                   &snf->page_cache[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand *snf)
{
        uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

        if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
                return;

        fdm_bbm_pos1 = snf->nfi_soc->sector_size;
        fdm_bbm_pos2 = (snf->ecc_steps - 1) * snf->raw_sector_size +
                       snf->nfi_soc->sector_size;
        do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
                   &snf->page_cache[fdm_bbm_pos2]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
        uint32_t fdm_bbm_pos1, fdm_bbm_pos2;

        if (!snf->nfi_soc->bbm_swap || snf->ecc_steps == 1)
                return;

        fdm_bbm_pos1 = snf->writesize;
        fdm_bbm_pos2 = snf->writesize +
                       (snf->ecc_steps - 1) * snf->nfi_soc->fdm_size;
        do_bm_swap(&snf->page_cache[fdm_bbm_pos1],
                   &snf->page_cache[fdm_bbm_pos2]);
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
        uint32_t val, fifo_mask;
        int ret;

        nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

        ret = read16_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
                                  !(val & snf->nfi_soc->mastersta_mask), 0,
                                  SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "NFI master is still busy after reset\n");
                return ret;
        }

        ret = read32_poll_timeout(snf->nfi_base + NFI_STA, val,
                                  !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
                                  SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev, "Failed to reset NFI\n");
                return ret;
        }

        fifo_mask = ((snf->nfi_soc->fifo_size - 1) << FIFO_RD_REMAIN_S) |
                    ((snf->nfi_soc->fifo_size - 1) << FIFO_WR_REMAIN_S);
        ret = read16_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
                                  !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev, "NFI FIFOs are not empty\n");
                return ret;
        }

        return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
        int ret;
        uint32_t val;

        nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

        ret = read32_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
                                  !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
        if (ret)
                snand_log_snfi(snf->pdev, "Failed to reset SNFI MAC\n");

        nfi_write32(snf, SNF_MISC_CTL, (2 << FIFO_RD_LTC_S) |
                    (10 << CS_DESELECT_CYC_S));

        return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, uint32_t outlen,
                                 uint32_t inlen)
{
        int ret;
        uint32_t val;

        nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
        nfi_write32(snf, SNF_MAC_OUTL, outlen);
        nfi_write32(snf, SNF_MAC_INL, inlen);

        nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

        ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
                                  val & WIP_READY, 0, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_snfi(snf->pdev, "Timed out waiting for WIP_READY\n");
                goto cleanup;
        }

        ret = read32_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
                                  !(val & WIP), 0, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_snfi(snf->pdev,
                               "Timed out waiting for WIP cleared\n");
        }

cleanup:
        nfi_write32(snf, SNF_MAC_CTL, 0);

        return ret;
}

int mtk_snand_mac_io(struct mtk_snand *snf, const uint8_t *out, uint32_t outlen,
                     uint8_t *in, uint32_t inlen)
{
        int ret;

        if (outlen + inlen > SNF_GPRAM_SIZE)
                return -EINVAL;

        mtk_snand_mac_reset(snf);

        nfi_write_data(snf, SNF_GPRAM, out, outlen);

        ret = mtk_snand_mac_trigger(snf, outlen, inlen);
        if (ret)
                return ret;

        if (!inlen)
                return 0;

        nfi_read_data(snf, SNF_GPRAM + outlen, in, inlen);

        return 0;
}

static int mtk_snand_get_feature(struct mtk_snand *snf, uint32_t addr)
{
        uint8_t op[2], val;
        int ret;

        op[0] = SNAND_CMD_GET_FEATURE;
        op[1] = (uint8_t)addr;

        ret = mtk_snand_mac_io(snf, op, sizeof(op), &val, 1);
        if (ret)
                return ret;

        return val;
}

int mtk_snand_set_feature(struct mtk_snand *snf, uint32_t addr, uint32_t val)
{
        uint8_t op[3];

        op[0] = SNAND_CMD_SET_FEATURE;
        op[1] = (uint8_t)addr;
        op[2] = (uint8_t)val;

        return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

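/*
 * Poll the on-flash status register until OIP (operation in progress)
 * clears. Returns the ERASE_FAIL/PROGRAM_FAIL status bits (0 on success),
 * or -ETIMEDOUT.
 */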
static int mtk_snand_poll_status(struct mtk_snand *snf, uint32_t wait_us)
{
        int val;
        mtk_snand_time_t time_start, tmo;

        time_start = timer_get_ticks();
        tmo = timer_time_to_tick(wait_us);

        do {
                val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
                if (!(val & SNAND_STATUS_OIP))
                        return val & (SNAND_STATUS_ERASE_FAIL |
                                      SNAND_STATUS_PROGRAM_FAIL);
        } while (!timer_is_timeout(time_start, tmo));

        return -ETIMEDOUT;
}

int mtk_snand_chip_reset(struct mtk_snand *snf)
{
        uint8_t op = SNAND_CMD_RESET;
        int ret;

        ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
        if (ret)
                return ret;

        ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
        if (ret < 0)
                return ret;

        return 0;
}

static int mtk_snand_config_feature(struct mtk_snand *snf, uint8_t clr,
                                    uint8_t set)
{
        int val, newval;
        int ret;

        val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
        if (val < 0) {
                snand_log_chip(snf->pdev,
                               "Failed to get configuration feature\n");
                return val;
        }

        newval = (val & (~clr)) | set;

        if (newval == val)
                return 0;

        ret = mtk_snand_set_feature(snf, SNAND_FEATURE_CONFIG_ADDR,
                                    (uint8_t)newval);
        if (ret) {
                snand_log_chip(snf->pdev,
                               "Failed to set configuration feature\n");
                return ret;
        }

        val = mtk_snand_get_feature(snf, SNAND_FEATURE_CONFIG_ADDR);
        if (val < 0) {
                snand_log_chip(snf->pdev,
                               "Failed to get configuration feature\n");
                return val;
        }

        if (newval != val)
                return -ENOTSUPP;

        return 0;
}

static int mtk_snand_ondie_ecc_control(struct mtk_snand *snf, bool enable)
{
        int ret;

        if (enable)
                ret = mtk_snand_config_feature(snf, 0, SNAND_FEATURE_ECC_EN);
        else
                ret = mtk_snand_config_feature(snf, SNAND_FEATURE_ECC_EN, 0);

        if (ret) {
                snand_log_chip(snf->pdev, "Failed to %s On-Die ECC engine\n",
                               enable ? "enable" : "disable");
        }

        return ret;
}

static int mtk_snand_qspi_control(struct mtk_snand *snf, bool enable)
{
        int ret;

        if (enable) {
                ret = mtk_snand_config_feature(snf, 0,
                                               SNAND_FEATURE_QUAD_ENABLE);
        } else {
                ret = mtk_snand_config_feature(snf,
                                               SNAND_FEATURE_QUAD_ENABLE, 0);
        }

        if (ret) {
                snand_log_chip(snf->pdev, "Failed to %s quad spi\n",
                               enable ? "enable" : "disable");
        }

        return ret;
}

static int mtk_snand_unlock(struct mtk_snand *snf)
{
        int ret;

        ret = mtk_snand_set_feature(snf, SNAND_FEATURE_PROTECT_ADDR, 0);
        if (ret) {
                snand_log_chip(snf->pdev, "Failed to set protection feature\n");
                return ret;
        }

        return 0;
}

static int mtk_snand_write_enable(struct mtk_snand *snf)
{
        uint8_t op = SNAND_CMD_WRITE_ENABLE;
        int ret, val;

        ret = mtk_snand_mac_io(snf, &op, 1, NULL, 0);
        if (ret)
                return ret;

        val = mtk_snand_get_feature(snf, SNAND_FEATURE_STATUS_ADDR);
        if (val < 0)
                return val;

        if (val & SNAND_STATUS_WEL)
                return 0;

        snand_log_chip(snf->pdev, "Failed to send write-enable command\n");

        return -ENOTSUPP;
}

static int mtk_snand_select_die(struct mtk_snand *snf, uint32_t dieidx)
{
        if (!snf->select_die)
                return 0;

        return snf->select_die(snf, dieidx);
}

static uint64_t mtk_snand_select_die_address(struct mtk_snand *snf,
                                             uint64_t addr)
{
        uint32_t dieidx;

        if (!snf->select_die)
                return addr;

        dieidx = addr >> snf->die_shift;

        mtk_snand_select_die(snf, dieidx);

        return addr & snf->die_mask;
}

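/*
 * Dual-plane flashes encode the plane in the column address, one bit above
 * the in-page offset bits; the plane equals the LSB of the block index.
 * Chips without planes ignore the extra column-address bit.
 */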
static uint32_t mtk_snand_get_plane_address(struct mtk_snand *snf,
                                            uint32_t page)
{
        uint32_t pages_per_block;

        pages_per_block = 1 << (snf->erasesize_shift - snf->writesize_shift);

        if (page & pages_per_block)
                return 1 << (snf->writesize_shift + 1);

        return 0;
}

static int mtk_snand_page_op(struct mtk_snand *snf, uint32_t page, uint8_t cmd)
{
        uint8_t op[4];

        op[0] = cmd;
        op[1] = (page >> 16) & 0xff;
        op[2] = (page >> 8) & 0xff;
        op[3] = page & 0xff;

        return mtk_snand_mac_io(snf, op, sizeof(op), NULL, 0);
}

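/*
 * Each sector has up to 8 bytes of FDM (OOB) data held in two little-endian
 * NFI registers per sector: FDML = FDM bytes 0-3, FDMM = bytes 4-7.
 */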
static void mtk_snand_read_fdm(struct mtk_snand *snf, uint8_t *buf)
{
        uint32_t vall, valm;
        uint8_t *oobptr = buf;
        int i, j;

        for (i = 0; i < snf->ecc_steps; i++) {
                vall = nfi_read32(snf, NFI_FDML(i));
                valm = nfi_read32(snf, NFI_FDMM(i));

                for (j = 0; j < snf->nfi_soc->fdm_size; j++)
                        oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

                oobptr += snf->nfi_soc->fdm_size;
        }
}

static int mtk_snand_read_ecc_parity(struct mtk_snand *snf, uint32_t page,
                                     uint32_t sect, uint8_t *oob)
{
        uint32_t ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
        uint32_t coladdr, raw_offs, offs;
        uint8_t op[4];

        if (sizeof(op) + ecc_bytes > SNF_GPRAM_SIZE) {
                snand_log_snfi(snf->pdev,
                               "ECC parity size does not fit the GPRAM\n");
                return -ENOTSUPP;
        }

        raw_offs = sect * snf->raw_sector_size + snf->nfi_soc->sector_size +
                   snf->nfi_soc->fdm_size;
        offs = snf->ecc_steps * snf->nfi_soc->fdm_size + sect * ecc_bytes;

        /* Column address with plane bit */
        coladdr = raw_offs | mtk_snand_get_plane_address(snf, page);

        op[0] = SNAND_CMD_READ_FROM_CACHE;
        op[1] = (coladdr >> 8) & 0xff;
        op[2] = coladdr & 0xff;
        op[3] = 0;

        return mtk_snand_mac_io(snf, op, sizeof(op), oob + offs, ecc_bytes);
}

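/*
 * sect_bf[] is filled by mtk_ecc_check_decode_error(): a non-negative value
 * is the corrected bitflip count, a negative value marks a sector the ECC
 * engine reported as uncorrectable. For the latter, re-read the raw ECC
 * parity and let mtk_ecc_fixup_empty_sector() decide whether the sector is
 * actually an erased (all-0xff) sector with a tolerable number of bitflips.
 */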
static int mtk_snand_check_ecc_result(struct mtk_snand *snf, uint32_t page)
{
        uint8_t *oob = snf->page_cache + snf->writesize;
        int i, rc, ret = 0, max_bitflips = 0;

        for (i = 0; i < snf->ecc_steps; i++) {
                if (snf->sect_bf[i] >= 0) {
                        if (snf->sect_bf[i] > max_bitflips)
                                max_bitflips = snf->sect_bf[i];
                        continue;
                }

                rc = mtk_snand_read_ecc_parity(snf, page, i, oob);
                if (rc)
                        return rc;

                rc = mtk_ecc_fixup_empty_sector(snf, i);
                if (rc < 0) {
                        ret = -EBADMSG;

                        snand_log_ecc(snf->pdev,
                                      "Uncorrectable bitflips in page %u sect %u\n",
                                      page, i);
                } else if (rc) {
                        snf->sect_bf[i] = rc;

                        if (snf->sect_bf[i] > max_bitflips)
                                max_bitflips = snf->sect_bf[i];

                        snand_log_ecc(snf->pdev,
                                      "%u bitflip%s corrected in page %u sect %u\n",
                                      rc, rc > 1 ? "s" : "", page, i);
                } else {
                        snf->sect_bf[i] = 0;
                }
        }

        return ret ? ret : max_bitflips;
}

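/*
 * Read a page from the flash's cache register into page_cache via DMA. The
 * SNFI emits the read-from-cache opcode itself (custom read mode) while the
 * NFI unpacks sectors and, unless 'raw' is set, runs the HW ECC decoder on
 * the fly.
 */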
static int mtk_snand_read_cache(struct mtk_snand *snf, uint32_t page, bool raw)
{
        uint32_t coladdr, rwbytes, mode, len, val;
        uintptr_t dma_addr;
        int ret;

        /* Column address with plane bit */
        coladdr = mtk_snand_get_plane_address(snf, page);

        mtk_snand_mac_reset(snf);
        mtk_nfi_reset(snf);

        /* Command and dummy cycles */
        nfi_write32(snf, SNF_RD_CTL2,
                    ((uint32_t)snf->dummy_rfc << DATA_READ_DUMMY_S) |
                    (snf->opcode_rfc << DATA_READ_CMD_S));

        /* Column address */
        nfi_write32(snf, SNF_RD_CTL3, coladdr);

        /* Set read mode */
        mode = (uint32_t)snf->mode_rfc << DATA_READ_MODE_S;
        nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, mode | DATARD_CUSTOM_EN);

        /* Set bytes to read */
        rwbytes = snf->ecc_steps * snf->raw_sector_size;
        nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
                    rwbytes);

        /* NFI read prepare */
        mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
        nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) |
                    CNFG_DMA_BURST_EN | CNFG_READ_MODE | CNFG_DMA_MODE | mode);

        nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

        /* Prepare for DMA read */
        len = snf->writesize + snf->oobsize;
        ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, false);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "DMA map from device failed with %d\n", ret);
                return ret;
        }

        nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

        if (!raw)
                mtk_snand_ecc_decoder_start(snf);

        /* Prepare for custom read interrupt */
        nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
        irq_completion_init(snf->pdev);

        /* Trigger NFI into custom mode */
        nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

        /* Start DMA read */
        nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
        nfi_write16(snf, NFI_STRDATA, STR_DATA);

        /* Wait for operation finished */
        ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
                                  CUS_READ_DONE, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "DMA timed out for reading from cache\n");
                goto cleanup;
        }

        /* Wait for BUS_SEC_CNTR returning expected value */
        ret = read32_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
                                  BUS_SEC_CNTR(val) >= snf->ecc_steps,
                                  0, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "Timed out waiting for BUS_SEC_CNTR\n");
                goto cleanup;
        }

        /* Wait for bus becoming idle */
        ret = read32_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
                                  !(val & snf->nfi_soc->mastersta_mask),
                                  0, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "Timed out waiting for bus becoming idle\n");
                goto cleanup;
        }

        if (!raw) {
                ret = mtk_ecc_wait_decoder_done(snf);
                if (ret)
                        goto cleanup;

                mtk_snand_read_fdm(snf, snf->page_cache + snf->writesize);

                mtk_ecc_check_decode_error(snf);
                mtk_snand_ecc_decoder_stop(snf);

                ret = mtk_snand_check_ecc_result(snf, page);
        }

cleanup:
        /* DMA cleanup */
        dma_mem_unmap(snf->pdev, dma_addr, len, false);

        /* Stop read */
        nfi_write32(snf, NFI_CON, 0);
        nfi_write16(snf, NFI_CNFG, 0);

        /* Clear SNF done flag */
        nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
        nfi_write32(snf, SNF_STA_CTL1, 0);

        /* Disable interrupt */
        nfi_read32(snf, NFI_INTR_STA);
        nfi_write32(snf, NFI_INTR_EN, 0);

        nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);

        return ret;
}

static void mtk_snand_from_raw_page(struct mtk_snand *snf, void *buf, void *oob)
{
        uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
        uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
        uint8_t *bufptr = buf, *oobptr = oob, *raw_sector;

        for (i = 0; i < snf->ecc_steps; i++) {
                raw_sector = snf->page_cache + i * snf->raw_sector_size;

                if (buf) {
                        memcpy(bufptr, raw_sector, snf->nfi_soc->sector_size);
                        bufptr += snf->nfi_soc->sector_size;
                }

                raw_sector += snf->nfi_soc->sector_size;

                if (oob) {
                        memcpy(oobptr, raw_sector, snf->nfi_soc->fdm_size);
                        oobptr += snf->nfi_soc->fdm_size;
                        raw_sector += snf->nfi_soc->fdm_size;

                        memcpy(eccptr, raw_sector, ecc_bytes);
                        eccptr += ecc_bytes;
                }
        }
}

static int mtk_snand_do_read_page(struct mtk_snand *snf, uint64_t addr,
                                  void *buf, void *oob, bool raw, bool format)
{
        uint64_t die_addr;
        uint32_t page;
        int ret;

        die_addr = mtk_snand_select_die_address(snf, addr);
        page = die_addr >> snf->writesize_shift;

        ret = mtk_snand_page_op(snf, page, SNAND_CMD_READ_TO_CACHE);
        if (ret)
                return ret;

        ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
        if (ret < 0) {
                snand_log_chip(snf->pdev, "Read to cache command timed out\n");
                return ret;
        }

        ret = mtk_snand_read_cache(snf, page, raw);
        if (ret < 0 && ret != -EBADMSG)
                return ret;

        if (raw) {
                if (format) {
                        mtk_snand_bm_swap_raw(snf);
                        mtk_snand_fdm_bm_swap_raw(snf);
                        mtk_snand_from_raw_page(snf, buf, oob);
                } else {
                        if (buf)
                                memcpy(buf, snf->page_cache, snf->writesize);

                        if (oob) {
                                memset(oob, 0xff, snf->oobsize);
                                memcpy(oob, snf->page_cache + snf->writesize,
                                       snf->ecc_steps * snf->spare_per_sector);
                        }
                }
        } else {
                mtk_snand_bm_swap(snf);
                mtk_snand_fdm_bm_swap(snf);

                if (buf)
                        memcpy(buf, snf->page_cache, snf->writesize);

                if (oob) {
                        memset(oob, 0xff, snf->oobsize);
                        memcpy(oob, snf->page_cache + snf->writesize,
                               snf->ecc_steps * snf->nfi_soc->fdm_size);
                }
        }

        return ret;
}

int mtk_snand_read_page(struct mtk_snand *snf, uint64_t addr, void *buf,
                        void *oob, bool raw)
{
        if (!snf || (!buf && !oob))
                return -EINVAL;

        if (addr >= snf->size)
                return -EINVAL;

        return mtk_snand_do_read_page(snf, addr, buf, oob, raw, true);
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const uint8_t *buf)
{
        uint32_t vall, valm, fdm_size = snf->nfi_soc->fdm_size;
        const uint8_t *oobptr = buf;
        int i, j;

        for (i = 0; i < snf->ecc_steps; i++) {
                vall = 0;
                valm = 0;

                for (j = 0; j < 8; j++) {
                        if (j < 4)
                                vall |= (j < fdm_size ? oobptr[j] : 0xff)
                                        << (j * 8);
                        else
                                valm |= (j < fdm_size ? oobptr[j] : 0xff)
                                        << ((j - 4) * 8);
                }

                nfi_write32(snf, NFI_FDML(i), vall);
                nfi_write32(snf, NFI_FDMM(i), valm);

                oobptr += fdm_size;
        }
}

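/*
 * Mirror image of mtk_snand_read_cache(): DMA page_cache out through the
 * NFI (optionally ECC-encoding it on the fly) while the SNFI emits the
 * program-load opcode in custom write mode.
 */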
static int mtk_snand_program_load(struct mtk_snand *snf, uint32_t page,
                                  bool raw)
{
        uint32_t coladdr, rwbytes, mode, len, val;
        uintptr_t dma_addr;
        int ret;

        /* Column address with plane bit */
        coladdr = mtk_snand_get_plane_address(snf, page);

        mtk_snand_mac_reset(snf);
        mtk_nfi_reset(snf);

        /* Write FDM registers if necessary */
        if (!raw)
                mtk_snand_write_fdm(snf, snf->page_cache + snf->writesize);

        /* Command */
        nfi_write32(snf, SNF_PG_CTL1, (snf->opcode_pl << PG_LOAD_CMD_S));

        /* Column address */
        nfi_write32(snf, SNF_PG_CTL2, coladdr);

        /* Set write mode */
        mode = snf->mode_pl ? PG_LOAD_X4_EN : 0;
        nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, mode | PG_LOAD_CUSTOM_EN);

        /* Set bytes to write */
        rwbytes = snf->ecc_steps * snf->raw_sector_size;
        nfi_write32(snf, SNF_MISC_CTL2, (rwbytes << PROGRAM_LOAD_BYTE_NUM_S) |
                    rwbytes);

        /* NFI write prepare */
        mode = raw ? 0 : CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
        nfi_write16(snf, NFI_CNFG, (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
                    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | mode);

        nfi_write32(snf, NFI_CON, (snf->ecc_steps << CON_SEC_NUM_S));

        /* Prepare for DMA write */
        len = snf->writesize + snf->oobsize;
        ret = dma_mem_map(snf->pdev, snf->page_cache, &dma_addr, len, true);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "DMA map to device failed with %d\n", ret);
                return ret;
        }

        nfi_write32(snf, NFI_STRADDR, (uint32_t)dma_addr);

        if (!raw)
                mtk_snand_ecc_encoder_start(snf);

        /* Prepare for custom write interrupt */
        nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
        irq_completion_init(snf->pdev);

        /* Trigger NFI into custom mode */
        nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

        /* Start DMA write */
        nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
        nfi_write16(snf, NFI_STRDATA, STR_DATA);

        /* Wait for operation finished */
        ret = irq_completion_wait(snf->pdev, snf->nfi_base + SNF_STA_CTL1,
                                  CUS_PG_DONE, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "DMA timed out for program load\n");
                goto cleanup;
        }

        /* Wait for NFI_SEC_CNTR returning expected value */
        ret = read32_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
                                  NFI_SEC_CNTR(val) >= snf->ecc_steps,
                                  0, SNFI_POLL_INTERVAL);
        if (ret) {
                snand_log_nfi(snf->pdev,
                              "Timed out waiting for NFI_SEC_CNTR\n");
                goto cleanup;
        }

        if (!raw)
                mtk_snand_ecc_encoder_stop(snf);

cleanup:
        /* DMA cleanup */
        dma_mem_unmap(snf->pdev, dma_addr, len, true);

        /* Stop write */
        nfi_write32(snf, NFI_CON, 0);
        nfi_write16(snf, NFI_CNFG, 0);

        /* Clear SNF done flag */
        nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
        nfi_write32(snf, SNF_STA_CTL1, 0);

        /* Disable interrupt */
        nfi_read32(snf, NFI_INTR_STA);
        nfi_write32(snf, NFI_INTR_EN, 0);

        nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

        return ret;
}

static void mtk_snand_to_raw_page(struct mtk_snand *snf,
                                  const void *buf, const void *oob,
                                  bool empty_ecc)
{
        uint32_t i, ecc_bytes = snf->spare_per_sector - snf->nfi_soc->fdm_size;
        const uint8_t *eccptr = oob + snf->ecc_steps * snf->nfi_soc->fdm_size;
        const uint8_t *bufptr = buf, *oobptr = oob;
        uint8_t *raw_sector;

        memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
        for (i = 0; i < snf->ecc_steps; i++) {
                raw_sector = snf->page_cache + i * snf->raw_sector_size;

                if (buf) {
                        memcpy(raw_sector, bufptr, snf->nfi_soc->sector_size);
                        bufptr += snf->nfi_soc->sector_size;
                }

                raw_sector += snf->nfi_soc->sector_size;

                if (oob) {
                        memcpy(raw_sector, oobptr, snf->nfi_soc->fdm_size);
                        oobptr += snf->nfi_soc->fdm_size;
                        raw_sector += snf->nfi_soc->fdm_size;

                        if (empty_ecc)
                                memset(raw_sector, 0xff, ecc_bytes);
                        else
                                memcpy(raw_sector, eccptr, ecc_bytes);
                        eccptr += ecc_bytes;
                }
        }
}

static bool mtk_snand_is_empty_page(struct mtk_snand *snf, const void *buf,
                                    const void *oob)
{
        const uint8_t *p = buf;
        uint32_t i, j;

        if (buf) {
                for (i = 0; i < snf->writesize; i++) {
                        if (p[i] != 0xff)
                                return false;
                }
        }

        if (oob) {
                for (j = 0; j < snf->ecc_steps; j++) {
                        p = oob + j * snf->nfi_soc->fdm_size;

                        for (i = 0; i < snf->nfi_soc->fdm_ecc_size; i++) {
                                if (p[i] != 0xff)
                                        return false;
                        }
                }
        }

        return true;
}

static int mtk_snand_do_write_page(struct mtk_snand *snf, uint64_t addr,
                                   const void *buf, const void *oob,
                                   bool raw, bool format)
{
        uint64_t die_addr;
        bool empty_ecc = false;
        uint32_t page;
        int ret;

        die_addr = mtk_snand_select_die_address(snf, addr);
        page = die_addr >> snf->writesize_shift;

        if (!raw && mtk_snand_is_empty_page(snf, buf, oob)) {
                /*
                 * If the page to be ECC-protected contains only 0xff,
                 * switch to raw write mode
                 */
                raw = true;
                format = true;

                /* fill ecc parity code region with 0xff */
                empty_ecc = true;
        }

        if (raw) {
                if (format) {
                        mtk_snand_to_raw_page(snf, buf, oob, empty_ecc);
                        mtk_snand_fdm_bm_swap_raw(snf);
                        mtk_snand_bm_swap_raw(snf);
                } else {
                        memset(snf->page_cache, 0xff,
                               snf->writesize + snf->oobsize);

                        if (buf)
                                memcpy(snf->page_cache, buf, snf->writesize);

                        if (oob) {
                                memcpy(snf->page_cache + snf->writesize, oob,
                                       snf->ecc_steps * snf->spare_per_sector);
                        }
                }
        } else {
                memset(snf->page_cache, 0xff, snf->writesize + snf->oobsize);
                if (buf)
                        memcpy(snf->page_cache, buf, snf->writesize);

                if (oob) {
                        memcpy(snf->page_cache + snf->writesize, oob,
                               snf->ecc_steps * snf->nfi_soc->fdm_size);
                }

                mtk_snand_fdm_bm_swap(snf);
                mtk_snand_bm_swap(snf);
        }

        ret = mtk_snand_write_enable(snf);
        if (ret)
                return ret;

        ret = mtk_snand_program_load(snf, page, raw);
        if (ret)
                return ret;

        ret = mtk_snand_page_op(snf, page, SNAND_CMD_PROGRAM_EXECUTE);
        if (ret)
                return ret;

        ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
        if (ret < 0) {
                snand_log_chip(snf->pdev,
                               "Page program command timed out on page %u\n",
                               page);
                return ret;
        }

        if (ret & SNAND_STATUS_PROGRAM_FAIL) {
                snand_log_chip(snf->pdev,
                               "Page program failed on page %u\n", page);
                return -EIO;
        }

        return 0;
}

int mtk_snand_write_page(struct mtk_snand *snf, uint64_t addr, const void *buf,
                         const void *oob, bool raw)
{
        if (!snf || (!buf && !oob))
                return -EINVAL;

        if (addr >= snf->size)
                return -EINVAL;

        return mtk_snand_do_write_page(snf, addr, buf, oob, raw, true);
}

int mtk_snand_erase_block(struct mtk_snand *snf, uint64_t addr)
{
        uint64_t die_addr;
        uint32_t page, block;
        int ret;

        if (!snf)
                return -EINVAL;

        if (addr >= snf->size)
                return -EINVAL;

        die_addr = mtk_snand_select_die_address(snf, addr);
        block = die_addr >> snf->erasesize_shift;
        page = block << (snf->erasesize_shift - snf->writesize_shift);

        ret = mtk_snand_write_enable(snf);
        if (ret)
                return ret;

        ret = mtk_snand_page_op(snf, page, SNAND_CMD_BLOCK_ERASE);
        if (ret)
                return ret;

        ret = mtk_snand_poll_status(snf, SNFI_POLL_INTERVAL);
        if (ret < 0) {
                snand_log_chip(snf->pdev,
                               "Block erase command timed out on block %u\n",
                               block);
                return ret;
        }

        if (ret & SNAND_STATUS_ERASE_FAIL) {
                snand_log_chip(snf->pdev,
                               "Block erase failed on block %u\n", block);
                return -EIO;
        }

        return 0;
}

static int mtk_snand_block_isbad_std(struct mtk_snand *snf, uint64_t addr)
{
        int ret;

        ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
                                     false);
        if (ret && ret != -EBADMSG)
                return ret;

        return snf->buf_cache[0] != 0xff;
}

static int mtk_snand_block_isbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
        int ret;

        ret = mtk_snand_do_read_page(snf, addr, NULL, snf->buf_cache, true,
                                     true);
        if (ret && ret != -EBADMSG)
                return ret;

        return snf->buf_cache[0] != 0xff;
}

int mtk_snand_block_isbad(struct mtk_snand *snf, uint64_t addr)
{
        if (!snf)
                return -EINVAL;

        if (addr >= snf->size)
                return -EINVAL;

        addr &= ~snf->erasesize_mask;

        if (snf->nfi_soc->bbm_swap)
                return mtk_snand_block_isbad_std(snf, addr);

        return mtk_snand_block_isbad_mtk(snf, addr);
}

static int mtk_snand_block_markbad_std(struct mtk_snand *snf, uint64_t addr)
{
        /* Standard BBM position */
        memset(snf->buf_cache, 0xff, snf->oobsize);
        snf->buf_cache[0] = 0;

        return mtk_snand_do_write_page(snf, addr, NULL, snf->buf_cache, true,
                                       false);
}

static int mtk_snand_block_markbad_mtk(struct mtk_snand *snf, uint64_t addr)
{
        /* Write the whole page with zeros */
        memset(snf->buf_cache, 0, snf->writesize + snf->oobsize);

        return mtk_snand_do_write_page(snf, addr, snf->buf_cache,
                                       snf->buf_cache + snf->writesize, true,
                                       true);
}

int mtk_snand_block_markbad(struct mtk_snand *snf, uint64_t addr)
{
        if (!snf)
                return -EINVAL;

        if (addr >= snf->size)
                return -EINVAL;

        addr &= ~snf->erasesize_mask;

        if (snf->nfi_soc->bbm_swap)
                return mtk_snand_block_markbad_std(snf, addr);

        return mtk_snand_block_markbad_mtk(snf, addr);
}

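/*
 * Byte 0 of each sector's FDM is reserved for the bad-block marker, so only
 * fdm_size - 1 bytes per sector carry user OOB data; hence the '+ 1' offset
 * in the copy loops below. Both helpers return the number of bytes that did
 * not fit.
 */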
int mtk_snand_fill_oob(struct mtk_snand *snf, uint8_t *oobraw,
                       const uint8_t *oobbuf, size_t ooblen)
{
        size_t len = ooblen, sect_fdm_len;
        const uint8_t *oob = oobbuf;
        uint32_t step = 0;

        if (!snf || !oobraw || !oob)
                return -EINVAL;

        while (len && step < snf->ecc_steps) {
                sect_fdm_len = snf->nfi_soc->fdm_size - 1;
                if (sect_fdm_len > len)
                        sect_fdm_len = len;

                memcpy(oobraw + step * snf->nfi_soc->fdm_size + 1, oob,
                       sect_fdm_len);

                len -= sect_fdm_len;
                oob += sect_fdm_len;
                step++;
        }

        return len;
}

int mtk_snand_transfer_oob(struct mtk_snand *snf, uint8_t *oobbuf,
                           size_t ooblen, const uint8_t *oobraw)
{
        size_t len = ooblen, sect_fdm_len;
        uint8_t *oob = oobbuf;
        uint32_t step = 0;

        if (!snf || !oobraw || !oob)
                return -EINVAL;

        while (len && step < snf->ecc_steps) {
                sect_fdm_len = snf->nfi_soc->fdm_size - 1;
                if (sect_fdm_len > len)
                        sect_fdm_len = len;

                memcpy(oob, oobraw + step * snf->nfi_soc->fdm_size + 1,
                       sect_fdm_len);

                len -= sect_fdm_len;
                oob += sect_fdm_len;
                step++;
        }

        return len;
}

int mtk_snand_read_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
                                 void *buf, void *oob, size_t ooblen,
                                 size_t *actualooblen, bool raw)
{
        int ret, oobremain;

        if (!snf)
                return -EINVAL;

        if (!oob)
                return mtk_snand_read_page(snf, addr, buf, NULL, raw);

        ret = mtk_snand_read_page(snf, addr, buf, snf->buf_cache, raw);
        if (ret && ret != -EBADMSG) {
                if (actualooblen)
                        *actualooblen = 0;
                return ret;
        }

        oobremain = mtk_snand_transfer_oob(snf, oob, ooblen, snf->buf_cache);
        if (actualooblen)
                *actualooblen = ooblen - oobremain;

        return ret;
}

int mtk_snand_write_page_auto_oob(struct mtk_snand *snf, uint64_t addr,
                                  const void *buf, const void *oob,
                                  size_t ooblen, size_t *actualooblen, bool raw)
{
        int oobremain;

        if (!snf)
                return -EINVAL;

        if (!oob)
                return mtk_snand_write_page(snf, addr, buf, NULL, raw);

        memset(snf->buf_cache, 0xff, snf->oobsize);
        oobremain = mtk_snand_fill_oob(snf, snf->buf_cache, oob, ooblen);
        if (actualooblen)
                *actualooblen = ooblen - oobremain;

        return mtk_snand_write_page(snf, addr, buf, snf->buf_cache, raw);
}

int mtk_snand_get_chip_info(struct mtk_snand *snf,
                            struct mtk_snand_chip_info *info)
{
        if (!snf || !info)
                return -EINVAL;

        info->model = snf->model;
        info->chipsize = snf->size;
        info->blocksize = snf->erasesize;
        info->pagesize = snf->writesize;
        info->sparesize = snf->oobsize;
        info->spare_per_sector = snf->spare_per_sector;
        info->fdm_size = snf->nfi_soc->fdm_size;
        info->fdm_ecc_size = snf->nfi_soc->fdm_ecc_size;
        info->num_sectors = snf->ecc_steps;
        info->sector_size = snf->nfi_soc->sector_size;
        info->ecc_strength = snf->ecc_strength;
        info->ecc_bytes = snf->ecc_bytes;

        return 0;
}

int mtk_snand_irq_process(struct mtk_snand *snf)
{
        uint32_t sta, ien;

        if (!snf)
                return -EINVAL;

        sta = nfi_read32(snf, NFI_INTR_STA);
        ien = nfi_read32(snf, NFI_INTR_EN);

        if (!(sta & ien))
                return 0;

        nfi_write32(snf, NFI_INTR_EN, 0);
        irq_completion_done(snf->pdev);

        return 1;
}

static int mtk_snand_select_spare_per_sector(struct mtk_snand *snf)
{
        uint32_t spare_per_step = snf->oobsize / snf->ecc_steps;
        int i, mul = 1;

        /*
         * If we're using the 1KB sector size, HW will automatically
         * double the spare size. So we should only use half of the value.
         */
        if (snf->nfi_soc->sector_size == 1024)
                mul = 2;

        spare_per_step /= mul;

        for (i = snf->nfi_soc->num_spare_size - 1; i >= 0; i--) {
                if (snf->nfi_soc->spare_sizes[i] <= spare_per_step) {
                        snf->spare_per_sector = snf->nfi_soc->spare_sizes[i];
                        snf->spare_per_sector *= mul;
                        return i;
                }
        }

        snand_log_nfi(snf->pdev,
                      "Page size %u+%u is not supported\n", snf->writesize,
                      snf->oobsize);

        return -1;
}

static int mtk_snand_pagefmt_setup(struct mtk_snand *snf)
{
        uint32_t spare_size_shift, pagesize_idx;
        uint32_t sector_size_512;
        int spare_size_idx;

        if (snf->nfi_soc->sector_size == 512) {
                sector_size_512 = NFI_SEC_SEL_512;
                spare_size_shift = NFI_SPARE_SIZE_S;
        } else {
                sector_size_512 = 0;
                spare_size_shift = NFI_SPARE_SIZE_LS_S;
        }

        switch (snf->writesize) {
        case SZ_512:
                pagesize_idx = NFI_PAGE_SIZE_512_2K;
                break;
        case SZ_2K:
                if (snf->nfi_soc->sector_size == 512)
                        pagesize_idx = NFI_PAGE_SIZE_2K_4K;
                else
                        pagesize_idx = NFI_PAGE_SIZE_512_2K;
                break;
        case SZ_4K:
                if (snf->nfi_soc->sector_size == 512)
                        pagesize_idx = NFI_PAGE_SIZE_4K_8K;
                else
                        pagesize_idx = NFI_PAGE_SIZE_2K_4K;
                break;
        case SZ_8K:
                if (snf->nfi_soc->sector_size == 512)
                        pagesize_idx = NFI_PAGE_SIZE_8K_16K;
                else
                        pagesize_idx = NFI_PAGE_SIZE_4K_8K;
                break;
        case SZ_16K:
                pagesize_idx = NFI_PAGE_SIZE_8K_16K;
                break;
        default:
                snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
                              snf->writesize);
                return -ENOTSUPP;
        }

        /* spare_size_idx must stay signed so the error return is detectable */
        spare_size_idx = mtk_snand_select_spare_per_sector(snf);
        if (unlikely(spare_size_idx < 0))
                return -ENOTSUPP;

        snf->raw_sector_size = snf->nfi_soc->sector_size +
                               snf->spare_per_sector;

        /* Setup page format */
        nfi_write32(snf, NFI_PAGEFMT,
                    (snf->nfi_soc->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
                    (snf->nfi_soc->fdm_size << NFI_FDM_NUM_S) |
                    (spare_size_idx << spare_size_shift) |
                    (pagesize_idx << NFI_PAGE_SIZE_S) |
                    sector_size_512);

        return 0;
}

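/*
 * op_cap->caps is a bitmap indexed by enum snand_flash_io, ordered from
 * slowest to fastest I/O mode; fls() thus picks the fastest mode supported
 * by both the controller (snfi_caps) and the flash.
 */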
static enum snand_flash_io mtk_snand_select_opcode(struct mtk_snand *snf,
                                                   uint32_t snfi_caps,
                                                   uint8_t *opcode,
                                                   uint8_t *dummy,
                                                   const struct snand_io_cap *op_cap)
{
        uint32_t i, caps;

        caps = snfi_caps & op_cap->caps;

        i = fls(caps);
        if (i > 0) {
                *opcode = op_cap->opcodes[i - 1].opcode;
                if (dummy)
                        *dummy = op_cap->opcodes[i - 1].dummy;
                return i - 1;
        }

        return __SNAND_IO_MAX;
}

static int mtk_snand_select_opcode_rfc(struct mtk_snand *snf,
                                       uint32_t snfi_caps,
                                       const struct snand_io_cap *op_cap)
{
        enum snand_flash_io idx;

        static const uint8_t rfc_modes[__SNAND_IO_MAX] = {
                [SNAND_IO_1_1_1] = DATA_READ_MODE_X1,
                [SNAND_IO_1_1_2] = DATA_READ_MODE_X2,
                [SNAND_IO_1_2_2] = DATA_READ_MODE_DUAL,
                [SNAND_IO_1_1_4] = DATA_READ_MODE_X4,
                [SNAND_IO_1_4_4] = DATA_READ_MODE_QUAD,
        };

        idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_rfc,
                                      &snf->dummy_rfc, op_cap);
        if (idx >= __SNAND_IO_MAX) {
                snand_log_snfi(snf->pdev,
                               "No capable opcode for read from cache\n");
                return -ENOTSUPP;
        }

        snf->mode_rfc = rfc_modes[idx];

        if (idx == SNAND_IO_1_1_4 || idx == SNAND_IO_1_4_4)
                snf->quad_spi_op = true;

        return 0;
}

static int mtk_snand_select_opcode_pl(struct mtk_snand *snf, uint32_t snfi_caps,
                                      const struct snand_io_cap *op_cap)
{
        enum snand_flash_io idx;

        static const uint8_t pl_modes[__SNAND_IO_MAX] = {
                [SNAND_IO_1_1_1] = 0,
                [SNAND_IO_1_1_4] = 1,
        };

        idx = mtk_snand_select_opcode(snf, snfi_caps, &snf->opcode_pl,
                                      NULL, op_cap);
        if (idx >= __SNAND_IO_MAX) {
                snand_log_snfi(snf->pdev,
                               "No capable opcode for program load\n");
                return -ENOTSUPP;
        }

        snf->mode_pl = pl_modes[idx];

        if (idx == SNAND_IO_1_1_4)
                snf->quad_spi_op = true;

        return 0;
}

static int mtk_snand_setup(struct mtk_snand *snf,
                           const struct snand_flash_info *snand_info)
{
        const struct snand_mem_org *memorg = &snand_info->memorg;
        uint32_t i, msg_size, snfi_caps;
        int ret;

        /* Calculate flash memory organization */
        snf->model = snand_info->model;
        snf->writesize = memorg->pagesize;
        snf->oobsize = memorg->sparesize;
        snf->erasesize = snf->writesize * memorg->pages_per_block;
        snf->die_size = (uint64_t)snf->erasesize * memorg->blocks_per_die;
        snf->size = snf->die_size * memorg->ndies;
        snf->num_dies = memorg->ndies;

        snf->writesize_mask = snf->writesize - 1;
        snf->erasesize_mask = snf->erasesize - 1;
        snf->die_mask = snf->die_size - 1;

        snf->writesize_shift = ffs(snf->writesize) - 1;
        snf->erasesize_shift = ffs(snf->erasesize) - 1;
        snf->die_shift = mtk_snand_ffs64(snf->die_size) - 1;

        snf->select_die = snand_info->select_die;

        /* Determine opcodes for read from cache/program load */
        snfi_caps = SPI_IO_1_1_1 | SPI_IO_1_1_2 | SPI_IO_1_2_2;
        if (snf->snfi_quad_spi)
                snfi_caps |= SPI_IO_1_1_4 | SPI_IO_1_4_4;

        ret = mtk_snand_select_opcode_rfc(snf, snfi_caps, snand_info->cap_rd);
        if (ret)
                return ret;

        ret = mtk_snand_select_opcode_pl(snf, snfi_caps, snand_info->cap_pl);
        if (ret)
                return ret;

        /* ECC and page format */
        snf->ecc_steps = snf->writesize / snf->nfi_soc->sector_size;
        if (snf->ecc_steps > snf->nfi_soc->max_sectors) {
                snand_log_nfi(snf->pdev, "Page size %u is not supported\n",
                              snf->writesize);
                return -ENOTSUPP;
        }

        ret = mtk_snand_pagefmt_setup(snf);
        if (ret)
                return ret;

        msg_size = snf->nfi_soc->sector_size + snf->nfi_soc->fdm_ecc_size;
        ret = mtk_ecc_setup(snf, snf->nfi_base + NFI_FDM0L,
                            snf->spare_per_sector - snf->nfi_soc->fdm_size,
                            msg_size);
        if (ret)
                return ret;

        nfi_write16(snf, NFI_CNFG, 0);

        /* Tuning options */
        nfi_write16(snf, NFI_DEBUG_CON1, WBUF_EN);
        nfi_write32(snf, SNF_DLY_CTL3, (40 << SFCK_SAM_DLY_S));

        /* Interrupts */
        nfi_read32(snf, NFI_INTR_STA);
        nfi_write32(snf, NFI_INTR_EN, 0);

        /* Clear SNF done flag */
        nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE | CUS_PG_DONE);
        nfi_write32(snf, SNF_STA_CTL1, 0);

        /* Initialization on all dies */
        for (i = 0; i < snf->num_dies; i++) {
                mtk_snand_select_die(snf, i);

                /* Disable On-Die ECC engine */
                ret = mtk_snand_ondie_ecc_control(snf, false);
                if (ret)
                        return ret;

                /* Disable block protection */
                mtk_snand_unlock(snf);

                /* Enable/disable quad-spi */
                mtk_snand_qspi_control(snf, snf->quad_spi_op);
        }

        mtk_snand_select_die(snf, 0);

        return 0;
}

static int mtk_snand_id_probe(struct mtk_snand *snf,
                              const struct snand_flash_info **snand_info)
{
        uint8_t id[4], op[2];
        int ret;

        /* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
        op[0] = SNAND_CMD_READID;
        op[1] = 0;
        ret = mtk_snand_mac_io(snf, op, 2, id, sizeof(id));
        if (ret)
                return ret;

        *snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
        if (*snand_info)
                return 0;

        /* Read SPI-NAND JEDEC ID, OP + ID */
        op[0] = SNAND_CMD_READID;
        ret = mtk_snand_mac_io(snf, op, 1, id, sizeof(id));
        if (ret)
                return ret;

        *snand_info = snand_flash_id_lookup(SNAND_ID_DYMMY, id);
        if (*snand_info)
                return 0;

        snand_log_chip(snf->pdev,
                       "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
                       id[0], id[1], id[2], id[3]);

        return -EINVAL;
}

int mtk_snand_init(void *dev, const struct mtk_snand_platdata *pdata,
                   struct mtk_snand **psnf)
{
        const struct snand_flash_info *snand_info;
        uint32_t rawpage_size, sect_bf_size;
        struct mtk_snand tmpsnf, *snf;
        int ret;

        if (!pdata || !psnf)
                return -EINVAL;

        if (pdata->soc >= __SNAND_SOC_MAX) {
                snand_log_chip(dev, "Invalid SOC %u for MTK-SNAND\n",
                               pdata->soc);
                return -EINVAL;
        }

        /* Dummy instance only for initial reset and id probe */
        tmpsnf.nfi_base = pdata->nfi_base;
        tmpsnf.ecc_base = pdata->ecc_base;
        tmpsnf.soc = pdata->soc;
        tmpsnf.nfi_soc = &mtk_snand_socs[pdata->soc];
        tmpsnf.pdev = dev;

        /* Switch to SNFI mode */
        writel(SPI_MODE, tmpsnf.nfi_base + SNF_CFG);

        /* Reset SNFI & NFI */
        mtk_snand_mac_reset(&tmpsnf);
        mtk_nfi_reset(&tmpsnf);

        /* Reset SPI-NAND chip */
        ret = mtk_snand_chip_reset(&tmpsnf);
        if (ret) {
                snand_log_chip(dev, "Failed to reset SPI-NAND chip\n");
                return ret;
        }

        /* Probe SPI-NAND flash by JEDEC ID */
        ret = mtk_snand_id_probe(&tmpsnf, &snand_info);
        if (ret)
                return ret;

        rawpage_size = snand_info->memorg.pagesize +
                       snand_info->memorg.sparesize;

        sect_bf_size = mtk_snand_socs[pdata->soc].max_sectors *
                       sizeof(*snf->sect_bf);

        /* Allocate memory for instance and cache */
        snf = generic_mem_alloc(dev,
                                sizeof(*snf) + rawpage_size + sect_bf_size);
        if (!snf) {
                snand_log_chip(dev, "Failed to allocate memory for instance\n");
                return -ENOMEM;
        }

        snf->sect_bf = (int *)((uintptr_t)snf + sizeof(*snf));
        snf->buf_cache = (uint8_t *)((uintptr_t)snf->sect_bf + sect_bf_size);

        /* Allocate memory for DMA buffer */
        snf->page_cache = dma_mem_alloc(dev, rawpage_size);
        if (!snf->page_cache) {
                generic_mem_free(dev, snf);
                snand_log_chip(dev,
                               "Failed to allocate memory for DMA buffer\n");
                return -ENOMEM;
        }

        /* Fill up instance */
        snf->pdev = dev;
        snf->nfi_base = pdata->nfi_base;
        snf->ecc_base = pdata->ecc_base;
        snf->soc = pdata->soc;
        snf->nfi_soc = &mtk_snand_socs[pdata->soc];
        snf->snfi_quad_spi = pdata->quad_spi;

        /* Initialize SNFI & ECC engine */
        ret = mtk_snand_setup(snf, snand_info);
        if (ret) {
                dma_mem_free(dev, snf->page_cache);
                generic_mem_free(dev, snf);
                return ret;
        }

        *psnf = snf;

        return 0;
}

int mtk_snand_cleanup(struct mtk_snand *snf)
{
        if (!snf)
                return 0;

        dma_mem_free(snf->pdev, snf->page_cache);
        generic_mem_free(snf->pdev, snf);

        return 0;
}
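
/*
 * Example usage (sketch): the expected call sequence for a platform wrapper.
 * The device handle 'dev', the register bases and the buffer below are
 * hypothetical; real values come from the platform glue that embeds this
 * driver.
 *
 *	struct mtk_snand_platdata pdata = {
 *		.nfi_base = nfi_regs,		// ioremapped NFI base (hypothetical)
 *		.ecc_base = ecc_regs,		// ioremapped ECC base (hypothetical)
 *		.soc = SNAND_SOC_MT7622,
 *		.quad_spi = true,
 *	};
 *	struct mtk_snand_chip_info info;
 *	struct mtk_snand *snf;
 *	static uint8_t buf[SZ_2K];		// assumes 2KiB pages for this sketch
 *	int ret;
 *
 *	ret = mtk_snand_init(dev, &pdata, &snf);
 *	if (ret)
 *		return ret;
 *
 *	mtk_snand_get_chip_info(snf, &info);
 *
 *	// read the first page with HW ECC; buf must hold info.pagesize bytes
 *	ret = mtk_snand_read_page(snf, 0, buf, NULL, false);
 *
 *	mtk_snand_cleanup(snf);
 */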