// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include "mtk-snand-def.h"
11 #define NFI_CNFG 0x000
12 #define CNFG_OP_MODE_S 12
13 #define CNFG_OP_MODE_CUST 6
14 #define CNFG_OP_MODE_PROGRAM 3
15 #define CNFG_AUTO_FMT_EN BIT(9)
16 #define CNFG_HW_ECC_EN BIT(8)
17 #define CNFG_DMA_BURST_EN BIT(2)
18 #define CNFG_READ_MODE BIT(1)
19 #define CNFG_DMA_MODE BIT(0)
21 #define NFI_PAGEFMT 0x0004
22 #define NFI_SPARE_SIZE_LS_S 16
23 #define NFI_FDM_ECC_NUM_S 12
24 #define NFI_FDM_NUM_S 8
25 #define NFI_SPARE_SIZE_S 4
26 #define NFI_SEC_SEL_512 BIT(2)
27 #define NFI_PAGE_SIZE_S 0
28 #define NFI_PAGE_SIZE_512_2K 0
29 #define NFI_PAGE_SIZE_2K_4K 1
30 #define NFI_PAGE_SIZE_4K_8K 2
31 #define NFI_PAGE_SIZE_8K_16K 3
34 #define CON_SEC_NUM_S 12
35 #define CON_BWR BIT(9)
36 #define CON_BRD BIT(8)
37 #define CON_NFI_RST BIT(1)
38 #define CON_FIFO_FLUSH BIT(0)
40 #define NFI_INTR_EN 0x010
41 #define NFI_INTR_STA 0x014
42 #define NFI_IRQ_INTR_EN BIT(31)
43 #define NFI_IRQ_CUS_READ BIT(8)
44 #define NFI_IRQ_CUS_PG BIT(7)
48 #define NFI_STRDATA 0x040
49 #define STR_DATA BIT(0)
52 #define NFI_NAND_FSM GENMASK(28, 24)
53 #define NFI_FSM GENMASK(19, 16)
54 #define READ_EMPTY BIT(12)
56 #define NFI_FIFOSTA 0x064
57 #define FIFO_WR_REMAIN_S 8
58 #define FIFO_RD_REMAIN_S 0
60 #define NFI_ADDRCNTR 0x070
61 #define SEC_CNTR GENMASK(16, 12)
63 #define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)
65 #define NFI_STRADDR 0x080
67 #define NFI_BYTELEN 0x084
68 #define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)
70 #define NFI_FDM0L 0x0a0
71 #define NFI_FDM0M 0x0a4
72 #define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
73 #define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)
75 #define NFI_DEBUG_CON1 0x220
76 #define WBUF_EN BIT(2)
78 #define NFI_MASTERSTA 0x224
79 #define MAS_ADDR GENMASK(11, 9)
80 #define MAS_RD GENMASK(8, 6)
81 #define MAS_WR GENMASK(5, 3)
82 #define MAS_RDDLY GENMASK(2, 0)
83 #define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
86 #define SNF_MAC_CTL 0x500
87 #define MAC_XIO_SEL BIT(4)
88 #define SF_MAC_EN BIT(3)
89 #define SF_TRIG BIT(2)
90 #define WIP_READY BIT(1)
93 #define SNF_MAC_OUTL 0x504
94 #define SNF_MAC_INL 0x508
96 #define SNF_RD_CTL2 0x510
97 #define DATA_READ_DUMMY_S 8
98 #define DATA_READ_CMD_S 0
100 #define SNF_RD_CTL3 0x514
102 #define SNF_PG_CTL1 0x524
103 #define PG_LOAD_CMD_S 8
105 #define SNF_PG_CTL2 0x528
107 #define SNF_MISC_CTL 0x538
108 #define SW_RST BIT(28)
109 #define FIFO_RD_LTC_S 25
110 #define PG_LOAD_X4_EN BIT(20)
111 #define DATA_READ_MODE_S 16
112 #define DATA_READ_MODE GENMASK(18, 16)
113 #define DATA_READ_MODE_X1 0
114 #define DATA_READ_MODE_X2 1
115 #define DATA_READ_MODE_X4 2
116 #define DATA_READ_MODE_DUAL 5
117 #define DATA_READ_MODE_QUAD 6
118 #define PG_LOAD_CUSTOM_EN BIT(7)
119 #define DATARD_CUSTOM_EN BIT(6)
120 #define CS_DESELECT_CYC_S 0
122 #define SNF_MISC_CTL2 0x53c
123 #define PROGRAM_LOAD_BYTE_NUM_S 16
124 #define READ_DATA_BYTE_NUM_S 11
126 #define SNF_DLY_CTL3 0x548
127 #define SFCK_SAM_DLY_S 0
129 #define SNF_STA_CTL1 0x550
130 #define CUS_PG_DONE BIT(28)
131 #define CUS_READ_DONE BIT(27)
132 #define SPI_STATE_S 0
133 #define SPI_STATE GENMASK(3, 0)
135 #define SNF_CFG 0x55c
136 #define SPI_MODE BIT(0)
138 #define SNF_GPRAM 0x800
139 #define SNF_GPRAM_SIZE 0xa0
141 #define SNFI_POLL_INTERVAL 1000000
/* Spare-area sizes (bytes per 512-byte sector) supported by the MT7622 NFI */
static const uint8_t mt7622_spare_sizes[] = { 16, 26, 27, 28 };
145 static const struct mtk_snand_soc_data mtk_snand_socs
[__SNAND_SOC_MAX
] = {
146 [SNAND_SOC_MT7622
] = {
153 .empty_page_check
= false,
154 .mastersta_mask
= NFI_MASTERSTA_MASK_7622
,
155 .spare_sizes
= mt7622_spare_sizes
,
156 .num_spare_size
= ARRAY_SIZE(mt7622_spare_sizes
)
158 [SNAND_SOC_MT7629
] = {
165 .empty_page_check
= false,
166 .mastersta_mask
= NFI_MASTERSTA_MASK_7622
,
167 .spare_sizes
= mt7622_spare_sizes
,
168 .num_spare_size
= ARRAY_SIZE(mt7622_spare_sizes
)
172 static inline uint32_t nfi_read32(struct mtk_snand
*snf
, uint32_t reg
)
174 return readl(snf
->nfi_base
+ reg
);
177 static inline void nfi_write32(struct mtk_snand
*snf
, uint32_t reg
,
180 writel(val
, snf
->nfi_base
+ reg
);
183 static inline void nfi_write16(struct mtk_snand
*snf
, uint32_t reg
,
186 writew(val
, snf
->nfi_base
+ reg
);
189 static inline void nfi_rmw32(struct mtk_snand
*snf
, uint32_t reg
, uint32_t clr
,
194 val
= readl(snf
->nfi_base
+ reg
);
197 writel(val
, snf
->nfi_base
+ reg
);
/*
 * Copy a byte buffer into NFI register space (e.g. the SNF GPRAM) using
 * 32-bit accesses only. Bytes are packed little-endian into a word which is
 * flushed on each word boundary and once more for a trailing partial word.
 */
static void nfi_write_data(struct mtk_snand *snf, uint32_t reg,
			   const uint8_t *data, uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		/* Place the byte at its position within the current word */
		val |= ((uint32_t)*data++) << (8 * (i % es));

		/* Flush on the last byte of a word or of the whole buffer */
		if (i % es == es - 1 || i == reg + len - 1) {
			nfi_write32(snf, i & ~(es - 1), val);
			val = 0;
		}
	}
}
/*
 * Copy bytes out of NFI register space into @data using 32-bit reads only.
 * A fresh word is fetched at the start and on every word boundary; bytes
 * are extracted little-endian.
 */
static void nfi_read_data(struct mtk_snand *snf, uint32_t reg, uint8_t *data,
			  uint32_t len)
{
	uint32_t i, val = 0, es = sizeof(uint32_t);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (uint8_t)(val >> (8 * (i % es)));
	}
}
/* Swap the two bad-block-marker bytes pointed to by @bm1 and @bm2. */
static inline void do_bm_swap(uint8_t *bm1, uint8_t *bm2)
{
	uint8_t tmp = *bm1;

	*bm1 = *bm2;
	*bm2 = tmp;
}
235 static void mtk_snand_bm_swap_raw(struct mtk_snand
*snf
)
237 uint32_t fdm_bbm_pos
;
239 if (!snf
->nfi_soc
->bbm_swap
|| snf
->ecc_steps
== 1)
242 fdm_bbm_pos
= (snf
->ecc_steps
- 1) * snf
->raw_sector_size
+
243 snf
->nfi_soc
->sector_size
;
244 do_bm_swap(&snf
->page_cache
[fdm_bbm_pos
],
245 &snf
->page_cache
[snf
->writesize
]);
248 static void mtk_snand_bm_swap(struct mtk_snand
*snf
)
250 uint32_t buf_bbm_pos
, fdm_bbm_pos
;
252 if (!snf
->nfi_soc
->bbm_swap
|| snf
->ecc_steps
== 1)
255 buf_bbm_pos
= snf
->writesize
-
256 (snf
->ecc_steps
- 1) * snf
->spare_per_sector
;
257 fdm_bbm_pos
= snf
->writesize
+
258 (snf
->ecc_steps
- 1) * snf
->nfi_soc
->fdm_size
;
259 do_bm_swap(&snf
->page_cache
[fdm_bbm_pos
],
260 &snf
->page_cache
[buf_bbm_pos
]);
263 static void mtk_snand_fdm_bm_swap_raw(struct mtk_snand
*snf
)
265 uint32_t fdm_bbm_pos1
, fdm_bbm_pos2
;
267 if (!snf
->nfi_soc
->bbm_swap
|| snf
->ecc_steps
== 1)
270 fdm_bbm_pos1
= snf
->nfi_soc
->sector_size
;
271 fdm_bbm_pos2
= (snf
->ecc_steps
- 1) * snf
->raw_sector_size
+
272 snf
->nfi_soc
->sector_size
;
273 do_bm_swap(&snf
->page_cache
[fdm_bbm_pos1
],
274 &snf
->page_cache
[fdm_bbm_pos2
]);
277 static void mtk_snand_fdm_bm_swap(struct mtk_snand
*snf
)
279 uint32_t fdm_bbm_pos1
, fdm_bbm_pos2
;
281 if (!snf
->nfi_soc
->bbm_swap
|| snf
->ecc_steps
== 1)
284 fdm_bbm_pos1
= snf
->writesize
;
285 fdm_bbm_pos2
= snf
->writesize
+
286 (snf
->ecc_steps
- 1) * snf
->nfi_soc
->fdm_size
;
287 do_bm_swap(&snf
->page_cache
[fdm_bbm_pos1
],
288 &snf
->page_cache
[fdm_bbm_pos2
]);
291 static int mtk_nfi_reset(struct mtk_snand
*snf
)
293 uint32_t val
, fifo_mask
;
296 nfi_write32(snf
, NFI_CON
, CON_FIFO_FLUSH
| CON_NFI_RST
);
298 ret
= read16_poll_timeout(snf
->nfi_base
+ NFI_MASTERSTA
, val
,
299 !(val
& snf
->nfi_soc
->mastersta_mask
), 0,
302 snand_log_nfi(snf
->pdev
,
303 "NFI master is still busy after reset\n");
307 ret
= read32_poll_timeout(snf
->nfi_base
+ NFI_STA
, val
,
308 !(val
& (NFI_FSM
| NFI_NAND_FSM
)), 0,
311 snand_log_nfi(snf
->pdev
, "Failed to reset NFI\n");
315 fifo_mask
= ((snf
->nfi_soc
->fifo_size
- 1) << FIFO_RD_REMAIN_S
) |
316 ((snf
->nfi_soc
->fifo_size
- 1) << FIFO_WR_REMAIN_S
);
317 ret
= read16_poll_timeout(snf
->nfi_base
+ NFI_FIFOSTA
, val
,
318 !(val
& fifo_mask
), 0, SNFI_POLL_INTERVAL
);
320 snand_log_nfi(snf
->pdev
, "NFI FIFOs are not empty\n");
327 static int mtk_snand_mac_reset(struct mtk_snand
*snf
)
332 nfi_rmw32(snf
, SNF_MISC_CTL
, 0, SW_RST
);
334 ret
= read32_poll_timeout(snf
->nfi_base
+ SNF_STA_CTL1
, val
,
335 !(val
& SPI_STATE
), 0, SNFI_POLL_INTERVAL
);
337 snand_log_snfi(snf
->pdev
, "Failed to reset SNFI MAC\n");
339 nfi_write32(snf
, SNF_MISC_CTL
, (2 << FIFO_RD_LTC_S
) |
340 (10 << CS_DESELECT_CYC_S
));
345 static int mtk_snand_mac_trigger(struct mtk_snand
*snf
, uint32_t outlen
,
351 nfi_write32(snf
, SNF_MAC_CTL
, SF_MAC_EN
);
352 nfi_write32(snf
, SNF_MAC_OUTL
, outlen
);
353 nfi_write32(snf
, SNF_MAC_INL
, inlen
);
355 nfi_write32(snf
, SNF_MAC_CTL
, SF_MAC_EN
| SF_TRIG
);
357 ret
= read32_poll_timeout(snf
->nfi_base
+ SNF_MAC_CTL
, val
,
358 val
& WIP_READY
, 0, SNFI_POLL_INTERVAL
);
360 snand_log_snfi(snf
->pdev
, "Timed out waiting for WIP_READY\n");
364 ret
= read32_poll_timeout(snf
->nfi_base
+ SNF_MAC_CTL
, val
,
365 !(val
& WIP
), 0, SNFI_POLL_INTERVAL
);
367 snand_log_snfi(snf
->pdev
,
368 "Timed out waiting for WIP cleared\n");
372 nfi_write32(snf
, SNF_MAC_CTL
, 0);
377 int mtk_snand_mac_io(struct mtk_snand
*snf
, const uint8_t *out
, uint32_t outlen
,
378 uint8_t *in
, uint32_t inlen
)
382 if (outlen
+ inlen
> SNF_GPRAM_SIZE
)
385 mtk_snand_mac_reset(snf
);
387 nfi_write_data(snf
, SNF_GPRAM
, out
, outlen
);
389 ret
= mtk_snand_mac_trigger(snf
, outlen
, inlen
);
396 nfi_read_data(snf
, SNF_GPRAM
+ outlen
, in
, inlen
);
401 static int mtk_snand_get_feature(struct mtk_snand
*snf
, uint32_t addr
)
406 op
[0] = SNAND_CMD_GET_FEATURE
;
407 op
[1] = (uint8_t)addr
;
409 ret
= mtk_snand_mac_io(snf
, op
, sizeof(op
), &val
, 1);
416 int mtk_snand_set_feature(struct mtk_snand
*snf
, uint32_t addr
, uint32_t val
)
420 op
[0] = SNAND_CMD_SET_FEATURE
;
421 op
[1] = (uint8_t)addr
;
422 op
[2] = (uint8_t)val
;
424 return mtk_snand_mac_io(snf
, op
, sizeof(op
), NULL
, 0);
427 static int mtk_snand_poll_status(struct mtk_snand
*snf
, uint32_t wait_us
)
430 mtk_snand_time_t time_start
, tmo
;
432 time_start
= timer_get_ticks();
433 tmo
= timer_time_to_tick(wait_us
);
436 val
= mtk_snand_get_feature(snf
, SNAND_FEATURE_STATUS_ADDR
);
437 if (!(val
& SNAND_STATUS_OIP
))
438 return val
& (SNAND_STATUS_ERASE_FAIL
|
439 SNAND_STATUS_PROGRAM_FAIL
);
440 } while (!timer_is_timeout(time_start
, tmo
));
445 int mtk_snand_chip_reset(struct mtk_snand
*snf
)
447 uint8_t op
= SNAND_CMD_RESET
;
450 ret
= mtk_snand_mac_io(snf
, &op
, 1, NULL
, 0);
454 ret
= mtk_snand_poll_status(snf
, SNFI_POLL_INTERVAL
);
461 static int mtk_snand_config_feature(struct mtk_snand
*snf
, uint8_t clr
,
467 val
= mtk_snand_get_feature(snf
, SNAND_FEATURE_CONFIG_ADDR
);
469 snand_log_chip(snf
->pdev
,
470 "Failed to get configuration feature\n");
474 newval
= (val
& (~clr
)) | set
;
479 ret
= mtk_snand_set_feature(snf
, SNAND_FEATURE_CONFIG_ADDR
,
482 snand_log_chip(snf
->pdev
,
483 "Failed to set configuration feature\n");
487 val
= mtk_snand_get_feature(snf
, SNAND_FEATURE_CONFIG_ADDR
);
489 snand_log_chip(snf
->pdev
,
490 "Failed to get configuration feature\n");
500 static int mtk_snand_ondie_ecc_control(struct mtk_snand
*snf
, bool enable
)
505 ret
= mtk_snand_config_feature(snf
, 0, SNAND_FEATURE_ECC_EN
);
507 ret
= mtk_snand_config_feature(snf
, SNAND_FEATURE_ECC_EN
, 0);
510 snand_log_chip(snf
->pdev
, "Failed to %s On-Die ECC engine\n",
511 enable
? "enable" : "disable");
517 static int mtk_snand_qspi_control(struct mtk_snand
*snf
, bool enable
)
522 ret
= mtk_snand_config_feature(snf
, 0,
523 SNAND_FEATURE_QUAD_ENABLE
);
525 ret
= mtk_snand_config_feature(snf
,
526 SNAND_FEATURE_QUAD_ENABLE
, 0);
530 snand_log_chip(snf
->pdev
, "Failed to %s quad spi\n",
531 enable
? "enable" : "disable");
537 static int mtk_snand_unlock(struct mtk_snand
*snf
)
541 ret
= mtk_snand_set_feature(snf
, SNAND_FEATURE_PROTECT_ADDR
, 0);
543 snand_log_chip(snf
->pdev
, "Failed to set protection feature\n");
550 static int mtk_snand_write_enable(struct mtk_snand
*snf
)
552 uint8_t op
= SNAND_CMD_WRITE_ENABLE
;
555 ret
= mtk_snand_mac_io(snf
, &op
, 1, NULL
, 0);
559 val
= mtk_snand_get_feature(snf
, SNAND_FEATURE_STATUS_ADDR
);
563 if (val
& SNAND_STATUS_WEL
)
566 snand_log_chip(snf
->pdev
, "Failed to send write-enable command\n");
571 static int mtk_snand_select_die(struct mtk_snand
*snf
, uint32_t dieidx
)
573 if (!snf
->select_die
)
576 return snf
->select_die(snf
, dieidx
);
579 static uint64_t mtk_snand_select_die_address(struct mtk_snand
*snf
,
584 if (!snf
->select_die
)
587 dieidx
= addr
>> snf
->die_shift
;
589 mtk_snand_select_die(snf
, dieidx
);
591 return addr
& snf
->die_mask
;
594 static uint32_t mtk_snand_get_plane_address(struct mtk_snand
*snf
,
597 uint32_t pages_per_block
;
599 pages_per_block
= 1 << (snf
->erasesize_shift
- snf
->writesize_shift
);
601 if (page
& pages_per_block
)
602 return 1 << (snf
->writesize_shift
+ 1);
607 static int mtk_snand_page_op(struct mtk_snand
*snf
, uint32_t page
, uint8_t cmd
)
612 op
[1] = (page
>> 16) & 0xff;
613 op
[2] = (page
>> 8) & 0xff;
616 return mtk_snand_mac_io(snf
, op
, sizeof(op
), NULL
, 0);
619 static void mtk_snand_read_fdm(struct mtk_snand
*snf
, uint8_t *buf
)
622 uint8_t *oobptr
= buf
;
625 for (i
= 0; i
< snf
->ecc_steps
; i
++) {
626 vall
= nfi_read32(snf
, NFI_FDML(i
));
627 valm
= nfi_read32(snf
, NFI_FDMM(i
));
629 for (j
= 0; j
< snf
->nfi_soc
->fdm_size
; j
++)
630 oobptr
[j
] = (j
>= 4 ? valm
: vall
) >> ((j
% 4) * 8);
632 oobptr
+= snf
->nfi_soc
->fdm_size
;
636 static int mtk_snand_read_ecc_parity(struct mtk_snand
*snf
, uint32_t page
,
637 uint32_t sect
, uint8_t *oob
)
639 uint32_t ecc_bytes
= snf
->spare_per_sector
- snf
->nfi_soc
->fdm_size
;
640 uint32_t coladdr
, raw_offs
, offs
;
643 if (sizeof(op
) + ecc_bytes
> SNF_GPRAM_SIZE
) {
644 snand_log_snfi(snf
->pdev
,
645 "ECC parity size does not fit the GPRAM\n");
649 raw_offs
= sect
* snf
->raw_sector_size
+ snf
->nfi_soc
->sector_size
+
650 snf
->nfi_soc
->fdm_size
;
651 offs
= snf
->ecc_steps
* snf
->nfi_soc
->fdm_size
+ sect
* ecc_bytes
;
653 /* Column address with plane bit */
654 coladdr
= raw_offs
| mtk_snand_get_plane_address(snf
, page
);
656 op
[0] = SNAND_CMD_READ_FROM_CACHE
;
657 op
[1] = (coladdr
>> 8) & 0xff;
658 op
[2] = coladdr
& 0xff;
661 return mtk_snand_mac_io(snf
, op
, sizeof(op
), oob
+ offs
, ecc_bytes
);
664 static int mtk_snand_check_ecc_result(struct mtk_snand
*snf
, uint32_t page
)
666 uint8_t *oob
= snf
->page_cache
+ snf
->writesize
;
667 int i
, rc
, ret
= 0, max_bitflips
= 0;
669 for (i
= 0; i
< snf
->ecc_steps
; i
++) {
670 if (snf
->sect_bf
[i
] >= 0) {
671 if (snf
->sect_bf
[i
] > max_bitflips
)
672 max_bitflips
= snf
->sect_bf
[i
];
676 rc
= mtk_snand_read_ecc_parity(snf
, page
, i
, oob
);
680 rc
= mtk_ecc_fixup_empty_sector(snf
, i
);
684 snand_log_ecc(snf
->pdev
,
685 "Uncorrectable bitflips in page %u sect %u\n",
688 snf
->sect_bf
[i
] = rc
;
690 if (snf
->sect_bf
[i
] > max_bitflips
)
691 max_bitflips
= snf
->sect_bf
[i
];
693 snand_log_ecc(snf
->pdev
,
694 "%u bitflip%s corrected in page %u sect %u\n",
695 rc
, rc
> 1 ? "s" : "", page
, i
);
701 return ret
? ret
: max_bitflips
;
704 static int mtk_snand_read_cache(struct mtk_snand
*snf
, uint32_t page
, bool raw
)
706 uint32_t coladdr
, rwbytes
, mode
, len
, val
;
710 /* Column address with plane bit */
711 coladdr
= mtk_snand_get_plane_address(snf
, page
);
713 mtk_snand_mac_reset(snf
);
716 /* Command and dummy cycles */
717 nfi_write32(snf
, SNF_RD_CTL2
,
718 ((uint32_t)snf
->dummy_rfc
<< DATA_READ_DUMMY_S
) |
719 (snf
->opcode_rfc
<< DATA_READ_CMD_S
));
722 nfi_write32(snf
, SNF_RD_CTL3
, coladdr
);
725 mode
= (uint32_t)snf
->mode_rfc
<< DATA_READ_MODE_S
;
726 nfi_rmw32(snf
, SNF_MISC_CTL
, DATA_READ_MODE
, mode
| DATARD_CUSTOM_EN
);
728 /* Set bytes to read */
729 rwbytes
= snf
->ecc_steps
* snf
->raw_sector_size
;
730 nfi_write32(snf
, SNF_MISC_CTL2
, (rwbytes
<< PROGRAM_LOAD_BYTE_NUM_S
) |
733 /* NFI read prepare */
734 mode
= raw
? 0 : CNFG_HW_ECC_EN
| CNFG_AUTO_FMT_EN
;
735 nfi_write16(snf
, NFI_CNFG
, (CNFG_OP_MODE_CUST
<< CNFG_OP_MODE_S
) |
736 CNFG_DMA_BURST_EN
| CNFG_READ_MODE
| CNFG_DMA_MODE
| mode
);
738 nfi_write32(snf
, NFI_CON
, (snf
->ecc_steps
<< CON_SEC_NUM_S
));
740 /* Prepare for DMA read */
741 len
= snf
->writesize
+ snf
->oobsize
;
742 ret
= dma_mem_map(snf
->pdev
, snf
->page_cache
, &dma_addr
, len
, false);
744 snand_log_nfi(snf
->pdev
,
745 "DMA map from device failed with %d\n", ret
);
749 nfi_write32(snf
, NFI_STRADDR
, (uint32_t)dma_addr
);
752 mtk_snand_ecc_decoder_start(snf
);
754 /* Prepare for custom read interrupt */
755 nfi_write32(snf
, NFI_INTR_EN
, NFI_IRQ_INTR_EN
| NFI_IRQ_CUS_READ
);
756 irq_completion_init(snf
->pdev
);
758 /* Trigger NFI into custom mode */
759 nfi_write16(snf
, NFI_CMD
, NFI_CMD_DUMMY_READ
);
762 nfi_rmw32(snf
, NFI_CON
, 0, CON_BRD
);
763 nfi_write16(snf
, NFI_STRDATA
, STR_DATA
);
765 /* Wait for operation finished */
766 ret
= irq_completion_wait(snf
->pdev
, snf
->nfi_base
+ SNF_STA_CTL1
,
767 CUS_READ_DONE
, SNFI_POLL_INTERVAL
);
769 snand_log_nfi(snf
->pdev
,
770 "DMA timed out for reading from cache\n");
774 /* Wait for BUS_SEC_CNTR returning expected value */
775 ret
= read32_poll_timeout(snf
->nfi_base
+ NFI_BYTELEN
, val
,
776 BUS_SEC_CNTR(val
) >= snf
->ecc_steps
,
777 0, SNFI_POLL_INTERVAL
);
779 snand_log_nfi(snf
->pdev
,
780 "Timed out waiting for BUS_SEC_CNTR\n");
784 /* Wait for bus becoming idle */
785 ret
= read32_poll_timeout(snf
->nfi_base
+ NFI_MASTERSTA
, val
,
786 !(val
& snf
->nfi_soc
->mastersta_mask
),
787 0, SNFI_POLL_INTERVAL
);
789 snand_log_nfi(snf
->pdev
,
790 "Timed out waiting for bus becoming idle\n");
795 ret
= mtk_ecc_wait_decoder_done(snf
);
799 mtk_snand_read_fdm(snf
, snf
->page_cache
+ snf
->writesize
);
801 mtk_ecc_check_decode_error(snf
);
802 mtk_snand_ecc_decoder_stop(snf
);
804 ret
= mtk_snand_check_ecc_result(snf
, page
);
809 dma_mem_unmap(snf
->pdev
, dma_addr
, len
, false);
812 nfi_write32(snf
, NFI_CON
, 0);
813 nfi_write16(snf
, NFI_CNFG
, 0);
815 /* Clear SNF done flag */
816 nfi_rmw32(snf
, SNF_STA_CTL1
, 0, CUS_READ_DONE
);
817 nfi_write32(snf
, SNF_STA_CTL1
, 0);
819 /* Disable interrupt */
820 nfi_read32(snf
, NFI_INTR_STA
);
821 nfi_write32(snf
, NFI_INTR_EN
, 0);
823 nfi_rmw32(snf
, SNF_MISC_CTL
, DATARD_CUSTOM_EN
, 0);
828 static void mtk_snand_from_raw_page(struct mtk_snand
*snf
, void *buf
, void *oob
)
830 uint32_t i
, ecc_bytes
= snf
->spare_per_sector
- snf
->nfi_soc
->fdm_size
;
831 uint8_t *eccptr
= oob
+ snf
->ecc_steps
* snf
->nfi_soc
->fdm_size
;
832 uint8_t *bufptr
= buf
, *oobptr
= oob
, *raw_sector
;
834 for (i
= 0; i
< snf
->ecc_steps
; i
++) {
835 raw_sector
= snf
->page_cache
+ i
* snf
->raw_sector_size
;
838 memcpy(bufptr
, raw_sector
, snf
->nfi_soc
->sector_size
);
839 bufptr
+= snf
->nfi_soc
->sector_size
;
842 raw_sector
+= snf
->nfi_soc
->sector_size
;
845 memcpy(oobptr
, raw_sector
, snf
->nfi_soc
->fdm_size
);
846 oobptr
+= snf
->nfi_soc
->fdm_size
;
847 raw_sector
+= snf
->nfi_soc
->fdm_size
;
849 memcpy(eccptr
, raw_sector
, ecc_bytes
);
855 static int mtk_snand_do_read_page(struct mtk_snand
*snf
, uint64_t addr
,
856 void *buf
, void *oob
, bool raw
, bool format
)
862 die_addr
= mtk_snand_select_die_address(snf
, addr
);
863 page
= die_addr
>> snf
->writesize_shift
;
865 ret
= mtk_snand_page_op(snf
, page
, SNAND_CMD_READ_TO_CACHE
);
869 ret
= mtk_snand_poll_status(snf
, SNFI_POLL_INTERVAL
);
871 snand_log_chip(snf
->pdev
, "Read to cache command timed out\n");
875 ret
= mtk_snand_read_cache(snf
, page
, raw
);
876 if (ret
< 0 && ret
!= -EBADMSG
)
881 mtk_snand_bm_swap_raw(snf
);
882 mtk_snand_fdm_bm_swap_raw(snf
);
883 mtk_snand_from_raw_page(snf
, buf
, oob
);
886 memcpy(buf
, snf
->page_cache
, snf
->writesize
);
889 memset(oob
, 0xff, snf
->oobsize
);
890 memcpy(oob
, snf
->page_cache
+ snf
->writesize
,
891 snf
->ecc_steps
* snf
->spare_per_sector
);
895 mtk_snand_bm_swap(snf
);
896 mtk_snand_fdm_bm_swap(snf
);
899 memcpy(buf
, snf
->page_cache
, snf
->writesize
);
902 memset(oob
, 0xff, snf
->oobsize
);
903 memcpy(oob
, snf
->page_cache
+ snf
->writesize
,
904 snf
->ecc_steps
* snf
->nfi_soc
->fdm_size
);
911 int mtk_snand_read_page(struct mtk_snand
*snf
, uint64_t addr
, void *buf
,
914 if (!snf
|| (!buf
&& !oob
))
917 if (addr
>= snf
->size
)
920 return mtk_snand_do_read_page(snf
, addr
, buf
, oob
, raw
, true);
923 static void mtk_snand_write_fdm(struct mtk_snand
*snf
, const uint8_t *buf
)
925 uint32_t vall
, valm
, fdm_size
= snf
->nfi_soc
->fdm_size
;
926 const uint8_t *oobptr
= buf
;
929 for (i
= 0; i
< snf
->ecc_steps
; i
++) {
933 for (j
= 0; j
< 8; j
++) {
935 vall
|= (j
< fdm_size
? oobptr
[j
] : 0xff)
938 valm
|= (j
< fdm_size
? oobptr
[j
] : 0xff)
942 nfi_write32(snf
, NFI_FDML(i
), vall
);
943 nfi_write32(snf
, NFI_FDMM(i
), valm
);
949 static int mtk_snand_program_load(struct mtk_snand
*snf
, uint32_t page
,
952 uint32_t coladdr
, rwbytes
, mode
, len
, val
;
956 /* Column address with plane bit */
957 coladdr
= mtk_snand_get_plane_address(snf
, page
);
959 mtk_snand_mac_reset(snf
);
962 /* Write FDM registers if necessary */
964 mtk_snand_write_fdm(snf
, snf
->page_cache
+ snf
->writesize
);
967 nfi_write32(snf
, SNF_PG_CTL1
, (snf
->opcode_pl
<< PG_LOAD_CMD_S
));
970 nfi_write32(snf
, SNF_PG_CTL2
, coladdr
);
973 mode
= snf
->mode_pl
? PG_LOAD_X4_EN
: 0;
974 nfi_rmw32(snf
, SNF_MISC_CTL
, PG_LOAD_X4_EN
, mode
| PG_LOAD_CUSTOM_EN
);
976 /* Set bytes to write */
977 rwbytes
= snf
->ecc_steps
* snf
->raw_sector_size
;
978 nfi_write32(snf
, SNF_MISC_CTL2
, (rwbytes
<< PROGRAM_LOAD_BYTE_NUM_S
) |
981 /* NFI write prepare */
982 mode
= raw
? 0 : CNFG_HW_ECC_EN
| CNFG_AUTO_FMT_EN
;
983 nfi_write16(snf
, NFI_CNFG
, (CNFG_OP_MODE_PROGRAM
<< CNFG_OP_MODE_S
) |
984 CNFG_DMA_BURST_EN
| CNFG_DMA_MODE
| mode
);
986 nfi_write32(snf
, NFI_CON
, (snf
->ecc_steps
<< CON_SEC_NUM_S
));
988 /* Prepare for DMA write */
989 len
= snf
->writesize
+ snf
->oobsize
;
990 ret
= dma_mem_map(snf
->pdev
, snf
->page_cache
, &dma_addr
, len
, true);
992 snand_log_nfi(snf
->pdev
,
993 "DMA map to device failed with %d\n", ret
);
997 nfi_write32(snf
, NFI_STRADDR
, (uint32_t)dma_addr
);
1000 mtk_snand_ecc_encoder_start(snf
);
1002 /* Prepare for custom write interrupt */
1003 nfi_write32(snf
, NFI_INTR_EN
, NFI_IRQ_INTR_EN
| NFI_IRQ_CUS_PG
);
1004 irq_completion_init(snf
->pdev
);
1006 /* Trigger NFI into custom mode */
1007 nfi_write16(snf
, NFI_CMD
, NFI_CMD_DUMMY_WRITE
);
1009 /* Start DMA write */
1010 nfi_rmw32(snf
, NFI_CON
, 0, CON_BWR
);
1011 nfi_write16(snf
, NFI_STRDATA
, STR_DATA
);
1013 /* Wait for operation finished */
1014 ret
= irq_completion_wait(snf
->pdev
, snf
->nfi_base
+ SNF_STA_CTL1
,
1015 CUS_PG_DONE
, SNFI_POLL_INTERVAL
);
1017 snand_log_nfi(snf
->pdev
,
1018 "DMA timed out for program load\n");
1022 /* Wait for NFI_SEC_CNTR returning expected value */
1023 ret
= read32_poll_timeout(snf
->nfi_base
+ NFI_ADDRCNTR
, val
,
1024 NFI_SEC_CNTR(val
) >= snf
->ecc_steps
,
1025 0, SNFI_POLL_INTERVAL
);
1027 snand_log_nfi(snf
->pdev
,
1028 "Timed out waiting for NFI_SEC_CNTR\n");
1033 mtk_snand_ecc_encoder_stop(snf
);
1037 dma_mem_unmap(snf
->pdev
, dma_addr
, len
, true);
1040 nfi_write32(snf
, NFI_CON
, 0);
1041 nfi_write16(snf
, NFI_CNFG
, 0);
1043 /* Clear SNF done flag */
1044 nfi_rmw32(snf
, SNF_STA_CTL1
, 0, CUS_PG_DONE
);
1045 nfi_write32(snf
, SNF_STA_CTL1
, 0);
1047 /* Disable interrupt */
1048 nfi_read32(snf
, NFI_INTR_STA
);
1049 nfi_write32(snf
, NFI_INTR_EN
, 0);
1051 nfi_rmw32(snf
, SNF_MISC_CTL
, PG_LOAD_CUSTOM_EN
, 0);
1056 static void mtk_snand_to_raw_page(struct mtk_snand
*snf
,
1057 const void *buf
, const void *oob
,
1060 uint32_t i
, ecc_bytes
= snf
->spare_per_sector
- snf
->nfi_soc
->fdm_size
;
1061 const uint8_t *eccptr
= oob
+ snf
->ecc_steps
* snf
->nfi_soc
->fdm_size
;
1062 const uint8_t *bufptr
= buf
, *oobptr
= oob
;
1063 uint8_t *raw_sector
;
1065 memset(snf
->page_cache
, 0xff, snf
->writesize
+ snf
->oobsize
);
1066 for (i
= 0; i
< snf
->ecc_steps
; i
++) {
1067 raw_sector
= snf
->page_cache
+ i
* snf
->raw_sector_size
;
1070 memcpy(raw_sector
, bufptr
, snf
->nfi_soc
->sector_size
);
1071 bufptr
+= snf
->nfi_soc
->sector_size
;
1074 raw_sector
+= snf
->nfi_soc
->sector_size
;
1077 memcpy(raw_sector
, oobptr
, snf
->nfi_soc
->fdm_size
);
1078 oobptr
+= snf
->nfi_soc
->fdm_size
;
1079 raw_sector
+= snf
->nfi_soc
->fdm_size
;
1082 memset(raw_sector
, 0xff, ecc_bytes
);
1084 memcpy(raw_sector
, eccptr
, ecc_bytes
);
1085 eccptr
+= ecc_bytes
;
1090 static bool mtk_snand_is_empty_page(struct mtk_snand
*snf
, const void *buf
,
1093 const uint8_t *p
= buf
;
1097 for (i
= 0; i
< snf
->writesize
; i
++) {
1104 for (j
= 0; j
< snf
->ecc_steps
; j
++) {
1105 p
= oob
+ j
* snf
->nfi_soc
->fdm_size
;
1107 for (i
= 0; i
< snf
->nfi_soc
->fdm_ecc_size
; i
++) {
1117 static int mtk_snand_do_write_page(struct mtk_snand
*snf
, uint64_t addr
,
1118 const void *buf
, const void *oob
,
1119 bool raw
, bool format
)
1122 bool empty_ecc
= false;
1126 die_addr
= mtk_snand_select_die_address(snf
, addr
);
1127 page
= die_addr
>> snf
->writesize_shift
;
1129 if (!raw
&& mtk_snand_is_empty_page(snf
, buf
, oob
)) {
1131 * If the data in the page to be ecc-ed is full 0xff,
1132 * change to raw write mode
1137 /* fill ecc parity code region with 0xff */
1143 mtk_snand_to_raw_page(snf
, buf
, oob
, empty_ecc
);
1144 mtk_snand_fdm_bm_swap_raw(snf
);
1145 mtk_snand_bm_swap_raw(snf
);
1147 memset(snf
->page_cache
, 0xff,
1148 snf
->writesize
+ snf
->oobsize
);
1151 memcpy(snf
->page_cache
, buf
, snf
->writesize
);
1154 memcpy(snf
->page_cache
+ snf
->writesize
, oob
,
1155 snf
->ecc_steps
* snf
->spare_per_sector
);
1159 memset(snf
->page_cache
, 0xff, snf
->writesize
+ snf
->oobsize
);
1161 memcpy(snf
->page_cache
, buf
, snf
->writesize
);
1164 memcpy(snf
->page_cache
+ snf
->writesize
, oob
,
1165 snf
->ecc_steps
* snf
->nfi_soc
->fdm_size
);
1168 mtk_snand_fdm_bm_swap(snf
);
1169 mtk_snand_bm_swap(snf
);
1172 ret
= mtk_snand_write_enable(snf
);
1176 ret
= mtk_snand_program_load(snf
, page
, raw
);
1180 ret
= mtk_snand_page_op(snf
, page
, SNAND_CMD_PROGRAM_EXECUTE
);
1184 ret
= mtk_snand_poll_status(snf
, SNFI_POLL_INTERVAL
);
1186 snand_log_chip(snf
->pdev
,
1187 "Page program command timed out on page %u\n",
1192 if (ret
& SNAND_STATUS_PROGRAM_FAIL
) {
1193 snand_log_chip(snf
->pdev
,
1194 "Page program failed on page %u\n", page
);
1201 int mtk_snand_write_page(struct mtk_snand
*snf
, uint64_t addr
, const void *buf
,
1202 const void *oob
, bool raw
)
1204 if (!snf
|| (!buf
&& !oob
))
1207 if (addr
>= snf
->size
)
1210 return mtk_snand_do_write_page(snf
, addr
, buf
, oob
, raw
, true);
1213 int mtk_snand_erase_block(struct mtk_snand
*snf
, uint64_t addr
)
1216 uint32_t page
, block
;
1222 if (addr
>= snf
->size
)
1225 die_addr
= mtk_snand_select_die_address(snf
, addr
);
1226 block
= die_addr
>> snf
->erasesize_shift
;
1227 page
= block
<< (snf
->erasesize_shift
- snf
->writesize_shift
);
1229 ret
= mtk_snand_write_enable(snf
);
1233 ret
= mtk_snand_page_op(snf
, page
, SNAND_CMD_BLOCK_ERASE
);
1237 ret
= mtk_snand_poll_status(snf
, SNFI_POLL_INTERVAL
);
1239 snand_log_chip(snf
->pdev
,
1240 "Block erase command timed out on block %u\n",
1245 if (ret
& SNAND_STATUS_ERASE_FAIL
) {
1246 snand_log_chip(snf
->pdev
,
1247 "Block erase failed on block %u\n", block
);
1254 static int mtk_snand_block_isbad_std(struct mtk_snand
*snf
, uint64_t addr
)
1258 ret
= mtk_snand_do_read_page(snf
, addr
, NULL
, snf
->buf_cache
, true,
1260 if (ret
&& ret
!= -EBADMSG
)
1263 return snf
->buf_cache
[0] != 0xff;
1266 static int mtk_snand_block_isbad_mtk(struct mtk_snand
*snf
, uint64_t addr
)
1270 ret
= mtk_snand_do_read_page(snf
, addr
, NULL
, snf
->buf_cache
, true,
1272 if (ret
&& ret
!= -EBADMSG
)
1275 return snf
->buf_cache
[0] != 0xff;
1278 int mtk_snand_block_isbad(struct mtk_snand
*snf
, uint64_t addr
)
1283 if (addr
>= snf
->size
)
1286 addr
&= ~snf
->erasesize_mask
;
1288 if (snf
->nfi_soc
->bbm_swap
)
1289 return mtk_snand_block_isbad_std(snf
, addr
);
1291 return mtk_snand_block_isbad_mtk(snf
, addr
);
1294 static int mtk_snand_block_markbad_std(struct mtk_snand
*snf
, uint64_t addr
)
1296 /* Standard BBM position */
1297 memset(snf
->buf_cache
, 0xff, snf
->oobsize
);
1298 snf
->buf_cache
[0] = 0;
1300 return mtk_snand_do_write_page(snf
, addr
, NULL
, snf
->buf_cache
, true,
1304 static int mtk_snand_block_markbad_mtk(struct mtk_snand
*snf
, uint64_t addr
)
1306 /* Write the whole page with zeros */
1307 memset(snf
->buf_cache
, 0, snf
->writesize
+ snf
->oobsize
);
1309 return mtk_snand_do_write_page(snf
, addr
, snf
->buf_cache
,
1310 snf
->buf_cache
+ snf
->writesize
, true,
1314 int mtk_snand_block_markbad(struct mtk_snand
*snf
, uint64_t addr
)
1319 if (addr
>= snf
->size
)
1322 addr
&= ~snf
->erasesize_mask
;
1324 if (snf
->nfi_soc
->bbm_swap
)
1325 return mtk_snand_block_markbad_std(snf
, addr
);
1327 return mtk_snand_block_markbad_mtk(snf
, addr
);
1330 int mtk_snand_fill_oob(struct mtk_snand
*snf
, uint8_t *oobraw
,
1331 const uint8_t *oobbuf
, size_t ooblen
)
1333 size_t len
= ooblen
, sect_fdm_len
;
1334 const uint8_t *oob
= oobbuf
;
1337 if (!snf
|| !oobraw
|| !oob
)
1340 while (len
&& step
< snf
->ecc_steps
) {
1341 sect_fdm_len
= snf
->nfi_soc
->fdm_size
- 1;
1342 if (sect_fdm_len
> len
)
1345 memcpy(oobraw
+ step
* snf
->nfi_soc
->fdm_size
+ 1, oob
,
1348 len
-= sect_fdm_len
;
1349 oob
+= sect_fdm_len
;
1356 int mtk_snand_transfer_oob(struct mtk_snand
*snf
, uint8_t *oobbuf
,
1357 size_t ooblen
, const uint8_t *oobraw
)
1359 size_t len
= ooblen
, sect_fdm_len
;
1360 uint8_t *oob
= oobbuf
;
1363 if (!snf
|| !oobraw
|| !oob
)
1366 while (len
&& step
< snf
->ecc_steps
) {
1367 sect_fdm_len
= snf
->nfi_soc
->fdm_size
- 1;
1368 if (sect_fdm_len
> len
)
1371 memcpy(oob
, oobraw
+ step
* snf
->nfi_soc
->fdm_size
+ 1,
1374 len
-= sect_fdm_len
;
1375 oob
+= sect_fdm_len
;
1382 int mtk_snand_read_page_auto_oob(struct mtk_snand
*snf
, uint64_t addr
,
1383 void *buf
, void *oob
, size_t ooblen
,
1384 size_t *actualooblen
, bool raw
)
1392 return mtk_snand_read_page(snf
, addr
, buf
, NULL
, raw
);
1394 ret
= mtk_snand_read_page(snf
, addr
, buf
, snf
->buf_cache
, raw
);
1395 if (ret
&& ret
!= -EBADMSG
) {
1401 oobremain
= mtk_snand_transfer_oob(snf
, oob
, ooblen
, snf
->buf_cache
);
1403 *actualooblen
= ooblen
- oobremain
;
1408 int mtk_snand_write_page_auto_oob(struct mtk_snand
*snf
, uint64_t addr
,
1409 const void *buf
, const void *oob
,
1410 size_t ooblen
, size_t *actualooblen
, bool raw
)
1418 return mtk_snand_write_page(snf
, addr
, buf
, NULL
, raw
);
1420 memset(snf
->buf_cache
, 0xff, snf
->oobsize
);
1421 oobremain
= mtk_snand_fill_oob(snf
, snf
->buf_cache
, oob
, ooblen
);
1423 *actualooblen
= ooblen
- oobremain
;
1425 return mtk_snand_write_page(snf
, addr
, buf
, snf
->buf_cache
, raw
);
1428 int mtk_snand_get_chip_info(struct mtk_snand
*snf
,
1429 struct mtk_snand_chip_info
*info
)
1434 info
->model
= snf
->model
;
1435 info
->chipsize
= snf
->size
;
1436 info
->blocksize
= snf
->erasesize
;
1437 info
->pagesize
= snf
->writesize
;
1438 info
->sparesize
= snf
->oobsize
;
1439 info
->spare_per_sector
= snf
->spare_per_sector
;
1440 info
->fdm_size
= snf
->nfi_soc
->fdm_size
;
1441 info
->fdm_ecc_size
= snf
->nfi_soc
->fdm_ecc_size
;
1442 info
->num_sectors
= snf
->ecc_steps
;
1443 info
->sector_size
= snf
->nfi_soc
->sector_size
;
1444 info
->ecc_strength
= snf
->ecc_strength
;
1445 info
->ecc_bytes
= snf
->ecc_bytes
;
1450 int mtk_snand_irq_process(struct mtk_snand
*snf
)
1457 sta
= nfi_read32(snf
, NFI_INTR_STA
);
1458 ien
= nfi_read32(snf
, NFI_INTR_EN
);
1463 nfi_write32(snf
, NFI_INTR_EN
, 0);
1464 irq_completion_done(snf
->pdev
);
1469 static int mtk_snand_select_spare_per_sector(struct mtk_snand
*snf
)
1471 uint32_t spare_per_step
= snf
->oobsize
/ snf
->ecc_steps
;
1475 * If we're using the 1KB sector size, HW will automatically
1476 * double the spare size. So we should only use half of the value.
1478 if (snf
->nfi_soc
->sector_size
== 1024)
1481 spare_per_step
/= mul
;
1483 for (i
= snf
->nfi_soc
->num_spare_size
- 1; i
>= 0; i
--) {
1484 if (snf
->nfi_soc
->spare_sizes
[i
] <= spare_per_step
) {
1485 snf
->spare_per_sector
= snf
->nfi_soc
->spare_sizes
[i
];
1486 snf
->spare_per_sector
*= mul
;
1491 snand_log_nfi(snf
->pdev
,
1492 "Page size %u+%u is not supported\n", snf
->writesize
,
1498 static int mtk_snand_pagefmt_setup(struct mtk_snand
*snf
)
1500 uint32_t spare_size_idx
, spare_size_shift
, pagesize_idx
;
1501 uint32_t sector_size_512
;
1503 if (snf
->nfi_soc
->sector_size
== 512) {
1504 sector_size_512
= NFI_SEC_SEL_512
;
1505 spare_size_shift
= NFI_SPARE_SIZE_S
;
1507 sector_size_512
= 0;
1508 spare_size_shift
= NFI_SPARE_SIZE_LS_S
;
1511 switch (snf
->writesize
) {
1513 pagesize_idx
= NFI_PAGE_SIZE_512_2K
;
1516 if (snf
->nfi_soc
->sector_size
== 512)
1517 pagesize_idx
= NFI_PAGE_SIZE_2K_4K
;
1519 pagesize_idx
= NFI_PAGE_SIZE_512_2K
;
1522 if (snf
->nfi_soc
->sector_size
== 512)
1523 pagesize_idx
= NFI_PAGE_SIZE_4K_8K
;
1525 pagesize_idx
= NFI_PAGE_SIZE_2K_4K
;
1528 if (snf
->nfi_soc
->sector_size
== 512)
1529 pagesize_idx
= NFI_PAGE_SIZE_8K_16K
;
1531 pagesize_idx
= NFI_PAGE_SIZE_4K_8K
;
1534 pagesize_idx
= NFI_PAGE_SIZE_8K_16K
;
1537 snand_log_nfi(snf
->pdev
, "Page size %u is not supported\n",
1542 spare_size_idx
= mtk_snand_select_spare_per_sector(snf
);
1543 if (unlikely(spare_size_idx
< 0))
1546 snf
->raw_sector_size
= snf
->nfi_soc
->sector_size
+
1547 snf
->spare_per_sector
;
1549 /* Setup page format */
1550 nfi_write32(snf
, NFI_PAGEFMT
,
1551 (snf
->nfi_soc
->fdm_ecc_size
<< NFI_FDM_ECC_NUM_S
) |
1552 (snf
->nfi_soc
->fdm_size
<< NFI_FDM_NUM_S
) |
1553 (spare_size_idx
<< spare_size_shift
) |
1554 (pagesize_idx
<< NFI_PAGE_SIZE_S
) |
1560 static enum snand_flash_io
mtk_snand_select_opcode(struct mtk_snand
*snf
,
1561 uint32_t snfi_caps
, uint8_t *opcode
,
1563 const struct snand_io_cap
*op_cap
)
1567 caps
= snfi_caps
& op_cap
->caps
;
1571 *opcode
= op_cap
->opcodes
[i
- 1].opcode
;
1573 *dummy
= op_cap
->opcodes
[i
- 1].dummy
;
1577 return __SNAND_IO_MAX
;
1580 static int mtk_snand_select_opcode_rfc(struct mtk_snand
*snf
,
1582 const struct snand_io_cap
*op_cap
)
1584 enum snand_flash_io idx
;
1586 static const uint8_t rfc_modes
[__SNAND_IO_MAX
] = {
1587 [SNAND_IO_1_1_1
] = DATA_READ_MODE_X1
,
1588 [SNAND_IO_1_1_2
] = DATA_READ_MODE_X2
,
1589 [SNAND_IO_1_2_2
] = DATA_READ_MODE_DUAL
,
1590 [SNAND_IO_1_1_4
] = DATA_READ_MODE_X4
,
1591 [SNAND_IO_1_4_4
] = DATA_READ_MODE_QUAD
,
1594 idx
= mtk_snand_select_opcode(snf
, snfi_caps
, &snf
->opcode_rfc
,
1595 &snf
->dummy_rfc
, op_cap
);
1596 if (idx
>= __SNAND_IO_MAX
) {
1597 snand_log_snfi(snf
->pdev
,
1598 "No capable opcode for read from cache\n");
1602 snf
->mode_rfc
= rfc_modes
[idx
];
1604 if (idx
== SNAND_IO_1_1_4
|| idx
== SNAND_IO_1_4_4
)
1605 snf
->quad_spi_op
= true;
1610 static int mtk_snand_select_opcode_pl(struct mtk_snand
*snf
, uint32_t snfi_caps
,
1611 const struct snand_io_cap
*op_cap
)
1613 enum snand_flash_io idx
;
1615 static const uint8_t pl_modes
[__SNAND_IO_MAX
] = {
1616 [SNAND_IO_1_1_1
] = 0,
1617 [SNAND_IO_1_1_4
] = 1,
1620 idx
= mtk_snand_select_opcode(snf
, snfi_caps
, &snf
->opcode_pl
,
1622 if (idx
>= __SNAND_IO_MAX
) {
1623 snand_log_snfi(snf
->pdev
,
1624 "No capable opcode for program load\n");
1628 snf
->mode_pl
= pl_modes
[idx
];
1630 if (idx
== SNAND_IO_1_1_4
)
1631 snf
->quad_spi_op
= true;
1636 static int mtk_snand_setup(struct mtk_snand
*snf
,
1637 const struct snand_flash_info
*snand_info
)
1639 const struct snand_mem_org
*memorg
= &snand_info
->memorg
;
1640 uint32_t i
, msg_size
, snfi_caps
;
1643 /* Calculate flash memory organization */
1644 snf
->model
= snand_info
->model
;
1645 snf
->writesize
= memorg
->pagesize
;
1646 snf
->oobsize
= memorg
->sparesize
;
1647 snf
->erasesize
= snf
->writesize
* memorg
->pages_per_block
;
1648 snf
->die_size
= (uint64_t)snf
->erasesize
* memorg
->blocks_per_die
;
1649 snf
->size
= snf
->die_size
* memorg
->ndies
;
1650 snf
->num_dies
= memorg
->ndies
;
1652 snf
->writesize_mask
= snf
->writesize
- 1;
1653 snf
->erasesize_mask
= snf
->erasesize
- 1;
1654 snf
->die_mask
= snf
->die_size
- 1;
1656 snf
->writesize_shift
= ffs(snf
->writesize
) - 1;
1657 snf
->erasesize_shift
= ffs(snf
->erasesize
) - 1;
1658 snf
->die_shift
= mtk_snand_ffs64(snf
->die_size
) - 1;
1660 snf
->select_die
= snand_info
->select_die
;
1662 /* Determine opcodes for read from cache/program load */
1663 snfi_caps
= SPI_IO_1_1_1
| SPI_IO_1_1_2
| SPI_IO_1_2_2
;
1664 if (snf
->snfi_quad_spi
)
1665 snfi_caps
|= SPI_IO_1_1_4
| SPI_IO_1_4_4
;
1667 ret
= mtk_snand_select_opcode_rfc(snf
, snfi_caps
, snand_info
->cap_rd
);
1671 ret
= mtk_snand_select_opcode_pl(snf
, snfi_caps
, snand_info
->cap_pl
);
1675 /* ECC and page format */
1676 snf
->ecc_steps
= snf
->writesize
/ snf
->nfi_soc
->sector_size
;
1677 if (snf
->ecc_steps
> snf
->nfi_soc
->max_sectors
) {
1678 snand_log_nfi(snf
->pdev
, "Page size %u is not supported\n",
1683 ret
= mtk_snand_pagefmt_setup(snf
);
1687 msg_size
= snf
->nfi_soc
->sector_size
+ snf
->nfi_soc
->fdm_ecc_size
;
1688 ret
= mtk_ecc_setup(snf
, snf
->nfi_base
+ NFI_FDM0L
,
1689 snf
->spare_per_sector
- snf
->nfi_soc
->fdm_size
,
1694 nfi_write16(snf
, NFI_CNFG
, 0);
1696 /* Tuning options */
1697 nfi_write16(snf
, NFI_DEBUG_CON1
, WBUF_EN
);
1698 nfi_write32(snf
, SNF_DLY_CTL3
, (40 << SFCK_SAM_DLY_S
));
1701 nfi_read32(snf
, NFI_INTR_STA
);
1702 nfi_write32(snf
, NFI_INTR_EN
, 0);
1704 /* Clear SNF done flag */
1705 nfi_rmw32(snf
, SNF_STA_CTL1
, 0, CUS_READ_DONE
| CUS_PG_DONE
);
1706 nfi_write32(snf
, SNF_STA_CTL1
, 0);
1708 /* Initialization on all dies */
1709 for (i
= 0; i
< snf
->num_dies
; i
++) {
1710 mtk_snand_select_die(snf
, i
);
1712 /* Disable On-Die ECC engine */
1713 ret
= mtk_snand_ondie_ecc_control(snf
, false);
1717 /* Disable block protection */
1718 mtk_snand_unlock(snf
);
1720 /* Enable/disable quad-spi */
1721 mtk_snand_qspi_control(snf
, snf
->quad_spi_op
);
1724 mtk_snand_select_die(snf
, 0);
1729 static int mtk_snand_id_probe(struct mtk_snand
*snf
,
1730 const struct snand_flash_info
**snand_info
)
1732 uint8_t id
[4], op
[2];
1735 /* Read SPI-NAND JEDEC ID, OP + dummy/addr + ID */
1736 op
[0] = SNAND_CMD_READID
;
1738 ret
= mtk_snand_mac_io(snf
, op
, 2, id
, sizeof(id
));
1742 *snand_info
= snand_flash_id_lookup(SNAND_ID_DYMMY
, id
);
1746 /* Read SPI-NAND JEDEC ID, OP + ID */
1747 op
[0] = SNAND_CMD_READID
;
1748 ret
= mtk_snand_mac_io(snf
, op
, 1, id
, sizeof(id
));
1752 *snand_info
= snand_flash_id_lookup(SNAND_ID_DYMMY
, id
);
1756 snand_log_chip(snf
->pdev
,
1757 "Unrecognized SPI-NAND ID: %02x %02x %02x %02x\n",
1758 id
[0], id
[1], id
[2], id
[3]);
1763 int mtk_snand_init(void *dev
, const struct mtk_snand_platdata
*pdata
,
1764 struct mtk_snand
**psnf
)
1766 const struct snand_flash_info
*snand_info
;
1767 uint32_t rawpage_size
, sect_bf_size
;
1768 struct mtk_snand tmpsnf
, *snf
;
1771 if (!pdata
|| !psnf
)
1774 if (pdata
->soc
>= __SNAND_SOC_MAX
) {
1775 snand_log_chip(dev
, "Invalid SOC %u for MTK-SNAND\n",
1780 /* Dummy instance only for initial reset and id probe */
1781 tmpsnf
.nfi_base
= pdata
->nfi_base
;
1782 tmpsnf
.ecc_base
= pdata
->ecc_base
;
1783 tmpsnf
.soc
= pdata
->soc
;
1784 tmpsnf
.nfi_soc
= &mtk_snand_socs
[pdata
->soc
];
1787 /* Switch to SNFI mode */
1788 writel(SPI_MODE
, tmpsnf
.nfi_base
+ SNF_CFG
);
1790 /* Reset SNFI & NFI */
1791 mtk_snand_mac_reset(&tmpsnf
);
1792 mtk_nfi_reset(&tmpsnf
);
1794 /* Reset SPI-NAND chip */
1795 ret
= mtk_snand_chip_reset(&tmpsnf
);
1797 snand_log_chip(dev
, "Failed to reset SPI-NAND chip\n");
1801 /* Probe SPI-NAND flash by JEDEC ID */
1802 ret
= mtk_snand_id_probe(&tmpsnf
, &snand_info
);
1806 rawpage_size
= snand_info
->memorg
.pagesize
+
1807 snand_info
->memorg
.sparesize
;
1809 sect_bf_size
= mtk_snand_socs
[pdata
->soc
].max_sectors
*
1810 sizeof(*snf
->sect_bf
);
1812 /* Allocate memory for instance and cache */
1813 snf
= generic_mem_alloc(dev
,
1814 sizeof(*snf
) + rawpage_size
+ sect_bf_size
);
1816 snand_log_chip(dev
, "Failed to allocate memory for instance\n");
1820 snf
->sect_bf
= (int *)((uintptr_t)snf
+ sizeof(*snf
));
1821 snf
->buf_cache
= (uint8_t *)((uintptr_t)snf
->sect_bf
+ sect_bf_size
);
1823 /* Allocate memory for DMA buffer */
1824 snf
->page_cache
= dma_mem_alloc(dev
, rawpage_size
);
1825 if (!snf
->page_cache
) {
1826 generic_mem_free(dev
, snf
);
1828 "Failed to allocate memory for DMA buffer\n");
1832 /* Fill up instance */
1834 snf
->nfi_base
= pdata
->nfi_base
;
1835 snf
->ecc_base
= pdata
->ecc_base
;
1836 snf
->soc
= pdata
->soc
;
1837 snf
->nfi_soc
= &mtk_snand_socs
[pdata
->soc
];
1838 snf
->snfi_quad_spi
= pdata
->quad_spi
;
1840 /* Initialize SNFI & ECC engine */
1841 ret
= mtk_snand_setup(snf
, snand_info
);
1843 dma_mem_free(dev
, snf
->page_cache
);
1844 generic_mem_free(dev
, snf
);
1853 int mtk_snand_cleanup(struct mtk_snand
*snf
)
1858 dma_mem_free(snf
->pdev
, snf
->page_cache
);
1859 generic_mem_free(snf
->pdev
, snf
);