Add Broadcom / Netgear changes from RAXE 1.0.0.48
[project/bcm63xx/u-boot.git] / drivers / mtd / nand / raw / brcmnand / brcmnand_spl.c
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Copyright 2019 Broadcom Ltd.
4 */
5 //#define DEBUG
6
7 #include <common.h>
8 #include <nand.h>
9 #include "brcmnand.h"
10 #include "brcmnand_spl.h"
11
/* Largest supported spare (OOB) area: 27 bytes per 512-byte step, 16 steps */
#define SPARE_MAX_SIZE (27 * 16)
/* The controller transfers flash data through a 512-byte cache window */
#define CTRLR_CACHE_SIZE 512
#define FC_WORDS (CTRLR_CACHE_SIZE >> 2)	/* flash cache size in 32-bit words */

/* Manufacturer + device code: the upper 16 bits of the chip ID register */
#define NAND_CHIPID(chip) ((chip)->chip_device_id >> 16)
17
18 /* Flash manufacturers. */
19 #define FLASHTYPE_SAMSUNG 0xec
20 #define FLASHTYPE_ST 0x20
21 #define FLASHTYPE_MICRON 0x2c
22 #define FLASHTYPE_HYNIX 0xad
23 #define FLASHTYPE_TOSHIBA 0x98
24 #define FLASHTYPE_MXIC 0xc2
25 #define FLASHTYPE_SPANSION 0x01
26
27 /* Samsung flash parts. */
28 #define SAMSUNG_K9F5608U0A 0x55
29 #define SAMSUNG_K9F1208U0 0x76
30 #define SAMSUNG_K9F1G08U0 0xf1
31
32 /* ST flash parts. */
33 #define ST_NAND512W3A2CN6 0x76
34 #define ST_NAND01GW3B2CN6 0xf1
35
36 /* Micron flash parts. */
37 #define MICRON_MT29F1G08AAC 0xf1
38 #define MICRON_MT29F2G08ABA 0xda
39 #define MICRON_MT29F4G08ABA 0xdc
40 #define MICRON_MT29F8G08ABA 0x38
41 #define MICRON_MT29F8G16ABA 0xd3
42
43 /* Hynix flash parts. */
44 #define HYNIX_H27U1G8F2B 0xf1
45 #define HYNIX_H27U518S2C 0x76
46
47 /* MXIC flash parts */
48 #define MXIC_MX30LF1208AA 0xf0
49 #define MXIC_MX30LF1G08AA 0xf1
50
51 /* SPANSION flash parts */
52 #define SPANSION_S34ML01G1 0xf1
53 #define SPANSION_S34ML02G1 0xda
54 #define SPANSION_S34ML04G1 0xdc
55
56 /* Flash id to name mapping. */
57 #define NAND_MAKE_ID(A,B) \
58 (((unsigned short) (A) << 8) | ((unsigned short) B & 0xff))
59
60 #define NAND_FLASH_DEVICES \
61 {{NAND_MAKE_ID(FLASHTYPE_SAMSUNG,SAMSUNG_K9F5608U0A),"Samsung K9F5608U0"}, \
62 {NAND_MAKE_ID(FLASHTYPE_SAMSUNG,SAMSUNG_K9F1208U0),"Samsung K9F1208U0"}, \
63 {NAND_MAKE_ID(FLASHTYPE_SAMSUNG,SAMSUNG_K9F1G08U0),"Samsung K9F1G08U0"}, \
64 {NAND_MAKE_ID(FLASHTYPE_ST,ST_NAND512W3A2CN6),"ST NAND512W3A2CN6"}, \
65 {NAND_MAKE_ID(FLASHTYPE_ST,ST_NAND01GW3B2CN6),"ST NAND01GW3B2CN6"}, \
66 {NAND_MAKE_ID(FLASHTYPE_MICRON,MICRON_MT29F1G08AAC),"Micron MT29F1G08AAC"},\
67 {NAND_MAKE_ID(FLASHTYPE_MICRON,MICRON_MT29F2G08ABA),"Micron MT29F2G08ABA"},\
68 {NAND_MAKE_ID(FLASHTYPE_MICRON,MICRON_MT29F4G08ABA),"Micron MT29F4G08ABA"},\
69 {NAND_MAKE_ID(FLASHTYPE_MICRON,MICRON_MT29F8G08ABA),"Micron MT29F8G08ABA"},\
70 {NAND_MAKE_ID(FLASHTYPE_MICRON,MICRON_MT29F8G16ABA),"Micron MT29F8G16ABA"},\
71 {NAND_MAKE_ID(FLASHTYPE_HYNIX,HYNIX_H27U1G8F2B),"Hynix H27U1G8F2B"}, \
72 {NAND_MAKE_ID(FLASHTYPE_HYNIX,HYNIX_H27U518S2C),"Hynix H27U518S2C"}, \
73 {NAND_MAKE_ID(FLASHTYPE_MXIC,MXIC_MX30LF1208AA),"MXIC MX30LF1208AA"}, \
74 {NAND_MAKE_ID(FLASHTYPE_MXIC,MXIC_MX30LF1G08AA),"MXIC MX30LF1G08AA"}, \
75 {NAND_MAKE_ID(FLASHTYPE_SPANSION,SPANSION_S34ML01G1),"Spansion S34ML01G1"},\
76 {NAND_MAKE_ID(FLASHTYPE_SPANSION,SPANSION_S34ML02G1),"Spansion S34ML02G1"},\
77 {NAND_MAKE_ID(FLASHTYPE_SPANSION,SPANSION_S34ML04G1),"Spansion S34ML04G1"},\
78 {0,""} \
79 }
80
81 #define NAND_FLASH_MANUFACTURERS \
82 {{FLASHTYPE_SAMSUNG, "Samsung"}, \
83 {FLASHTYPE_ST, "ST"}, \
84 {FLASHTYPE_MICRON, "Micron"}, \
85 {FLASHTYPE_HYNIX, "Hynix"}, \
86 {FLASHTYPE_TOSHIBA, "Toshiba"}, \
87 {FLASHTYPE_MXIC, "MXIC"}, \
88 {FLASHTYPE_SPANSION, "Spansion"}, \
89 {0,""} \
90 }
91
92 /* Condition to determine the spare layout. */
93 #define LAYOUT_PARMS(L,S,P) \
94 (((unsigned int)(L)<<28) | ((unsigned int)(S)<<16) | (P))
95
96 /* Each bit in the ECCMSK array represents a spare area byte. Bits that are
97 * set correspond to spare area bytes that are reserved for the ECC or bad
98 * block indicator. Bits that are not set can be used for data such as the
99 * JFFS2 clean marker. This macro returns 0 if the spare area byte at offset,
100 * OFS, is available and non-0 if it is being used for the ECC or BI.
101 */
102 #define ECC_MASK_BIT(ECCMSK, OFS) (ECCMSK[OFS / 8] & (1 << (OFS % 8)))
103
104 #define SPARE_BI_MARKER 0
105 #define SPARE_GOOD_MARKER 0xFF
106
107 /* Fixed definition for NAND controller on all revision */
108 #define CMD_NULL 0x00
109 #define CMD_PAGE_READ 0x01
110 #define CMD_SPARE_AREA_READ 0x02
111 #define CMD_STATUS_READ 0x03
112 #define CMD_PROGRAM_PAGE 0x04
113 #define CMD_PROGRAM_SPARE_AREA 0x05
114 #define CMD_COPY_BACK 0x06
115 #define CMD_DEVICE_ID_READ 0x07
116 #define CMD_BLOCK_ERASE 0x08
117 #define CMD_FLASH_RESET 0x09
118 #define CMD_BLOCKS_LOCK 0x0a
119 #define CMD_BLOCKS_LOCK_DOWN 0x0b
120 #define CMD_BLOCKS_UNLOCK 0x0c
121 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
122 #define CMD_PARAMETER_READ 0x0e
123 #define CMD_PARAMETER_CHANGE_COL 0x0f
124 #define CMD_LOW_LEVEL_OP 0x10
125
126 #define NBC_AUTO_DEV_ID_CFG (1 << 30)
127
128 #define NIS_CTLR_READY (1 << 31)
129 #define NIS_FLASH_READY (1 << 30)
130 #define NIS_CACHE_VALID (1 << 29)
131 #define NIS_SPARE_VALID (1 << 28)
132 #define NIS_FLASH_STS_MASK 0x000000ff
133
134 #define NC_DEV_SIZE_SHIFT 24
135 #define NC_DEV_SIZE_MASK (0x0f << NC_DEV_SIZE_SHIFT)
136 #define NC_FUL_ADDR_SHIFT 16
137 #define NC_FUL_ADDR_MASK (0x7 << NC_FUL_ADDR_SHIFT)
138 #define NC_COL_ADDR_SHIFT 12
139 #define NC_COL_ADDR_MASK (0x7 << NC_COL_ADDR_SHIFT)
140 #define NC_BLK_ADDR_SHIFT 8
141 #define NC_BLK_ADDR_MASK (0x07 << NC_BLK_ADDR_SHIFT)
142
143 #define NAC_ECC_LVL_SHIFT 16
144 #define NAC_ECC_LVL_MASK 0x001f0000
145 #define NAC_ECC_LVL_DISABLE 0
146 #define NAC_ECC_LVL_BCH_1 1
147 #define NAC_ECC_LVL_BCH_2 2
148 #define NAC_ECC_LVL_BCH_3 3
149 #define NAC_ECC_LVL_BCH_4 4
150 #define NAC_ECC_LVL_BCH_5 5
151 #define NAC_ECC_LVL_BCH_6 6
152 #define NAC_ECC_LVL_BCH_7 7
153 #define NAC_ECC_LVL_BCH_8 8
154 #define NAC_ECC_LVL_BCH_9 9
155 #define NAC_ECC_LVL_BCH_10 10
156 #define NAC_ECC_LVL_BCH_11 11
157 #define NAC_ECC_LVL_BCH_12 12
158 #define NAC_ECC_LVL_BCH_13 13
159 #define NAC_ECC_LVL_BCH_14 14
160 #define NAC_ECC_LVL_HAMMING 15 /* Hamming if spare are size = 16, BCH15 otherwise */
161 #define NAC_ECC_LVL_BCH15 15
162 #define NAC_ECC_LVL_BCH_16 16
163 #define NAC_ECC_LVL_BCH_17 17
164 /* BCH18 to 30 use sector size = 1K */
165 #define NAC_SECTOR_SIZE_1K (1 << 7)
166 #define NAC_SPARE_SZ_SHIFT 0
167 #define NAC_SPARE_SZ_MASK 0x0000007f
168
169 #define NT_TREH_MASK 0x000f0000
170 #define NT_TREH_SHIFT 16
171 #define NT_TRP_MASK 0x00f00000
172 #define NT_TRP_SHIFT 20
173 #define NT_TREAD_MASK 0x0000000f
174 #define NT_TREAD_SHIFT 0
175
/* Describes how the device/block/page size fields are encoded in the
 * configuration registers of a given controller revision. When a *_tbl
 * pointer is set, the decoded register field indexes that table; otherwise
 * the size is a power of two derived from the field (see the
 * brcmnand_get_* / brcmnand_set_* helpers).
 */
struct cfg_decode_map {
	uint16_t dev_size_reg;		/* enum brcmnand_reg holding device size */
	uint16_t dev_size_shift;
	uint32_t dev_size_mask;
	uint16_t block_size_reg;
	uint16_t block_size_shift;
	uint32_t block_size_mask;
	uint16_t page_size_reg;
	uint16_t page_size_shift;
	uint32_t page_size_mask;
	uint32_t *block_tbl;		/* NULL => size encoded as power of 2 */
	uint32_t *page_tbl;		/* NULL => size encoded as power of 2 */
};

/* Per-controller state: register bases, detected revision, decode tables */
struct brcmnand_controller {
	void __iomem *nand_base;	/* controller register base */
	void __iomem *nand_fc;		/* flash cache */
	uint16_t nand_version;		/* revision register value, e.g. 0x0701 */
	const uint16_t *reg_offsets;	/* per-revision register offset map */
	const struct cfg_decode_map *cfg_dec_map;
	uint8_t *flash_cache;		/* word-aligned bounce buffer */
};

/* Geometry and spare-area layout of the probed NAND chip */
struct brcmnand_chip {
	struct brcmnand_controller *ctrl;
	uint32_t chip_device_id;	/* raw ID register value */
	uint64_t chip_total_size;	/* total size in bytes */
	uint32_t chip_block_size;	/* erase block size in bytes */
	uint32_t chip_page_size;	/* page size in bytes */
	uint32_t chip_spare_size;	/* spare bytes per page */
	uint32_t chip_spare_step_size;	/* spare bytes per 512 bytes of data */
	uint32_t chip_ecc_level;	/* NAC_ECC_LVL_* value from ACC_CONTROL */
	uint32_t sector_size_1k;	/* 1 if ECC sector is 1KB, else 0 */
	uint32_t chip_bi_index_1;	/* spare-area offsets of the two */
	uint32_t chip_bi_index_2;	/* bad block indicator bytes */
};

/* Logical register names, mapped to byte offsets via ctrl->reg_offsets */
enum brcmnand_reg {
	BRCMNAND_CMD_START = 0,
	BRCMNAND_CMD_EXT_ADDRESS,
	BRCMNAND_CMD_ADDRESS,
	BRCMNAND_INTFC_STATUS,
	BRCMNAND_CS_SELECT,
	BRCMNAND_CS_XOR,
	BRCMNAND_CS_ACC_CONTROL,
	BRCMNAND_CS_CFG_EXT,
	BRCMNAND_CS_CFG,
	BRCMNAND_ID,
	BRCMNAND_OOB_READ_BASE,
	BRCMNAND_TIMING1,
	BRCMNAND_TIMING2,
};
228
/* BRCMNAND v6.0 - v7.0 register byte offsets */
static const u16 brcmnand_regs_v60[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_CS_ACC_CONTROL] = 0x50,
	[BRCMNAND_CS_CFG_EXT] = 0,	/* unmapped: reads 0, writes ignored */
	[BRCMNAND_CS_CFG] = 0x54,
	[BRCMNAND_TIMING1] = 0x58,
	[BRCMNAND_TIMING2] = 0x5c,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
};

/* BRCMNAND v7.1 register byte offsets (CFG split into CFG and CFG_EXT) */
static const u16 brcmnand_regs_v71[] = {
	[BRCMNAND_CMD_START] = 0x04,
	[BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
	[BRCMNAND_CMD_ADDRESS] = 0x0c,
	[BRCMNAND_INTFC_STATUS] = 0x14,
	[BRCMNAND_CS_SELECT] = 0x18,
	[BRCMNAND_CS_XOR] = 0x1c,
	[BRCMNAND_CS_ACC_CONTROL] = 0x50,
	[BRCMNAND_CS_CFG_EXT] = 0x54,
	[BRCMNAND_CS_CFG] = 0x58,
	[BRCMNAND_TIMING1] = 0x5c,
	[BRCMNAND_TIMING2] = 0x60,
	[BRCMNAND_ID] = 0x194,
	[BRCMNAND_OOB_READ_BASE] = 0x200,
};
262
/* v6.0 block-size lookup table, indexed by the decoded cfg field.
 * Zero-terminated; see brcmnand_block_size_mapped().
 */
uint32_t blk_tbl_v60[] = {
	SZ_8K,
	SZ_16K,
	SZ_128K,
	SZ_256K,
	SZ_512K,
	SZ_1M,
	SZ_2M,
	0,
};

/* v6.0 page-size lookup table, indexed by the decoded cfg field */
uint32_t pg_tbl_v60[] = {
	SZ_512,
	SZ_2K,
	SZ_4K,
	SZ_8K,
	0,
};

/* v6.0: all size fields live in CS_CFG; block/page sizes use lookup tables */
static const struct cfg_decode_map cfg_decode_map_v60 = {
	.dev_size_reg = BRCMNAND_CS_CFG,
	.dev_size_shift = 24,
	.dev_size_mask = (0xf << 24),
	.block_size_reg = BRCMNAND_CS_CFG,
	.block_size_shift = 28,
	.block_size_mask = (0x7 << 28),
	.page_size_reg = BRCMNAND_CS_CFG,
	.page_size_shift = 20,
	.page_size_mask = (0x3 << 20),
	.block_tbl = blk_tbl_v60,
	.page_tbl = pg_tbl_v60,
};

/* v7.1: block/page sizes move to CS_CFG_EXT and are encoded as powers of 2 */
static const struct cfg_decode_map cfg_decode_map_v71 = {
	.dev_size_reg = BRCMNAND_CS_CFG,
	.dev_size_shift = 24,
	.dev_size_mask = (0xf << 24),
	.block_size_reg = BRCMNAND_CS_CFG_EXT,
	.block_size_shift = 4,
	.block_size_mask = (0xff << 4),
	.page_size_reg = BRCMNAND_CS_CFG_EXT,
	.page_size_shift = 0,
	.page_size_mask = 0xf,
	.block_tbl = NULL,
	.page_tbl = NULL,
};
309
310 static struct brcmnand_chip nand_chip;
311 static struct brcmnand_controller nand_ctrl;
312
313 static u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
314 {
315 return brcmnand_readl(ctrl->nand_base + offs);
316 }
317
318 static void nand_writereg(struct brcmnand_controller *ctrl, u32 offs, u32 val)
319 {
320 brcmnand_writel(val, ctrl->nand_base + offs);
321 }
322
323 static u32 brcmnand_read_reg(struct brcmnand_controller *ctrl, enum brcmnand_reg reg)
324 {
325 u16 offs = ctrl->reg_offsets[reg];
326
327 if (offs)
328 return nand_readreg(ctrl, offs);
329 else
330 return 0;
331 }
332
333 static void brcmnand_write_reg(struct brcmnand_controller *ctrl, enum brcmnand_reg reg, u32 val)
334 {
335 u16 offs = ctrl->reg_offsets[reg];
336
337 if (offs)
338 nand_writereg(ctrl, offs, val);
339 }
340
341 static void brcmnand_rmw_reg(struct brcmnand_controller *ctrl, enum brcmnand_reg reg, u32 mask, unsigned
342 int shift, u32 val)
343 {
344 u32 tmp = brcmnand_read_reg(ctrl, reg);
345
346 tmp &= ~mask;
347 tmp |= val << shift;
348 brcmnand_write_reg(ctrl, reg, tmp);
349 }
350
351 static u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, uint32_t offset)
352 {
353 return __raw_readl(ctrl->nand_fc + offset);
354 }
355
356 static u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
357 {
358 u16 offset0, reg_offs;
359
360 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
361
362 reg_offs = offset0 + (offs & ~0x03);
363
364 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
365 }
366
367 static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
368 {
369 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
370
371 /* Only support v4.0+? */
372 if (ctrl->nand_version < 0x0600 || ctrl->nand_version > 0x0701) {
373 dev_err(ctrl->dev, "version %#x not supported\n", ctrl->nand_version);
374 return -ENODEV;
375 }
376
377 /* Register offsets */
378 if (ctrl->nand_version >= 0x0701) {
379 ctrl->reg_offsets = brcmnand_regs_v71;
380 ctrl->cfg_dec_map = &cfg_decode_map_v71;
381 } else {
382 ctrl->reg_offsets = brcmnand_regs_v60;
383 ctrl->cfg_dec_map = &cfg_decode_map_v60;
384 }
385
386 return 0;
387 }
388
389 static int brcmnand_wait_status(struct brcmnand_controller *ctrl, unsigned int status_mask)
390 {
391
392 const unsigned int nand_poll_max = 2000000;
393
394 unsigned int data;
395 unsigned int poll_count = 0;
396 int ret = 0;
397
398 do {
399 data = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
400 } while (!(status_mask & data) && (++poll_count < nand_poll_max));
401
402 data = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
403 if (!(status_mask & data)) {
404 printf("Status wait timeout: nandsts=0x%8.8x mask=0x%8.8x, count=" "%u\n", data, status_mask, poll_count);
405 ret = -ETIMEDOUT;
406 }
407
408 return (ret);
409 }
410
411 extern int brcmnand_wait_cmd(struct brcmnand_controller *ctrl)
412 {
413 return brcmnand_wait_status(ctrl, NIS_CTLR_READY);
414 }
415
416 extern int brcmnand_wait_device(struct brcmnand_controller *ctrl)
417 {
418 return brcmnand_wait_status(ctrl, NIS_FLASH_READY);
419 }
420
421 extern int brcmnand_wait_cache(struct brcmnand_controller *ctrl)
422 {
423 return brcmnand_wait_status(ctrl, NIS_CACHE_VALID);
424 }
425
426 extern int brcmnand_wait_spare(struct brcmnand_controller *ctrl)
427 {
428 return brcmnand_wait_status(ctrl, NIS_SPARE_VALID);
429 }
430
431 static void brcmnand_reset_device(struct brcmnand_controller *ctrl)
432 {
433 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, (NBC_AUTO_DEV_ID_CFG | 1));
434 brcmnand_wait_device(ctrl);
435 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, 0x0);
436 }
437
/* Map a block size in bytes to its index in the controller's zero-terminated
 * block-size table.
 * Returns the table index, or 0 (with a diagnostic) when the size is not
 * representable on this controller revision.
 *
 * Fix: block_size is uint32_t, so print it with %u rather than %d.
 */
static uint32_t brcmnand_block_size_mapped(uint32_t * block_tbl, uint32_t block_size)
{
	uint32_t i;

	for (i = 0; block_tbl[i]; i++) {
		if (block_tbl[i] == block_size)
			return i;
	}

	printf("Invalid block size %uKB for this nand controller!\n",
	       block_size >> 10);
	return 0;
}
451
452 static uint64_t brcmnand_get_dev_size(struct brcmnand_controller *ctrl)
453 {
454 uint32_t dev_size, reg;
455 const struct cfg_decode_map *cfg_map = ctrl->cfg_dec_map;
456
457 reg = brcmnand_read_reg(ctrl, cfg_map->dev_size_reg);
458 dev_size = (reg & cfg_map->dev_size_mask) >> cfg_map->dev_size_shift;
459
460 return 1ULL << (dev_size + 22);
461 }
462
463 static uint32_t brcmnand_get_blk_size(struct brcmnand_controller *ctrl)
464 {
465 uint32_t blk_size, reg;
466 const struct cfg_decode_map *cfg_map = ctrl->cfg_dec_map;
467
468 reg = brcmnand_read_reg(ctrl, cfg_map->block_size_reg);
469 blk_size = (reg & cfg_map->block_size_mask) >> cfg_map->block_size_shift;
470
471 if (cfg_map->block_tbl)
472 return cfg_map->block_tbl[blk_size];
473 else
474 return 1 << (blk_size + 13);
475 }
476
477 static uint32_t brcmnand_get_pg_size(struct brcmnand_controller *ctrl)
478 {
479 uint32_t page_size, reg;
480 const struct cfg_decode_map *cfg_map = ctrl->cfg_dec_map;
481
482 reg = brcmnand_read_reg(ctrl, cfg_map->page_size_reg);
483 page_size = (reg & cfg_map->page_size_mask) >> cfg_map->page_size_shift;
484
485 if (cfg_map->page_tbl)
486 return cfg_map->page_tbl[page_size];
487 else
488 return 1 << (page_size + 9);
489 }
490
491 static void brcmnand_set_dev_size(struct brcmnand_controller *ctrl, uint32_t dev_size_in_order)
492 {
493 const struct cfg_decode_map *cfg_map = ctrl->cfg_dec_map;
494
495 brcmnand_rmw_reg(ctrl, cfg_map->dev_size_reg, cfg_map->dev_size_mask, cfg_map->dev_size_shift, dev_size_in_order - 22);
496 }
497
498 static void brcmnand_set_block_size(struct brcmnand_controller *ctrl, uint32_t block_size_in_order)
499 {
500 const struct cfg_decode_map *cfg_map = ctrl->cfg_dec_map;
501 uint32_t block_size, reg_val;
502
503 if (cfg_map->block_tbl) {
504 block_size = 1 << block_size_in_order;
505 reg_val = brcmnand_block_size_mapped(cfg_map->block_tbl, block_size);
506 } else
507 reg_val = block_size_in_order - 13;
508
509 brcmnand_rmw_reg(ctrl, cfg_map->block_size_reg, cfg_map->block_size_mask, cfg_map->block_size_shift, reg_val);
510 }
511
512 static void brcmnand_copy_from_cache(struct brcmnand_controller *ctrl, unsigned char *buffer, int offset, int numbytes)
513 {
514 int use_buffer = 0, read_bytes, i;
515 uint32_t *buf;
516
517 #if defined(CONFIG_BCM47189)
518 {
519 uint32_t ioctrl = NAND_FLASH_CTRL_WRAP->ioctrl;
520 ioctrl = ioctrl | NAND_APB_LITTLE_ENDIAN;
521 NAND_FLASH_CTRL_WRAP->ioctrl = ioctrl;
522 }
523 #endif
524 if (offset & 0x3) {
525 printk("brcmnand_copy_from_cache invalid offset %d!\n", offset);
526 return;
527 }
528
529 use_buffer = ((uintptr_t) buffer & 0x3) || (numbytes & 0x3);
530 if (use_buffer)
531 buf = (uint32_t *) ctrl->flash_cache;
532 else
533 buf = (uint32_t *) buffer;
534 read_bytes = ((numbytes + 0x3) >> 2) << 2;
535
536 debug(">> brcmnand_copy_from_cache - use_buffer %d buffer 0x%p offset %d read bytes %d\n",
537 use_buffer, buf, offset, read_bytes);
538
539 for (i = 0; i < read_bytes; i += 4, buf++) {
540 *buf = brcmnand_read_fc(ctrl, i + offset);
541 #if 0
542 debug("0x%08x ", *buf);
543 if ((i + 1) % 16 == 0)
544 debug("\n");
545 #endif
546 }
547 //debug("\n");
548
549 if (use_buffer)
550 memcpy(buffer, ctrl->flash_cache, numbytes);
551
552 #if defined(CONFIG_BCM47189)
553 {
554 uint32_t ioctrl = NAND_FLASH_CTRL_WRAP->ioctrl;
555 ioctrl = ioctrl & ~NAND_APB_LITTLE_ENDIAN;
556 NAND_FLASH_CTRL_WRAP->ioctrl = ioctrl;
557 }
558 #endif
559
560 }
561
/* Copy @numbytes spare-area bytes from the controller OOB registers */
static void brcmnand_copy_from_spare(struct brcmnand_controller *ctrl, unsigned char *buffer, int numbytes)
{
	int i;

	for (i = 0; i < numbytes; i++)
		buffer[i] = oob_reg_read(ctrl, i);
}
567
/* Probe the device's ONFI parameter page. If the part reports more than one
 * LUN, recompute the total size from the ONFI geometry fields and reprogram
 * the controller's device size field accordingly.
 */
static void brcmnand_check_onfi(struct brcmnand_chip *chip, struct brcmnand_controller *ctrl)
{
	struct nand_onfi_params onfi;
	uint64_t onfi_total_size;
	uint32_t size_in_order = 0;

	memset(&onfi, 0x0, sizeof(onfi));
	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, CMD_PARAMETER_READ);
	if (brcmnand_wait_cmd(ctrl) == 0 && brcmnand_wait_cache(ctrl) == 0) {
		/* Hardware NAND controller does not take into account LUNs, so if
		 * this value is not 1 we calculate the die stack NAND size
		 */
		brcmnand_copy_from_cache(ctrl, (unsigned char *)&onfi, 0, sizeof(onfi));
		/* A valid parameter page starts with the ASCII signature "ONFI" */
		if ((onfi.sig[0] == 'O') && (onfi.sig[1] == 'N') && (onfi.sig[2] == 'F') && (onfi.sig[3] == 'I')) {
			debug("ONFI detected, page size 0x%x, page per block %d block per lun %d lun count%d\n",
			      le32_to_cpu(onfi.byte_per_page), le32_to_cpu(onfi.pages_per_block),
			      le32_to_cpu(onfi.blocks_per_lun), onfi.lun_count);

			/* adjust size based on # of luns */
			if (onfi.lun_count != 1) {
				onfi_total_size = le32_to_cpu(onfi.byte_per_page) * le32_to_cpu(onfi.pages_per_block);
				onfi_total_size *= le32_to_cpu(onfi.blocks_per_lun);
				onfi_total_size *= onfi.lun_count;

				if (onfi_total_size != chip->chip_total_size) {
					printf("Correct total size based on ONFI old size 0x%llx to new size 0x%llx\n",
					       chip->chip_total_size, onfi_total_size);
					chip->chip_total_size = onfi_total_size;
					/* size_in_order = floor(log2(total size)) */
					while (onfi_total_size >>= 1) {
						size_in_order++;
					}

					brcmnand_set_dev_size(ctrl, size_in_order);
				}
			}
		}
	}
}
604
/* Apply per-part fixups to the auto-detected controller configuration, then
 * capture the final geometry (sizes, spare layout, ECC level and bad block
 * indicator offsets) into @chip.
 * Always returns 0.
 */
static int brcmnand_adjust_cfg(struct brcmnand_chip *chip)
{
	struct brcmnand_controller *ctrl = chip->ctrl;
	const struct cfg_decode_map *cfg_map = ctrl->cfg_dec_map;
	uint32_t mask, reg_val;
	uint32_t ecc_lvl;

	/* Special case changes from what the NAND controller configured. */
	switch (NAND_CHIPID(chip)) {
	case NAND_MAKE_ID(FLASHTYPE_HYNIX, HYNIX_H27U1G8F2B):
		/* 128 MB device size, 4 full address bytes, 2 column address
		 * bytes, 2 block address bytes
		 */
		mask = cfg_map->dev_size_mask | NC_FUL_ADDR_MASK | NC_COL_ADDR_MASK | NC_BLK_ADDR_MASK;
		reg_val = (5 << cfg_map->dev_size_shift) | (0x04 << NC_FUL_ADDR_SHIFT) | (0x2 << NC_COL_ADDR_SHIFT) | (0x2 << NC_BLK_ADDR_SHIFT);
		brcmnand_rmw_reg(ctrl, BRCMNAND_CS_CFG, mask, 0, reg_val);
		break;

	case NAND_MAKE_ID(FLASHTYPE_SAMSUNG, SAMSUNG_K9F5608U0A):
	case NAND_MAKE_ID(FLASHTYPE_SAMSUNG, SAMSUNG_K9F1208U0):
	case NAND_MAKE_ID(FLASHTYPE_SAMSUNG, SAMSUNG_K9F1G08U0):
	case NAND_MAKE_ID(FLASHTYPE_HYNIX, HYNIX_H27U518S2C):
		/* Set device id "cell type" to 0 (SLC). */
		chip->chip_device_id &= ~NAND_CI_CELLTYPE_MSK;
		brcmnand_write_reg(ctrl, BRCMNAND_ID, chip->chip_device_id);
		break;

	case NAND_MAKE_ID(FLASHTYPE_MXIC, MXIC_MX30LF1208AA):
		/* This 64MB device was detected as 256MB device on 63268.
		 * Manually update device size in the cfg register.
		 */
		brcmnand_set_dev_size(ctrl, 26);
		break;

	case NAND_MAKE_ID(FLASHTYPE_SPANSION, SPANSION_S34ML01G1):
		/* Set device size to 128MB, it is misconfigured to 512MB. */
		brcmnand_set_dev_size(ctrl, 27);
		break;

	case NAND_MAKE_ID(FLASHTYPE_SPANSION, SPANSION_S34ML02G1):
		/* Set device size to 256MB, it is misconfigured to 512MB. */
		brcmnand_set_dev_size(ctrl, 28);
		break;

	case NAND_MAKE_ID(FLASHTYPE_SPANSION, SPANSION_S34ML04G1):
		/* Set block size to 128KB, it is misconfigured to 512MB in
		 * 63138, 47189.
		 */
		if (ctrl->nand_version <= 0x00000700)
			brcmnand_set_block_size(ctrl, 17);
		break;
	}

	/* Read back the (possibly corrected) geometry */
	chip->chip_total_size = brcmnand_get_dev_size(ctrl);
	chip->chip_block_size = brcmnand_get_blk_size(ctrl);
	chip->chip_page_size = brcmnand_get_pg_size(ctrl);

	/* for SPL we don't care spare area. Only need to know the bbi location */
	reg_val = brcmnand_read_reg(ctrl, BRCMNAND_CS_ACC_CONTROL);
	chip->chip_ecc_level = ecc_lvl = (reg_val & NAC_ECC_LVL_MASK) >> NAC_ECC_LVL_SHIFT;

	/* The access control register spare size is the number of spare area
	 * bytes per 512 bytes of data. The chip_spare_size is the number
	 * of spare area bytes per page.
	 */
	chip->chip_spare_step_size = ((reg_val & NAC_SPARE_SZ_MASK) >> NAC_SPARE_SZ_SHIFT);
	chip->chip_spare_size = chip->chip_spare_step_size * (chip->chip_page_size >> 9);

	/* Bad block indicator offsets depend on ECC mode and page size */
	if (ecc_lvl == NAC_ECC_LVL_HAMMING) {
		if (chip->chip_page_size == 512) {
			chip->chip_bi_index_1 = chip->chip_bi_index_2 = 5;
		} else {
			chip->chip_bi_index_1 = 0;
			chip->chip_bi_index_2 = 1;
		}
	} else if (ecc_lvl == NAC_ECC_LVL_BCH_4 && chip->chip_page_size == 512) {
		chip->chip_bi_index_1 = chip->chip_bi_index_2 = 5;
	} else
		chip->chip_bi_index_1 = chip->chip_bi_index_2 = 0;

	chip->sector_size_1k = (reg_val & NAC_SECTOR_SIZE_1K) ? 1 : 0;

	/* ONFI may further correct the total size for multi-LUN parts */
	brcmnand_check_onfi(chip, ctrl);

	return 0;
}
689
/* Program the read timing fields: TREH and TRP in TIMING1, TREAD in TIMING2 */
static void brcmnand_adjust_timing(struct brcmnand_chip *chip)
{
	struct brcmnand_controller *ctrl = chip->ctrl;

	/* adjust reading timing */
	/* Default of TRP=4 and TREAD=5 for Hynix parts on 63268 */
	/* Almost all parts could use TRP=3 and TREAD=4 */

	brcmnand_rmw_reg(ctrl, BRCMNAND_TIMING1, NT_TREH_MASK, NT_TREH_SHIFT, 2);
	brcmnand_rmw_reg(ctrl, BRCMNAND_TIMING1, NT_TRP_MASK, NT_TRP_SHIFT, 4);

	brcmnand_rmw_reg(ctrl, BRCMNAND_TIMING2, NT_TREAD_MASK, NT_TREAD_SHIFT, 5);
}
703
704 /* Simplified version for bad block marker read. Only read the first spare area step size */
705 /* This is good enough for BBM */
706 static int brcmnand_read_spare_area(struct brcmnand_chip *chip, uint64_t page_addr, unsigned char *buffer, int len)
707 {
708 int ret = -EIO;
709 struct brcmnand_controller *ctrl = chip->ctrl;
710
711 if (len > chip->chip_spare_step_size)
712 len = chip->chip_spare_step_size;
713
714 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, (uint32_t) page_addr);
715 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, (uint32_t) (page_addr >> 32));
716 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, CMD_PAGE_READ);
717
718 if ((ret = brcmnand_wait_cmd(ctrl)) == 0) {
719 /* wait until data is available in the spare area registers */
720 if ((ret = brcmnand_wait_spare(ctrl)) == 0)
721 brcmnand_copy_from_spare(ctrl, buffer, len);
722 }
723
724 return ret;
725 }
726
/* Read @len bytes starting at absolute flash address @start_addr into
 * @buffer. @len must not exceed one erase block. Whole pages are issued to
 * the controller, one 512-byte cache transfer at a time, and only the part
 * of each transfer that overlaps the requested [start_addr, start_addr+len)
 * range is copied out.
 * Returns 0 on success or a negative error code (-EIO if len is too large).
 */
static int brcmnand_read_page(struct brcmnand_chip *chip, uint64_t start_addr, unsigned char *buffer, int len)
{
	int ret = -EIO;
	struct brcmnand_controller *ctrl = chip->ctrl;

	if (len <= chip->chip_block_size) {
		/* round down to the page containing start_addr */
		uint64_t page_addr = start_addr & ~(chip->chip_page_size - 1);
		uint32_t index = 0;	/* current write position in buffer */
		uint32_t subpage;	/* byte offset of cache transfer in page */
		int length = len;	/* bytes still to copy */

		do {
			for (subpage = 0, ret = 0; (subpage < chip->chip_page_size) && (ret == 0); subpage += CTRLR_CACHE_SIZE) {
				brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, (uint32_t) page_addr + subpage);
				brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, (uint32_t) (page_addr >> 32));
				brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, CMD_PAGE_READ);

				if ((ret = brcmnand_wait_cmd(ctrl)) == 0) {
					/* wait until data is available in the cache */
					if ((ret = brcmnand_wait_cache(ctrl)) != 0) {
						ret = -EIO;
					}

					/* copy from cache only if the requested range
					 * overlaps this 512-byte subpage
					 */
					if ((ret == 0) && (start_addr < (page_addr + subpage + CTRLR_CACHE_SIZE)) && ((start_addr + len) > page_addr + subpage)) {
						uint32_t copy_size, offset;

						if (start_addr <= page_addr + subpage) {
							/* request began at/before this subpage */
							offset = 0;

							if ((start_addr + len) >= (page_addr + subpage + CTRLR_CACHE_SIZE))
								copy_size = CTRLR_CACHE_SIZE;
							else
								copy_size = (start_addr + len) - (page_addr + subpage);
						} else { /* start_addr > page_addr + subpage */
							offset = start_addr - (page_addr + subpage);

							if ((start_addr + len) >= (page_addr + subpage + CTRLR_CACHE_SIZE))
								copy_size = page_addr + subpage + CTRLR_CACHE_SIZE - start_addr;
							else
								/* request starts and ends inside this
								 * subpage; expression reduces to len
								 */
								copy_size = start_addr + len - start_addr;
						}

						brcmnand_copy_from_cache(ctrl, &buffer[index], offset, copy_size);

						index += copy_size;
						length -= copy_size;
					}
				}
			}

			if (ret != 0)
				break;

			page_addr += chip->chip_page_size;

		} while (length);
	}

	return (ret);
}
787
/* Probe and initialize the NAND controller and attached chip: allocate the
 * word-aligned bounce buffer, detect the controller revision, reset/select
 * the device, read the chip ID, apply per-part configuration fixups and
 * read timings, then print a summary of the detected geometry.
 * Hangs the board if the bounce buffer cannot be allocated.
 */
void brcmnand_init(void)
{
	struct brcmnand_chip *chip = &nand_chip;
	struct brcmnand_controller *ctrl = &nand_ctrl;

	/* word-aligned bounce buffer used by brcmnand_copy_from_cache() */
	ctrl->flash_cache = memalign(sizeof(uint32_t), CTRLR_CACHE_SIZE);
	if (ctrl->flash_cache == NULL) {
		printf("nand_flash_init failed to allocate flash buffer!\n");
		hang();
	}

	/* TODO get this base from device tree */
	ctrl->nand_base = (void __iomem *)CONFIG_SYS_NAND_BASE;
	ctrl->nand_fc = (void __iomem *)(CONFIG_SYS_NAND_BASE + 0x400);

	chip->ctrl = ctrl;
	brcmnand_revision_init(ctrl);
	brcmnand_reset_device(ctrl);

	/* Read the chip id. Only use the most signficant 16 bits. */
	chip->chip_device_id = brcmnand_read_reg(ctrl, BRCMNAND_ID);
	brcmnand_adjust_cfg(chip);
	brcmnand_adjust_timing(chip);

	printf("nand flash device id 0x%x, total size %dMB\n", chip->chip_device_id, (uint32_t) (chip->chip_total_size >> 20));
	printf("block size %dKB, page size %d bytes, spare area %d bytes\n",
	       chip->chip_block_size >> 10, chip->chip_page_size, chip->chip_spare_size);
	if (chip->chip_ecc_level == 0)
		printf("ECC disabled\n");
	else if (chip->chip_ecc_level == NAC_ECC_LVL_HAMMING)
		printf("ECC Hamming\n");
	else {
		/* displayed BCH level doubles when 1KB ECC sectors are used */
		printf("ECC BCH-%d", chip->chip_ecc_level << chip->sector_size_1k);
		printf(" %s\n", chip->sector_size_1k ? "(1KB sector)" : "");
	}
}
824
825 /* Check if the block is good or bad. If bad returns 1, if good returns 0 */
826 int brcmnand_is_bad_block(int blk)
827 {
828 struct brcmnand_chip *chip = &nand_chip;
829 unsigned char spare[16];
830 uint32_t page_addr = (blk * chip->chip_block_size) & ~(chip->chip_page_size - 1);
831 int i, size;
832
833 // always return good for block 0, because if it's a bad chip quite possibly the board is useless
834 if (blk == 0)
835 return 0;
836
837 /* bad block markers are always within first spare area step size. only need to read this many bytes */
838 size = max(chip->chip_bi_index_1, chip->chip_bi_index_2) + 1;
839 if (size > chip->chip_spare_step_size || size > 16) {
840 printf("bad block marker invalid location %d %d\n",
841 chip->chip_bi_index_1, chip->chip_bi_index_2);
842 return 1;
843 }
844
845 /* Read the spare area of first and second page and check for bad block indicator */
846 for (i = 0; i < 2; i += 1, page_addr += chip->chip_page_size) {
847 if (brcmnand_read_spare_area(chip, page_addr, spare, size) == 0) {
848 if ((spare[chip->chip_bi_index_1] != SPARE_GOOD_MARKER)
849 || (spare[chip->chip_bi_index_2] != SPARE_GOOD_MARKER)) {
850 return 1; // bad block
851 }
852 } else {
853 return 1; //bad block
854 }
855 }
856
857 return 0; // good block
858 }
859
/* Read @len bytes into @buffer, starting @offset bytes into erase block
 * @blk. The read may continue across consecutive blocks.
 * Returns the number of bytes requested on success, or a negative error
 * code (-EINVAL for an out-of-range block, -EIO on read failure or if the
 * final block read is marked bad).
 */
int brcmnand_read_buf(int blk, int offset, u8 *buffer, u32 len)
{
	int ret;
	struct brcmnand_chip *chip = &nand_chip;
	uint64_t start_addr;
	uint64_t blk_addr;
	uint32_t blk_offset;
	uint32_t size;		/* bytes to read in the current block */
	uint32_t total_block = chip->chip_total_size / chip->chip_block_size;

	ret = len;

	debug(">> brcmnand_read_buf - 1 blk=0x%8.8x, offset=%d, len=%d buffer 0x%p\n", blk, offset, len, buffer);

	/* First chunk runs from the requested offset to the block end */
	start_addr = (blk * chip->chip_block_size) + offset;
	blk_addr = start_addr & ~(chip->chip_block_size - 1);
	blk_offset = start_addr - blk_addr;
	size = chip->chip_block_size - blk_offset;

	if (size > len)
		size = len;

	if (blk >= total_block) {
		printf("Attempt to read block number(%d) beyond the nand max blk(%d) \n", blk, total_block - 1);
		return -EINVAL;
	}

	if (len)
		do {
			if (brcmnand_read_page(chip, start_addr, buffer, size) != 0) {
				ret = -EIO;
				break;
			}

			len -= size;
			if (len) {
				/* continue from the start of the next block */
				blk++;

				debug(">> brcmnand_read_buf - 2 blk=0x%8.8x, len=%u\n", blk, len);

				start_addr = blk * chip->chip_block_size;
				buffer += size;
				if (len > chip->chip_block_size)
					size = chip->chip_block_size;
				else
					size = len;
			}
		} while (len);

	/* don't check for bad block during page read/write since may be
	 * reading/writing to bad block marker, check for bad block after
	 * read to allow for data recovery
	 */
	if (brcmnand_is_bad_block(blk)) {
		printf("brcmnand_read_buf(): Attempt to read bad nand block %d\n", blk);
		return -EIO;
	}

	debug(">> brcmnand_read_buf - ret=%d\n", ret);

	return (ret);
}
919
920 uint32_t brcmnand_get_page_size(void)
921 {
922 return (nand_chip.chip_page_size);
923 }
924
925 uint32_t brcmnand_get_block_size(void)
926 {
927 return (nand_chip.chip_block_size);
928 }
929
930 uint64_t brcmnand_get_total_size(void)
931 {
932 return nand_chip.chip_total_size;
933 }
934