1 From 0e1c4e3c97b83b4e7da65b1c56f0a7d40736ac53 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sun, 27 Jul 2014 11:05:17 +0100
4 Subject: [PATCH 39/53] mtd: add mt7621 nand support
6 Signed-off-by: John Crispin <blogic@openwrt.org>
8 drivers/mtd/nand/Kconfig | 6 +
9 drivers/mtd/nand/Makefile | 1 +
10 drivers/mtd/nand/bmt.c | 750 ++++++++++++
11 drivers/mtd/nand/bmt.h | 80 ++
12 drivers/mtd/nand/dev-nand.c | 63 +
13 drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++
14 drivers/mtd/nand/mtk_nand2.c | 2304 +++++++++++++++++++++++++++++++++++
15 drivers/mtd/nand/mtk_nand2.h | 452 +++++++
16 drivers/mtd/nand/nand_base.c | 6 +-
17 drivers/mtd/nand/nand_bbt.c | 19 +
18 drivers/mtd/nand/nand_def.h | 123 ++
19 drivers/mtd/nand/nand_device_list.h | 55 +
20 drivers/mtd/nand/partition.h | 115 ++
21 13 files changed, 4311 insertions(+), 3 deletions(-)
22 create mode 100644 drivers/mtd/nand/bmt.c
23 create mode 100644 drivers/mtd/nand/bmt.h
24 create mode 100644 drivers/mtd/nand/dev-nand.c
25 create mode 100644 drivers/mtd/nand/mt6575_typedefs.h
26 create mode 100644 drivers/mtd/nand/mtk_nand2.c
27 create mode 100644 drivers/mtd/nand/mtk_nand2.h
28 create mode 100644 drivers/mtd/nand/nand_def.h
29 create mode 100644 drivers/mtd/nand/nand_device_list.h
30 create mode 100644 drivers/mtd/nand/partition.h
32 --- a/drivers/mtd/nand/Kconfig
33 +++ b/drivers/mtd/nand/Kconfig
34 @@ -563,4 +563,10 @@ config MTD_NAND_MTK
35 Enables support for NAND controller on MTK SoCs.
36 This controller is found on mt27xx, mt81xx, mt65xx SoCs.
39 + tristate "Support for MTK SoC NAND controller"
40 + depends on SOC_MT7621
45 --- a/drivers/mtd/nand/Makefile
46 +++ b/drivers/mtd/nand/Makefile
47 @@ -60,6 +60,7 @@ obj-$(CONFIG_MTD_NAND_HISI504) +
48 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
49 obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
50 obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
51 +obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand2.o bmt.o
53 nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
54 nand-objs += nand_amd.o
56 +++ b/drivers/mtd/nand/bmt.c
64 + u8 bad_count; // bad block count in pool
65 + u8 mapped_count; // mapped block count in pool
72 + phys_bmt_header header;
73 + bmt_entry table[MAX_BMT_SIZE];
81 +static char MAIN_SIGNATURE[] = "BMT";
82 +static char OOB_SIGNATURE[] = "bmt";
83 +#define SIGNATURE_SIZE (3)
85 +#define MAX_DAT_SIZE 0x1000
86 +#define MAX_OOB_SIZE 0x80
88 +static struct mtd_info *mtd_bmt;
89 +static struct nand_chip *nand_chip_bmt;
90 +#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
91 +#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
93 +#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
94 +#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
96 +/*********************************************************************
97 +* Flash is split into 2 parts, the system part is for normal *
98 +* system usage, size is system_block_count, another is replace pool *
99 +* +-------------------------------------------------+ *
100 +* | system_block_count | bmt_block_count | *
101 +* +-------------------------------------------------+ *
102 +*********************************************************************/
103 +static u32 total_block_count; // block number in flash
104 +static u32 system_block_count;
105 +static int bmt_block_count; // bmt table size
106 +// static int bmt_count; // block used in bmt
107 +static int page_per_block; // page per count
109 +static u32 bmt_block_index; // bmt block index
110 +static bmt_struct bmt; // dynamic created global bmt table
112 +static u8 dat_buf[MAX_DAT_SIZE];
113 +static u8 oob_buf[MAX_OOB_SIZE];
114 +static bool pool_erased;
116 +/***************************************************************
118 +* Interface adaptor for preloader/uboot/kernel
119 +* These interfaces operate on physical address, read/write
122 +***************************************************************/
123 +int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
125 + return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
128 +bool nand_block_bad_bmt(u32 offset)
130 + return mtk_nand_block_bad_hw(mtd_bmt, offset);
133 +bool nand_erase_bmt(u32 offset)
136 + if (offset < 0x20000)
138 + MSG(INIT, "erase offset: 0x%x\n", offset);
141 + status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have a erase function defined
142 + if (status & NAND_STATUS_FAIL)
148 +int mark_block_bad_bmt(u32 offset)
150 + return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset);
153 +bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
155 + if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
161 +/***************************************************************
163 +* static internal function *
165 +***************************************************************/
166 +static void dump_bmt_info(bmt_struct * bmt)
170 + MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
171 + for (i = 0; i < bmt->mapped_count; i++)
173 + MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
177 +static bool match_bmt_signature(u8 * dat, u8 * oob)
180 + if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
185 + if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
187 + MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
192 +static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
196 + u8 *dat = (u8 *) phys_table;
198 + checksum += phys_table->header.version;
199 + checksum += phys_table->header.mapped_count;
201 + dat += sizeof(phys_bmt_header);
202 + for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
204 + checksum += dat[i];
211 +static int is_block_mapped(int index)
214 + for (i = 0; i < bmt.mapped_count; i++)
216 + if (index == bmt.table[i].mapped_index)
222 +static bool is_page_used(u8 * dat, u8 * oob)
224 + return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
227 +static bool valid_bmt_data(phys_bmt_struct * phys_table)
230 + u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
232 + // checksum correct?
233 + if (phys_table->header.checksum != checksum)
235 + MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
239 + MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
241 + // block index correct?
242 + for (i = 0; i < phys_table->header.mapped_count; i++)
244 + if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
246 + MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
251 + // pass check, valid bmt.
252 + MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
256 +static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
258 + phys_bmt_struct phys_bmt;
260 + dump_bmt_info(bmt);
262 + // fill phys_bmt_struct structure with bmt_struct
263 + memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
265 + memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
266 + phys_bmt.header.version = BMT_VERSION;
267 + // phys_bmt.header.bad_count = bmt->bad_count;
268 + phys_bmt.header.mapped_count = bmt->mapped_count;
269 + memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
271 + phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
273 + memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
274 + memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
277 +// return valid index if found BMT, else return 0
278 +static int load_bmt_data(int start, int pool_size)
280 + int bmt_index = start + pool_size - 1; // find from the end
281 + phys_bmt_struct phys_table;
284 + MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
286 + for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
288 + if (nand_block_bad_bmt(OFFSET(bmt_index)))
290 + MSG(INIT, "Skip bad block: %d\n", bmt_index);
294 + if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
296 + MSG(INIT, "Error found when read block %d\n", bmt_index);
300 + if (!match_bmt_signature(dat_buf, oob_buf))
305 + MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
307 + memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
309 + if (!valid_bmt_data(&phys_table))
311 + MSG(INIT, "BMT data is not correct %d\n", bmt_index);
315 + bmt.mapped_count = phys_table.header.mapped_count;
316 + bmt.version = phys_table.header.version;
317 + // bmt.bad_count = phys_table.header.bad_count;
318 + memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
320 + MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
322 + for (i = 0; i < bmt.mapped_count; i++)
324 + if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
326 + MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
327 + mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
335 + MSG(INIT, "bmt block not found!\n");
339 +/*************************************************************************
340 +* Find an available block and erase. *
341 +* start_from_end: if true, find available block from end of flash. *
342 +* else, find from the beginning of the pool *
343 +* need_erase: if true, all unmapped blocks in the pool will be erased *
344 +*************************************************************************/
345 +static int find_available_block(bool start_from_end)
348 + int block = system_block_count;
350 + // int avail_index = 0;
351 + MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
353 + // erase all un-mapped blocks in pool when finding available block
356 + MSG(INIT, "Erase all un-mapped blocks in pool\n");
357 + for (i = 0; i < bmt_block_count; i++)
359 + if (block == bmt_block_index)
361 + MSG(INIT, "Skip bmt block 0x%x\n", block);
365 + if (nand_block_bad_bmt(OFFSET(block + i)))
367 + MSG(INIT, "Skip bad block 0x%x\n", block + i);
375 + if (is_block_mapped(block + i) >= 0)
377 + MSG(INIT, "Skip mapped block 0x%x\n", block + i);
381 + if (!nand_erase_bmt(OFFSET(block + i)))
383 + MSG(INIT, "Erase block 0x%x failed\n", block + i);
384 + mark_block_bad_bmt(OFFSET(block + i));
391 + if (start_from_end)
393 + block = total_block_count - 1;
397 + block = system_block_count;
401 + for (i = 0; i < bmt_block_count; i++, block += direction)
403 + if (block == bmt_block_index)
405 + MSG(INIT, "Skip bmt block 0x%x\n", block);
409 + if (nand_block_bad_bmt(OFFSET(block)))
411 + MSG(INIT, "Skip bad block 0x%x\n", block);
415 + if (is_block_mapped(block) >= 0)
417 + MSG(INIT, "Skip mapped block 0x%x\n", block);
421 + MSG(INIT, "Find block 0x%x available\n", block);
428 +static unsigned short get_bad_index_from_oob(u8 * oob_buf)
430 + unsigned short index;
431 + memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
436 +void set_bad_index_to_oob(u8 * oob, u16 index)
438 + memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
441 +static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
444 + int error_block = offset / BLOCK_SIZE_BMT;
445 + int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
448 + memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
450 + to_index = find_available_block(false);
454 + MSG(INIT, "Cannot find an available block for BMT\n");
458 + { // migrate error page first
459 + MSG(INIT, "Write error page: 0x%x\n", error_page);
462 + nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
463 + write_dat = dat_buf;
465 + // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
467 + if (error_block < system_block_count)
468 + set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
470 + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
472 + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
473 + mark_block_bad_bmt(to_index);
474 + return migrate_from_bad(offset, write_dat, write_oob);
478 + for (page = 0; page < page_per_block; page++)
480 + if (page != error_page)
482 + nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
483 + if (is_page_used(dat_buf, oob_buf))
485 + if (error_block < system_block_count)
487 + set_bad_index_to_oob(oob_buf, error_block);
489 + MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
490 + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
492 + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
493 + mark_block_bad_bmt(to_index);
494 + return migrate_from_bad(offset, write_dat, write_oob);
500 + MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
505 +static bool write_bmt_to_flash(u8 * dat, u8 * oob)
507 + bool need_erase = true;
508 + MSG(INIT, "Try to write BMT\n");
510 + if (bmt_block_index == 0)
512 + // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
513 + need_erase = false;
514 + if (!(bmt_block_index = find_available_block(true)))
516 + MSG(INIT, "Cannot find an available block for BMT\n");
521 + MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
523 + // write bmt to flash
526 + if (!nand_erase_bmt(OFFSET(bmt_block_index)))
528 + MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
529 + mark_block_bad_bmt(OFFSET(bmt_block_index));
530 + // bmt.bad_count++;
532 + bmt_block_index = 0;
533 + return write_bmt_to_flash(dat, oob); // recursive call
537 + if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
539 + MSG(INIT, "Write BMT data fail, need to write again\n");
540 + mark_block_bad_bmt(OFFSET(bmt_block_index));
541 + // bmt.bad_count++;
543 + bmt_block_index = 0;
544 + return write_bmt_to_flash(dat, oob); // recursive call
547 + MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
551 +/*******************************************************************
552 +* Reconstruct bmt, called when found bmt info doesn't match bad
553 +* block info in flash.
555 +* Return NULL for failure
556 +*******************************************************************/
557 +bmt_struct *reconstruct_bmt(bmt_struct * bmt)
560 + int index = system_block_count;
561 + unsigned short bad_index;
564 + // init everything in BMT struct
565 + bmt->version = BMT_VERSION;
566 + bmt->bad_count = 0;
567 + bmt->mapped_count = 0;
569 + memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
571 + for (i = 0; i < bmt_block_count; i++, index++)
573 + if (nand_block_bad_bmt(OFFSET(index)))
575 + MSG(INIT, "Skip bad block: 0x%x\n", index);
576 + // bmt->bad_count++;
580 + MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
581 + nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
582 + /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
584 + MSG(INIT, "Error when read block %d\n", bmt_block_index);
588 + if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
590 + MSG(INIT, "get bad index: 0x%x\n", bad_index);
591 + if (bad_index != 0xFFFF)
592 + MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
596 + MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
598 + if (!nand_block_bad_bmt(OFFSET(bad_index)))
600 + MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
601 + continue; // no need to erase here, it will be erased later when trying to write BMT
604 + if ((mapped = is_block_mapped(bad_index)) >= 0)
606 + MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
607 + bmt->table[mapped].mapped_index = index; // use new one instead.
610 + // add mapping to BMT
611 + bmt->table[bmt->mapped_count].bad_index = bad_index;
612 + bmt->table[bmt->mapped_count].mapped_index = index;
613 + bmt->mapped_count++;
616 + MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
620 + MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
621 + // dump_bmt_info(bmt);
623 + // fill NAND BMT buffer
624 + memset(oob_buf, 0xFF, sizeof(oob_buf));
625 + fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
628 + if (!write_bmt_to_flash(dat_buf, oob_buf))
630 + MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
636 +/*******************************************************************
640 +* Init bmt from nand. Reconstruct if not found or data error
643 +* size: size of bmt and replace pool
646 +* NULL for failure, and a bmt struct for success
647 +*******************************************************************/
648 +bmt_struct *init_bmt(struct nand_chip * chip, int size)
650 + struct mtk_nand_host *host;
652 + if (size > 0 && size < MAX_BMT_SIZE)
654 + MSG(INIT, "Init bmt table, size: %d\n", size);
655 + bmt_block_count = size;
658 + MSG(INIT, "Invalid bmt table size: %d\n", size);
661 + nand_chip_bmt = chip;
662 + system_block_count = chip->chipsize >> chip->phys_erase_shift;
663 + total_block_count = bmt_block_count + system_block_count;
664 + page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
665 + host = (struct mtk_nand_host *)chip->priv;
666 + mtd_bmt = host->mtd;
668 + MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
669 + MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
671 + // set this flag, and unmapped block in pool will be erased.
673 + memset(bmt.table, 0, size * sizeof(bmt_entry));
674 + if ((bmt_block_index = load_bmt_data(system_block_count, size)))
676 + MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
677 + dump_bmt_info(&bmt);
681 + MSG(INIT, "Load bmt data fail, need re-construct!\n");
682 +#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT.
683 + if (reconstruct_bmt(&bmt))
691 +/*******************************************************************
698 +* offset: update block/page offset.
699 +* reason: update reason, see update_reason_t for reason.
700 +* dat/oob: data and oob buffer for write fail.
703 +* Return true for success, and false for failure.
704 +*******************************************************************/
705 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob)
708 + int orig_bad_block = -1;
709 + // int bmt_update_index;
711 + int bad_index = offset / BLOCK_SIZE_BMT;
713 +#ifndef MTK_NAND_BMT
716 + if (reason == UPDATE_WRITE_FAIL)
718 + MSG(INIT, "Write fail, need to migrate\n");
719 + if (!(map_index = migrate_from_bad(offset, dat, oob)))
721 + MSG(INIT, "migrate fail\n");
726 + if (!(map_index = find_available_block(false)))
728 + MSG(INIT, "Cannot find block in pool\n");
733 + // now let's update BMT
734 + if (bad_index >= system_block_count) // mapped block become bad, find original bad block
736 + for (i = 0; i < bmt_block_count; i++)
738 + if (bmt.table[i].mapped_index == bad_index)
740 + orig_bad_block = bmt.table[i].bad_index;
744 + // bmt.bad_count++;
745 + MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
747 + bmt.table[i].mapped_index = map_index;
750 + bmt.table[bmt.mapped_count].mapped_index = map_index;
751 + bmt.table[bmt.mapped_count].bad_index = bad_index;
752 + bmt.mapped_count++;
755 + memset(oob_buf, 0xFF, sizeof(oob_buf));
756 + fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
757 + if (!write_bmt_to_flash(dat_buf, oob_buf))
760 + mark_block_bad_bmt(offset);
765 +/*******************************************************************
769 +* Given a block index, return mapped index if it's mapped, else
770 +* return given index.
773 +* index: given a block index. This value cannot exceed
774 +* system_block_count.
776 +* Return the mapped index if found, else the given index
777 +*******************************************************************/
778 +u16 get_mapping_block_index(int index)
781 +#ifndef MTK_NAND_BMT
784 + if (index > system_block_count)
789 + for (i = 0; i < bmt.mapped_count; i++)
791 + if (bmt.table[i].bad_index == index)
793 + return bmt.table[i].mapped_index;
799 +#ifdef __KERNEL_NAND__
800 +EXPORT_SYMBOL_GPL(init_bmt);
801 +EXPORT_SYMBOL_GPL(update_bmt);
802 +EXPORT_SYMBOL_GPL(get_mapping_block_index);
804 +MODULE_LICENSE("GPL");
805 +MODULE_AUTHOR("MediaTek");
806 +MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
809 +++ b/drivers/mtd/nand/bmt.h
814 +#include "nand_def.h"
816 +#if defined(__PRELOADER_NAND__)
820 +#elif defined(__UBOOT_NAND__)
822 +#include <linux/mtd/nand.h>
823 +#include "mtk_nand2.h"
825 +#elif defined(__KERNEL_NAND__)
827 +#include <linux/mtd/mtd.h>
828 +#include <linux/mtd/rawnand.h>
829 +#include <linux/module.h>
830 +#include "mtk_nand2.h"
835 +#define MAX_BMT_SIZE (0x80)
836 +#define BMT_VERSION (1) // initial version
838 +#define MAIN_SIGNATURE_OFFSET (0)
839 +#define OOB_SIGNATURE_OFFSET (1)
840 +#define OOB_INDEX_OFFSET (29)
841 +#define OOB_INDEX_SIZE (2)
842 +#define FAKE_INDEX (0xAAAA)
844 +typedef struct _bmt_entry_
846 + u16 bad_index; // bad block index
847 + u16 mapped_index; // mapping block index in the replace pool
854 + UPDATE_UNMAPPED_BLOCK,
855 + UPDATE_REASON_COUNT,
860 + bmt_entry table[MAX_BMT_SIZE];
862 + u8 mapped_count; // mapped block count in pool
863 + u8 bad_count; // bad block count in pool. Not used in V1
866 +/***************************************************************
868 +* Interface BMT need to use *
870 +***************************************************************/
871 +extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
872 +extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
873 +extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page);
874 +extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
875 +extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
878 +/***************************************************************
880 +* Different function interface for preloader/uboot/kernel *
882 +***************************************************************/
883 +void set_bad_index_to_oob(u8 * oob, u16 index);
886 +bmt_struct *init_bmt(struct nand_chip *nand, int size);
887 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob);
888 +unsigned short get_mapping_block_index(int index);
890 +#endif // #ifndef __BMT_H__
892 +++ b/drivers/mtd/nand/dev-nand.c
894 +#include <linux/init.h>
895 +#include <linux/kernel.h>
896 +#include <linux/platform_device.h>
898 +#include "mt6575_typedefs.h"
900 +#define RALINK_NAND_CTRL_BASE 0xBE003000
901 +#define NFI_base RALINK_NAND_CTRL_BASE
902 +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
903 +#define NFIECC_base RALINK_NANDECC_CTRL_BASE
904 +#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND
905 +#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC
907 +#define SURFBOARDINT_NAND 22
908 +#define SURFBOARDINT_NAND_ECC 23
910 +static struct resource MT7621_resource_nand[] = {
913 + .end = NFI_base + 0x1A0,
914 + .flags = IORESOURCE_MEM,
917 + .start = NFIECC_base,
918 + .end = NFIECC_base + 0x150,
919 + .flags = IORESOURCE_MEM,
922 + .start = MT7621_NFI_IRQ_ID,
923 + .flags = IORESOURCE_IRQ,
926 + .start = MT7621_NFIECC_IRQ_ID,
927 + .flags = IORESOURCE_IRQ,
931 +static struct platform_device MT7621_nand_dev = {
932 + .name = "MT7621-NAND",
934 + .num_resources = ARRAY_SIZE(MT7621_resource_nand),
935 + .resource = MT7621_resource_nand,
937 + .platform_data = &mt7621_nand_hw,
942 +int __init mtk_nand_register(void)
947 + retval = platform_device_register(&MT7621_nand_dev);
949 + printk(KERN_ERR "register nand device fail\n");
956 +arch_initcall(mtk_nand_register);
958 +++ b/drivers/mtd/nand/mt6575_typedefs.h
960 +/* Copyright Statement:
962 + * This software/firmware and related documentation ("MediaTek Software") are
963 + * protected under relevant copyright laws. The information contained herein
964 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
965 + * Without the prior written permission of MediaTek inc. and/or its licensors,
966 + * any reproduction, modification, use or disclosure of MediaTek Software,
967 + * and information contained herein, in whole or in part, shall be strictly prohibited.
969 +/* MediaTek Inc. (C) 2010. All rights reserved.
971 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
972 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
973 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
974 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
975 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
976 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
977 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
978 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
979 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
980 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
981 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
982 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
983 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
984 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
985 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
986 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
987 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
988 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
990 + * The following software/firmware and/or related documentation ("MediaTek Software")
991 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
992 + * applicable license agreements with MediaTek Inc.
995 +/*****************************************************************************
996 +* Copyright Statement:
997 +* --------------------
998 +* This software is protected by Copyright and the information contained
999 +* herein is confidential. The software may not be copied and the information
1000 +* contained herein may not be used or disclosed except with the written
1001 +* permission of MediaTek Inc. (C) 2008
1003 +* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
1004 +* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
1005 +* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
1006 +* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
1007 +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
1008 +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
1009 +* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
1010 +* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
1011 +* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
1012 +* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
1013 +* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
1014 +* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
1016 +* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
1017 +* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
1018 +* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
1019 +* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
1020 +* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
1022 +* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
1023 +* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
1024 +* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
1025 +* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
1026 +* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
1028 +*****************************************************************************/
1030 +#ifndef _MT6575_TYPEDEFS_H
1031 +#define _MT6575_TYPEDEFS_H
1033 +#if defined (__KERNEL_NAND__)
1034 +#include <linux/bug.h>
1041 +// ---------------------------------------------------------------------------
1042 +// Basic Type Definitions
1043 +// ---------------------------------------------------------------------------
1045 +typedef volatile unsigned char *P_kal_uint8;
1046 +typedef volatile unsigned short *P_kal_uint16;
1047 +typedef volatile unsigned int *P_kal_uint32;
1050 +typedef unsigned char UBYTE;
1051 +typedef short SHORT;
1053 +typedef signed char kal_int8;
1054 +typedef signed short kal_int16;
1055 +typedef signed int kal_int32;
1056 +typedef long long kal_int64;
1057 +typedef unsigned char kal_uint8;
1058 +typedef unsigned short kal_uint16;
1059 +typedef unsigned int kal_uint32;
1060 +typedef unsigned long long kal_uint64;
1061 +typedef char kal_char;
1063 +typedef unsigned int *UINT32P;
1064 +typedef volatile unsigned short *UINT16P;
1065 +typedef volatile unsigned char *UINT8P;
1066 +typedef unsigned char *U8P;
1068 +typedef volatile unsigned char *P_U8;
1069 +typedef volatile signed char *P_S8;
1070 +typedef volatile unsigned short *P_U16;
1071 +typedef volatile signed short *P_S16;
1072 +typedef volatile unsigned int *P_U32;
1073 +typedef volatile signed int *P_S32;
1074 +typedef unsigned long long *P_U64;
1075 +typedef signed long long *P_S64;
1077 +typedef unsigned char U8;
1078 +typedef signed char S8;
1079 +typedef unsigned short U16;
1080 +typedef signed short S16;
1081 +typedef unsigned int U32;
1082 +typedef signed int S32;
1083 +typedef unsigned long long U64;
1084 +typedef signed long long S64;
1085 +//typedef unsigned char bool;
1087 +typedef unsigned char UINT8;
1088 +typedef unsigned short UINT16;
1089 +typedef unsigned int UINT32;
1090 +typedef unsigned short USHORT;
1091 +typedef signed char INT8;
1092 +typedef signed short INT16;
1093 +typedef signed int INT32;
1094 +typedef unsigned int DWORD;
1096 +typedef unsigned char BYTE;
1097 +typedef float FLOAT;
1099 +typedef char *LPCSTR;
1100 +typedef short *LPWSTR;
1103 +// ---------------------------------------------------------------------------
1105 +// ---------------------------------------------------------------------------
1107 +#define IMPORT EXTERN
1108 +#ifndef __cplusplus
1109 + #define EXTERN extern
1111 + #define EXTERN extern "C"
1113 +#define LOCAL static
1115 +#define EXPORT GLOBAL
1121 +#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B)))
1135 +//enum boolean {false, true};
1136 +enum {RX, TX, NONE};
1139 +typedef unsigned char BOOL;
1148 +// ---------------------------------------------------------------------------
1150 +// ---------------------------------------------------------------------------
1152 +#define AS_INT32(x) (*(INT32 *)((void*)x))
1153 +#define AS_INT16(x) (*(INT16 *)((void*)x))
1154 +#define AS_INT8(x) (*(INT8 *)((void*)x))
1156 +#define AS_UINT32(x) (*(UINT32 *)((void*)x))
1157 +#define AS_UINT16(x) (*(UINT16 *)((void*)x))
1158 +#define AS_UINT8(x) (*(UINT8 *)((void*)x))
1161 +// ---------------------------------------------------------------------------
1162 +// Register Manipulations
1163 +// ---------------------------------------------------------------------------
1165 +#define READ_REGISTER_UINT32(reg) \
1166 + (*(volatile UINT32 * const)(reg))
1168 +#define WRITE_REGISTER_UINT32(reg, val) \
1169 + (*(volatile UINT32 * const)(reg)) = (val)
1171 +#define READ_REGISTER_UINT16(reg) \
1172 + (*(volatile UINT16 * const)(reg))
1174 +#define WRITE_REGISTER_UINT16(reg, val) \
1175 + (*(volatile UINT16 * const)(reg)) = (val)
1177 +#define READ_REGISTER_UINT8(reg) \
1178 + (*(volatile UINT8 * const)(reg))
1180 +#define WRITE_REGISTER_UINT8(reg, val) \
1181 + (*(volatile UINT8 * const)(reg)) = (val)
1183 +#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x)))
1184 +#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
1185 +#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y))
1186 +#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y))
1187 +#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z))
1189 +#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x)))
1190 +#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
1191 +#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y))
1192 +#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y))
1193 +#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z))
1195 +#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x)))
1196 +#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y))
1197 +#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
1198 +#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
1199 +#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
1202 +#define DRV_Reg8(addr) INREG8(addr)
1203 +#define DRV_WriteReg8(addr, data) OUTREG8(addr, data)
1204 +#define DRV_SetReg8(addr, data) SETREG8(addr, data)
1205 +#define DRV_ClrReg8(addr, data) CLRREG8(addr, data)
1207 +#define DRV_Reg16(addr) INREG16(addr)
1208 +#define DRV_WriteReg16(addr, data) OUTREG16(addr, data)
1209 +#define DRV_SetReg16(addr, data) SETREG16(addr, data)
1210 +#define DRV_ClrReg16(addr, data) CLRREG16(addr, data)
1212 +#define DRV_Reg32(addr) INREG32(addr)
1213 +#define DRV_WriteReg32(addr, data) OUTREG32(addr, data)
1214 +#define DRV_SetReg32(addr, data) SETREG32(addr, data)
1215 +#define DRV_ClrReg32(addr, data) CLRREG32(addr, data)
1217 +// !!! DEPRECATED, WILL BE REMOVED LATER !!!
1218 +#define DRV_Reg(addr) DRV_Reg16(addr)
1219 +#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data)
1220 +#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data)
1221 +#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data)
1224 +// ---------------------------------------------------------------------------
1225 +// Compiler Time Deduction Macros
1226 +// ---------------------------------------------------------------------------
1228 +#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? (n) :
1229 +#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
1230 +#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
1231 +#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
1232 +#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
1233 +#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
1235 +#define MASK_OFFSET_ERROR (0xFFFFFFFF)
1237 +#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
1240 +// ---------------------------------------------------------------------------
1242 +// ---------------------------------------------------------------------------
1245 + #define ASSERT(expr) BUG_ON(!(expr))
1248 +#ifndef NOT_IMPLEMENTED
1249 + #define NOT_IMPLEMENTED() BUG_ON(1)
1252 +#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__)
1253 +#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
1254 +#define STATIC_ASSERT_XX(pred, line) \
1255 + extern char assertion_failed_at_##line[(pred) ? 1 : -1]
1257 +// ---------------------------------------------------------------------------
1258 +// Resolve Compiler Warnings
1259 +// ---------------------------------------------------------------------------
1261 +#define NOT_REFERENCED(x) { (x) = (x); }
1264 +// ---------------------------------------------------------------------------
1266 +// ---------------------------------------------------------------------------
1268 +#define MAXIMUM(A,B) (((A)>(B))?(A):(B))
1269 +#define MINIMUM(A,B) (((A)<(B))?(A):(B))
1271 +#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
1272 +#define DVT_DELAYMACRO(u4Num) \
1274 + UINT32 u4Count = 0 ; \
1275 + for (u4Count = 0; u4Count < u4Num; u4Count++ ); \
1282 +#define UNKNOWN_IC_VERSION 0xFF
1285 +struct mtk_nand_host_hw {
1286 + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
1287 + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
1288 + unsigned int nfi_cs_num; /* NFI_CS_NUM */
1289 + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
1290 + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
1291 + unsigned int nand_ecc_size;
1292 + unsigned int nand_ecc_bytes;
1293 + unsigned int nand_ecc_mode;
1295 +extern struct mtk_nand_host_hw mt7621_nand_hw;
1296 +extern unsigned int CFG_BLOCKSIZE;
1298 +#endif // _MT6575_TYPEDEFS_H
1301 +++ b/drivers/mtd/nand/mtk_nand2.c
1303 +/******************************************************************************
1304 +* mtk_nand2.c - MTK NAND Flash Device Driver
1306 +* Copyright 2009-2012 MediaTek Co.,Ltd.
1309 +* This file provides NAND-related functions to the other drivers
1311 +* modification history
1312 +* ----------------------------------------
1313 +* v3.0, 11 Feb 2010, mtk
1314 +* ----------------------------------------
1315 +******************************************************************************/
1316 +#include "nand_def.h"
1317 +#include <linux/slab.h>
1318 +#include <linux/init.h>
1319 +#include <linux/module.h>
1320 +#include <linux/delay.h>
1321 +#include <linux/errno.h>
1322 +#include <linux/sched.h>
1323 +#include <linux/types.h>
1324 +#include <linux/wait.h>
1325 +#include <linux/spinlock.h>
1326 +#include <linux/interrupt.h>
1327 +#include <linux/mtd/mtd.h>
1328 +#include <linux/mtd/rawnand.h>
1329 +#include <linux/mtd/partitions.h>
1330 +#include <linux/mtd/nand_ecc.h>
1331 +#include <linux/dma-mapping.h>
1332 +#include <linux/jiffies.h>
1333 +#include <linux/platform_device.h>
1334 +#include <linux/proc_fs.h>
1335 +#include <linux/time.h>
1336 +#include <linux/mm.h>
1337 +#include <asm/io.h>
1338 +#include <asm/cacheflush.h>
1339 +#include <asm/uaccess.h>
1340 +#include <linux/miscdevice.h>
1341 +#include "mtk_nand2.h"
1342 +#include "nand_device_list.h"
1345 +#include "partition.h"
1347 +unsigned int CFG_BLOCKSIZE;
1349 +static int shift_on_bbt = 0;
1350 +extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag);
1351 +extern int nand_bbt_get(struct mtd_info *mtd, int page);
1352 +int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page);
1354 +static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
1356 +#define NAND_CMD_STATUS_MULTI 0x71
1358 +void show_stack(struct task_struct *tsk, unsigned long *sp);
1359 +extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
1360 +extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
1362 +struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */
1363 +struct mtk_nand_host_hw mt7621_nand_hw = {
1364 + .nfi_bus_width = 8,
1365 + .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
1366 + .nfi_cs_num = NFI_CS_NUM,
1367 + .nand_sec_size = 512,
1368 + .nand_sec_shift = 9,
1369 + .nand_ecc_size = 2048,
1370 + .nand_ecc_bytes = 32,
1371 + .nand_ecc_mode = NAND_ECC_HW,
1375 +/*******************************************************************************
1376 + * Global Variable Definitions
1377 + *******************************************************************************/
1379 +#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
1381 + DRV_WriteReg(NFI_CMD_REG16,cmd);\
1382 + while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
1383 + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
1384 + DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
1385 + DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
1386 + while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
1389 +//-------------------------------------------------------------------------------
1390 +static struct NAND_CMD g_kCMD;
1391 +static u32 g_u4ChipVer;
1393 +static bool g_bcmdstatus;
1394 +static u32 g_value = 0;
1395 +static int g_page_size;
1397 +BOOL g_bHwEcc = true;
1400 +static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue
1401 +static u8 local_buffer[4096 + 512];
1403 +extern void nand_release_device(struct mtd_info *mtd);
1404 +extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
1406 +#if defined(MTK_NAND_BMT)
1407 +static bmt_struct *g_bmt;
1409 +struct mtk_nand_host *host;
1410 +extern struct mtd_partition g_pasStatic_Partition[];
1411 +int part_num = NUM_PARTITIONS;
1415 +/* this constant was taken from linux/nand/nand.h v 3.14
1416 + * in later versions it seems it was removed in order to save a bit of space
1418 +#define NAND_MAX_OOBSIZE 774
1419 +static u8 local_oob_buf[NAND_MAX_OOBSIZE];
1421 +static u8 nand_badblock_offset = 0;
1423 +void nand_enable_clock(void)
1425 + //enable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1428 +void nand_disable_clock(void)
1430 + //disable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1433 +struct nand_ecclayout {
1435 + __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
1437 + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
1440 +static struct nand_ecclayout *layout;
1442 +static struct nand_ecclayout nand_oob_16 = {
1444 + .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
1445 + .oobfree = {{1, 6}, {0, 0}}
1448 +struct nand_ecclayout nand_oob_64 = {
1450 + .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
1451 + 40, 41, 42, 43, 44, 45, 46, 47,
1452 + 48, 49, 50, 51, 52, 53, 54, 55,
1453 + 56, 57, 58, 59, 60, 61, 62, 63},
1454 + .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
1457 +struct nand_ecclayout nand_oob_128 = { /* ECC byte positions live in the upper half (64..127) of a 128-byte OOB */
1460 +	    64, 65, 66, 67, 68, 69, 70, 71,
1461 +	    72, 73, 74, 75, 76, 77, 78, 79,
1462 +	    80, 81, 82, 83, 84, 85, 86, 87, /* fix: was "86, 86" — 86 duplicated, 87 missing */
1463 +	    88, 89, 90, 91, 92, 93, 94, 95,
1464 +	    96, 97, 98, 99, 100, 101, 102, 103,
1465 +	    104, 105, 106, 107, 108, 109, 110, 111,
1466 +	    112, 113, 114, 115, 116, 117, 118, 119,
1467 +	    120, 121, 122, 123, 124, 125, 126, 127},
1468 +	.oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
1471 +flashdev_info devinfo;
1473 +void dump_nfi(void)
1477 +void dump_ecc(void)
1482 +nand_virt_to_phys_add(u32 va)
1484 + u32 pageOffset = (va & (PAGE_SIZE - 1));
1490 + if (virt_addr_valid(va))
1491 + return __virt_to_phys(va);
1493 + if (NULL == current) {
1494 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
1498 + if (NULL == current->mm) {
1499 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
1503 + pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
1504 + if (pgd_none(*pgd) || pgd_bad(*pgd)) {
1505 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
1509 + pmd = pmd_offset((pud_t *)pgd, va);
1510 + if (pmd_none(*pmd) || pmd_bad(*pmd)) {
1511 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
1515 + pte = pte_offset_map(pmd, va);
1516 + if (pte_present(*pte)) {
1517 + pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
1521 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
1524 +EXPORT_SYMBOL(nand_virt_to_phys_add);
1527 +get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo)
1530 + for (index = 0; gen_FlashTable[index].id != 0; index++) {
1531 + if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) {
1532 + pdevinfo->id = gen_FlashTable[index].id;
1533 + pdevinfo->ext_id = gen_FlashTable[index].ext_id;
1534 + pdevinfo->blocksize = gen_FlashTable[index].blocksize;
1535 + pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
1536 + pdevinfo->iowidth = gen_FlashTable[index].iowidth;
1537 + pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
1538 + pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
1539 + pdevinfo->pagesize = gen_FlashTable[index].pagesize;
1540 + pdevinfo->sparesize = gen_FlashTable[index].sparesize;
1541 + pdevinfo->totalsize = gen_FlashTable[index].totalsize;
1542 + memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
1543 + printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id);
1550 + if (0 == pdevinfo->id) {
1551 + printk(KERN_INFO "Device not found, ID: %x\n", id);
1559 +ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
1563 + u32 ecc_bit_cfg = ECC_CNFG_ECC4;
1567 + ecc_bit_cfg = ECC_CNFG_ECC4;
1570 + ecc_bit_cfg = ECC_CNFG_ECC8;
1573 + ecc_bit_cfg = ECC_CNFG_ECC10;
1576 + ecc_bit_cfg = ECC_CNFG_ECC12;
1581 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1583 + } while (!DRV_Reg16(ECC_DECIDLE_REG16));
1585 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1587 + } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
1589 + /* setup FDM register base */
1590 + DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
1592 + /* Sector + FDM */
1593 + u4ENCODESize = (hw->nand_sec_size + 8) << 3;
1594 + /* Sector + FDM + YAFFS2 meta data bits */
1595 + u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13;
1597 + /* configure ECC decoder && encoder */
1598 + DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
1600 + DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
1601 + NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
1605 +ECC_Decode_Start(void)
1607 + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1609 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
1613 +ECC_Decode_End(void)
1615 + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1617 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1621 +ECC_Encode_Start(void)
1623 + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
1626 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
1630 +ECC_Encode_End(void)
1632 + /* wait for device returning idle */
1633 + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
1635 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1639 +mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
1642 + u16 u2SectorDoneMask = 1 << u4SecIndex;
1643 + u32 u4ErrorNumDebug, i, u4ErrNum;
1644 + u32 timeout = 0xFFFF;
1646 + u32 au4ErrBitLoc[6];
1647 + u32 u4ErrByteLoc, u4BitOffset;
1648 + u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
1650 + //4 // Wait for Decode Done
1651 + while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
1656 + /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
1657 + memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
1658 + u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
1659 + u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2);
1663 + if (0xF == u4ErrNum) {
1664 + mtd->ecc_stats.failed++;
1666 + printk(KERN_ERR"mtk_nand: UnCorrectable at PageAddr=%d\n", u4PageAddr);
1668 + for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
1669 + au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
1670 + u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
1671 + if (u4ErrBitLoc1th < 0x1000) {
1672 + u4ErrByteLoc = u4ErrBitLoc1th / 8;
1673 + u4BitOffset = u4ErrBitLoc1th % 8;
1674 + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1675 + mtd->ecc_stats.corrected++;
1677 + mtd->ecc_stats.failed++;
1679 + u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
1680 + if (0 != u4ErrBitLoc2nd) {
1681 + if (u4ErrBitLoc2nd < 0x1000) {
1682 + u4ErrByteLoc = u4ErrBitLoc2nd / 8;
1683 + u4BitOffset = u4ErrBitLoc2nd % 8;
1684 + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1685 + mtd->ecc_stats.corrected++;
1687 + mtd->ecc_stats.failed++;
1688 + //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
1693 + if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
1700 +mtk_nand_RFIFOValidSize(u16 u2Size)
1702 + u32 timeout = 0xFFFF;
1703 + while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
1712 +mtk_nand_WFIFOValidSize(u16 u2Size)
1714 + u32 timeout = 0xFFFF;
1716 + while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
1725 +mtk_nand_status_ready(u32 u4Status)
1727 + u32 timeout = 0xFFFF;
1729 + while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
1738 +mtk_nand_reset(void)
1740 + int timeout = 0xFFFF;
1741 + if (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1743 + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1744 + while (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1747 + MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
1750 + /* issue reset operation */
1752 + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1754 + return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
1758 +mtk_nand_set_mode(u16 u2OpMode)
1760 + u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
1761 + u2Mode &= ~CNFG_OP_MODE_MASK;
1762 + u2Mode |= u2OpMode;
1763 + DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
1767 +mtk_nand_set_autoformat(bool bEnable)
1770 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1772 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1776 +mtk_nand_configure_fdm(u16 u2FDMSize)
1778 + NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
1779 + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
1780 + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
1784 +mtk_nand_configure_lock(void)
1786 + u32 u4WriteColNOB = 2;
1787 + u32 u4WriteRowNOB = 3;
1788 + u32 u4EraseColNOB = 0;
1789 + u32 u4EraseRowNOB = 3;
1790 + DRV_WriteReg16(NFI_LOCKANOB_REG16,
1791 + (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
1793 + if (CHIPVER_ECO_1 == g_u4ChipVer) {
1795 + for (i = 0; i < 16; ++i) {
1796 + DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
1797 + DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
1799 + //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
1800 + DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
1801 + DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
1806 +mtk_nand_pio_ready(void)
1809 + while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
1811 + if (count > 0xffff) {
1812 + printk("PIO_DIRDY timeout\n");
1821 +mtk_nand_set_command(u16 command)
1824 + DRV_WriteReg16(NFI_CMD_REG16, command);
1825 + return mtk_nand_status_ready(STA_CMD_STATE);
1829 +mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
1832 + DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
1833 + DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
1834 + DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
1835 + return mtk_nand_status_ready(STA_ADDR_STATE);
1838 +static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
1840 + if (ctrl & NAND_ALE) {
1841 + mtk_nand_set_address(dat, 0, 1, 0);
1842 + } else if (ctrl & NAND_CLE) {
1844 + mtk_nand_set_mode(0x6000);
1845 + mtk_nand_set_command(dat);
1850 +mtk_nand_check_RW_count(u16 u2WriteSize)
1852 + u32 timeout = 0xFFFF;
1853 + u16 u2SecNum = u2WriteSize >> 9;
1855 + while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) {
1857 + if (0 == timeout) {
1858 + printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
1866 +mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf)
1868 + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1869 + bool bRet = false;
1870 + u16 sec_num = 1 << (nand->page_shift - 9);
1871 + u32 col_addr = u4ColAddr;
1872 + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1873 + if (nand->options & NAND_BUSWIDTH_16)
1876 + if (!mtk_nand_reset())
1879 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1881 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1884 + mtk_nand_set_mode(CNFG_OP_READ);
1885 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1886 + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1889 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1892 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1894 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1896 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1897 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1900 + mtk_nand_set_autoformat(full);
1903 + ECC_Decode_Start();
1904 + if (!mtk_nand_set_command(NAND_CMD_READ0))
1906 + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1908 + if (!mtk_nand_set_command(NAND_CMD_READSTART))
1910 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
1920 +mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
1922 + bool bRet = false;
1923 + u32 sec_num = 1 << (nand->page_shift - 9);
1924 + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1925 + if (nand->options & NAND_BUSWIDTH_16)
1928 + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1929 + if (!mtk_nand_reset())
1932 + mtk_nand_set_mode(CNFG_OP_PRGM);
1934 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1936 + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1939 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1941 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1943 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1945 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1946 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1949 + mtk_nand_set_autoformat(full);
1953 + ECC_Encode_Start();
1955 + if (!mtk_nand_set_command(NAND_CMD_SEQIN))
1957 + //1 FIXED ME: For Any Kind of AddrCycle
1958 + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1961 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
1971 +mtk_nand_check_dececc_done(u32 u4SecNum)
1973 + u32 timeout, dec_mask;
1976 + dec_mask = (1 << u4SecNum) - 1;
1977 + while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0)
1979 + if (timeout == 0) {
1980 + MSG(VERIFY, "ECC_DECDONE: timeout\n");
1987 +mtk_nand_mcu_read_data(u8 * buf, u32 length)
1989 + int timeout = 0xffff;
1991 + u32 *buf32 = (u32 *) buf;
1992 + if ((u32) buf % 4 || length % 4)
1993 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
1995 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
1997 + //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
1999 + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
2001 + if ((u32) buf % 4 || length % 4) {
2002 + for (i = 0; (i < (length)) && (timeout > 0);) {
2003 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2004 + *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
2009 + if (0 == timeout) {
2010 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2016 + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2017 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2018 + *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
2023 + if (0 == timeout) {
2024 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2034 +mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
2036 + return mtk_nand_mcu_read_data(pDataBuf, u4Size);
2040 +mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
2042 + u32 timeout = 0xFFFF;
2045 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2047 + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
2048 + pBuf32 = (u32 *) buf;
2050 + if ((u32) buf % 4 || length % 4)
2051 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2053 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2055 + if ((u32) buf % 4 || length % 4) {
2056 + for (i = 0; (i < (length)) && (timeout > 0);) {
2057 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2058 + DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
2063 + if (0 == timeout) {
2064 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2070 + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2071 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2072 + DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
2077 + if (0 == timeout) {
2078 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2089 +mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
2091 + return mtk_nand_mcu_write_data(mtd, buf, size);
2095 +mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
2098 + u32 *pBuf32 = (u32 *) pDataBuf;
2101 + for (i = 0; i < u4SecNum; ++i) {
2102 + *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
2103 + *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
2108 +static u8 fdm_buf[64];
2110 +mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
2114 + bool empty = true;
2115 + struct nand_oobfree *free_entry;
2118 + memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
2120 + free_entry = layout->oobfree;
2121 + for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
2122 + for (j = 0; j < free_entry[i].length; j++) {
2123 + if (pDataBuf[free_entry[i].offset + j] != 0xFF)
2125 + checksum ^= pDataBuf[free_entry[i].offset + j];
2130 + fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
2133 + pBuf32 = (u32 *) fdm_buf;
2134 + for (i = 0; i < u4SecNum; ++i) {
2135 + DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
2136 + DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
2141 +mtk_nand_stop_read(void)
2143 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2147 + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2151 +mtk_nand_stop_write(void)
2153 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2156 + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2160 +mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2164 + struct nand_chip *nand = mtd->priv;
2165 + u32 u4SecNum = u4PageSize >> 9;
2167 + if (((u32) pPageBuf % 16) && local_buffer_16_align)
2168 + buf = local_buffer_16_align;
2171 + if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) {
2173 + for (j = 0 ; j < u4SecNum; j++) {
2174 + if (!mtk_nand_read_page_data(mtd, buf+j*512, 512))
2176 + if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1))
2178 + if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr))
2181 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2184 + mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
2185 + mtk_nand_stop_read();
2188 + if (buf == local_buffer_16_align)
2189 + memcpy(pPageBuf, buf, u4PageSize);
2195 +mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2197 + struct nand_chip *chip = mtd->priv;
2198 + u32 u4SecNum = u4PageSize >> 9;
2202 + MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
2204 + if (((u32) pPageBuf % 16) && local_buffer_16_align) {
2205 + printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
2206 + memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
2207 + buf = local_buffer_16_align;
2211 + if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) {
2212 + mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
2213 + (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
2214 + (void)mtk_nand_check_RW_count(u4PageSize);
2215 + mtk_nand_stop_write();
2216 + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2217 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
2220 + status = chip->waitfunc(mtd, chip);
2221 + if (status & NAND_STATUS_FAIL)
2227 +get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk)
2229 + struct nand_chip *chip = mtd->priv;
2233 + for (i = 0; i <= part_num; i++)
2235 + if (i == part_num)
2237 + // try the last reset partition
2238 + *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1;
2239 + if (*start_blk <= *end_blk)
2241 + if ((block >= *start_blk) && (block <= *end_blk))
2245 + // skip All partition entry
2246 + else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL)
2250 + *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1;
2251 + if ((block >= *start_blk) && (block <= *end_blk))
2253 + *start_blk = *end_blk + 1;
2255 + if (*start_blk > *end_blk)
2263 +block_remap(struct mtd_info *mtd, int block)
2265 + struct nand_chip *chip = mtd->priv;
2266 + int start_blk, end_blk;
2267 + int j, block_offset;
2268 + int bad_block = 0;
2270 + if (chip->bbt == NULL) {
2271 + printk("ERROR!! no bbt table for block_remap\n");
2275 + if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) {
2276 + printk("ERROR!! can not find start_blk and end_blk\n");
2280 + block_offset = block - start_blk;
2281 + for (j = start_blk; j <= end_blk;j++) {
2282 + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) {
2283 + if (!block_offset)
2290 + if (j <= end_blk) {
2293 + // remap to the bad block
2294 + for (j = end_blk; bad_block > 0; j--)
2296 + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0)
2299 + if (bad_block <= block_offset)
2305 + printk("Error!! block_remap error\n");
2310 +check_block_remap(struct mtd_info *mtd, int block)
2313 + return block_remap(mtd, block);
2317 +EXPORT_SYMBOL(check_block_remap);
2321 +write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk)
2323 + struct nand_chip *chip = mtd->priv;
2324 + int i, j, to_page = 0, first_page;
2326 + int start_blk = 0, end_blk;
2328 + int page_per_block_bit = chip->phys_erase_shift - chip->page_shift;
2329 + int block = page >> page_per_block_bit;
2331 + // find next available block in the same MTD partition
2332 + mapped_block = block_remap(mtd, block);
2333 + if (mapped_block == -1)
2334 + return NAND_STATUS_FAIL;
2336 + get_start_end_block(mtd, block, &start_blk, &end_blk);
2338 + buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA);
2342 + oob = buf + mtd->writesize;
2343 + for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) {
2344 + if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) {
2346 + status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit);
2347 + if (status & NAND_STATUS_FAIL) {
2348 + mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift);
2349 + nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3);
2352 + to_page = (*to_blk) << page_per_block_bit;
2363 + first_page = (page >> page_per_block_bit) << page_per_block_bit;
2364 + for (i = 0; i < (1 << page_per_block_bit); i++) {
2365 + if ((first_page + i) != page) {
2366 + mtk_nand_read_oob_hw(mtd, chip, (first_page+i));
2367 + for (j = 0; j < mtd->oobsize; j++)
2368 + if (chip->oob_poi[j] != (unsigned char)0xff)
2370 + if (j < mtd->oobsize) {
2371 + mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob);
2372 + memset(oob, 0xff, mtd->oobsize);
2373 + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) {
2374 + int ret, new_blk = 0;
2375 + nand_bbt_set(mtd, to_page, 0x3);
2376 + ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk);
2379 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2382 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2383 + *to_blk = new_blk;
2384 + to_page = ((*to_blk) << page_per_block_bit);
2388 + memset(chip->oob_poi, 0xff, mtd->oobsize);
2389 + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) {
2390 + int ret, new_blk = 0;
2391 + nand_bbt_set(mtd, to_page, 0x3);
2392 + ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk);
2395 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2398 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2399 + *to_blk = new_blk;
2400 + to_page = ((*to_blk) << page_per_block_bit);
2411 +mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset,
2412 + int data_len, const u8 * buf, int oob_required, int page, int raw)
2414 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2415 + int block = page / page_per_block;
2416 + u16 page_in_block = page % page_per_block;
2417 + int mapped_block = block;
2419 +#if defined(MTK_NAND_BMT)
2420 + mapped_block = get_mapping_block_index(block);
2421 + // write bad index into oob
2422 + if (mapped_block != block)
2423 + set_bad_index_to_oob(chip->oob_poi, block);
2425 + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2427 + if (shift_on_bbt) {
2428 + mapped_block = block_remap(mtd, block);
2429 + if (mapped_block == -1)
2430 + return NAND_STATUS_FAIL;
2431 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2432 + return NAND_STATUS_FAIL;
2436 + if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) {
2437 + MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
2438 +#if defined(MTK_NAND_BMT)
2439 + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
2440 + MSG(INIT, "Update BMT success\n");
2443 + MSG(INIT, "Update BMT fail\n");
2449 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2450 + if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0)
2452 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2453 + return NAND_STATUS_FAIL;
2455 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2467 +mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
2469 + struct nand_chip *nand = mtd->priv;
2471 + switch (command) {
2472 + case NAND_CMD_SEQIN:
2473 + memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
2474 + g_kCMD.pDataBuf = NULL;
2475 + g_kCMD.u4RowAddr = page_addr;
2476 + g_kCMD.u4ColAddr = column;
2479 + case NAND_CMD_PAGEPROG:
2480 + if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) {
2481 + u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
2482 + mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
2483 + g_kCMD.u4RowAddr = (u32) - 1;
2484 + g_kCMD.u4OOBRowAddr = (u32) - 1;
2488 + case NAND_CMD_READOOB:
2489 + g_kCMD.u4RowAddr = page_addr;
2490 + g_kCMD.u4ColAddr = column + mtd->writesize;
2493 + case NAND_CMD_READ0:
2494 + g_kCMD.u4RowAddr = page_addr;
2495 + g_kCMD.u4ColAddr = column;
2498 + case NAND_CMD_ERASE1:
2499 + nand->state=FL_ERASING;
2500 + (void)mtk_nand_reset();
2501 + mtk_nand_set_mode(CNFG_OP_ERASE);
2502 + (void)mtk_nand_set_command(NAND_CMD_ERASE1);
2503 + (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
2506 + case NAND_CMD_ERASE2:
2507 + (void)mtk_nand_set_command(NAND_CMD_ERASE2);
2508 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2512 + case NAND_CMD_STATUS:
2513 + (void)mtk_nand_reset();
2514 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2515 + mtk_nand_set_mode(CNFG_OP_SRD);
2516 + mtk_nand_set_mode(CNFG_READ_EN);
2517 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2518 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2519 + (void)mtk_nand_set_command(NAND_CMD_STATUS);
2520 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2522 + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
2523 + g_bcmdstatus = true;
2526 + case NAND_CMD_RESET:
2527 + (void)mtk_nand_reset();
2528 + DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN);
2529 + (void)mtk_nand_set_command(NAND_CMD_RESET);
2530 + DRV_WriteReg16(NFI_BASE+0x44, 0xF1);
2531 + while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN))
2535 + case NAND_CMD_READID:
2537 + /* Disable HW ECC */
2538 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2539 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2540 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
2541 + (void)mtk_nand_reset();
2543 + mtk_nand_set_mode(CNFG_OP_SRD);
2544 + (void)mtk_nand_set_command(NAND_CMD_READID);
2545 + (void)mtk_nand_set_address(0, 0, 1, 0);
2546 + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
2547 + while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
2558 +mtk_nand_select_chip(struct mtd_info *mtd, int chip)
2560 + if ((chip == -1) && (false == g_bInitDone)) {
2561 + struct nand_chip *nand = mtd->priv;
2562 + struct mtk_nand_host *host = nand->priv;
2563 + struct mtk_nand_host_hw *hw = host->hw;
2564 + u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512);
2566 + u32 spare_bit = PAGEFMT_SPARE_16;
2568 + if (spare_per_sector >= 28) {
2569 + spare_bit = PAGEFMT_SPARE_28;
2571 + spare_per_sector = 28;
2572 + } else if (spare_per_sector >= 27) {
2573 + spare_bit = PAGEFMT_SPARE_27;
2575 + spare_per_sector = 27;
2576 + } else if (spare_per_sector >= 26) {
2577 + spare_bit = PAGEFMT_SPARE_26;
2579 + spare_per_sector = 26;
2580 + } else if (spare_per_sector >= 16) {
2581 + spare_bit = PAGEFMT_SPARE_16;
2583 + spare_per_sector = 16;
2585 + MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector);
2588 + mtd->oobsize = spare_per_sector*(mtd->writesize/512);
2589 + MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector);
2590 + /* Setup PageFormat */
2591 + if (4096 == mtd->writesize) {
2592 + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
2593 + nand->cmdfunc = mtk_nand_command_bp;
2594 + } else if (2048 == mtd->writesize) {
2595 + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
2596 + nand->cmdfunc = mtk_nand_command_bp;
2598 + ECC_Config(hw,ecc_bit);
2599 + g_bInitDone = true;
2606 + /* Jun Shen, 2011.04.13 */
2607 + /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */
2608 + DRV_WriteReg16(NFI_CSEL_REG16, chip);
2609 + /* Jun Shen, 2011.04.13 */
2615 +mtk_nand_read_byte(struct mtd_info *mtd)
2617 + uint8_t retval = 0;
2619 + if (!mtk_nand_pio_ready()) {
2620 + printk("pio ready timeout\n");
2624 + if (g_bcmdstatus) {
2625 + retval = DRV_Reg8(NFI_DATAR_REG32);
2626 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2629 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2631 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2633 + g_bcmdstatus = false;
2635 + retval = DRV_Reg8(NFI_DATAR_REG32);
2641 +mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
2643 + struct nand_chip *nand = (struct nand_chip *)mtd->priv;
2644 + struct NAND_CMD *pkCMD = &g_kCMD;
2645 + u32 u4ColAddr = pkCMD->u4ColAddr;
2646 + u32 u4PageSize = mtd->writesize;
2648 + if (u4ColAddr < u4PageSize) {
2649 + if ((u4ColAddr == 0) && (len >= u4PageSize)) {
2650 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
2651 + if (len > u4PageSize) {
2652 + u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
2653 + memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
2656 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2657 + memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
2659 + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2661 + u32 u4Offset = u4ColAddr - u4PageSize;
2662 + u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
2663 + if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
2664 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2665 + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2667 + memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
2669 + pkCMD->u4ColAddr += len;
2673 +mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
2675 + struct NAND_CMD *pkCMD = &g_kCMD;
2676 + u32 u4ColAddr = pkCMD->u4ColAddr;
2677 + u32 u4PageSize = mtd->writesize;
2680 + if (u4ColAddr >= u4PageSize) {
2681 + u32 u4Offset = u4ColAddr - u4PageSize;
2682 + u8 *pOOB = pkCMD->au1OOB + u4Offset;
2683 + i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
2684 + for (i = 0; i < i4Size; i++) {
2685 + pOOB[i] &= buf[i];
2688 + pkCMD->pDataBuf = (u8 *) buf;
2691 + pkCMD->u4ColAddr += len;
2695 +mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required, int page)
2697 + mtk_nand_write_buf(mtd, buf, mtd->writesize);
2698 + mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
2703 +mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page)
2705 + struct NAND_CMD *pkCMD = &g_kCMD;
2706 + u32 u4ColAddr = pkCMD->u4ColAddr;
2707 + u32 u4PageSize = mtd->writesize;
2709 + if (u4ColAddr == 0) {
2710 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
2711 + pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
2718 +mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
2720 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2721 + int block = page / page_per_block;
2722 + u16 page_in_block = page % page_per_block;
2723 + int mapped_block = block;
2725 +#if defined (MTK_NAND_BMT)
2726 + mapped_block = get_mapping_block_index(block);
2727 + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
2728 + mtd->writesize, buf, chip->oob_poi))
2731 + if (shift_on_bbt) {
2732 + mapped_block = block_remap(mtd, block);
2733 + if (mapped_block == -1)
2734 + return NAND_STATUS_FAIL;
2735 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2736 + return NAND_STATUS_FAIL;
2739 + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi))
2747 +mtk_nand_erase_hw(struct mtd_info *mtd, int page)
2749 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2751 + chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2752 + chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2754 + return chip->waitfunc(mtd, chip);
2758 +mtk_nand_erase(struct mtd_info *mtd, int page)
2761 + struct nand_chip *chip = mtd->priv;
2762 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2763 + int page_in_block = page % page_per_block;
2764 + int block = page / page_per_block;
2765 + int mapped_block = block;
2767 +#if defined(MTK_NAND_BMT)
2768 + mapped_block = get_mapping_block_index(block);
2770 + if (shift_on_bbt) {
2771 + mapped_block = block_remap(mtd, block);
2772 + if (mapped_block == -1)
2773 + return NAND_STATUS_FAIL;
2774 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2775 + return NAND_STATUS_FAIL;
2780 + int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
2782 + if (status & NAND_STATUS_FAIL) {
2783 +#if defined (MTK_NAND_BMT)
2784 + if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
2785 + UPDATE_ERASE_FAIL, NULL, NULL))
2787 + MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
2790 + MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
2791 + return NAND_STATUS_FAIL;
2794 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2795 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2796 + if (shift_on_bbt) {
2797 + mapped_block = block_remap(mtd, block);
2798 + if (mapped_block == -1)
2799 + return NAND_STATUS_FAIL;
2800 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2801 + return NAND_STATUS_FAIL;
2803 + return NAND_STATUS_FAIL;
2813 +mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
2815 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2819 + u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
2820 + int randomread = 0;
2822 + int sec_num = 1<<(chip->page_shift-9);
2823 + int spare_per_sector = mtd->oobsize/sec_num;
2825 + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2826 + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2829 + if (len > spare_per_sector)
2831 + if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) {
2833 + read_len = min(len, spare_per_sector);
2834 + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // TODO: Fix this hard-code 16
2835 + if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) {
2836 + printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n");
2840 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2841 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n");
2845 + mtk_nand_check_RW_count(read_len);
2846 + mtk_nand_stop_read();
2851 + col_addr = NAND_SECTOR_SIZE;
2852 + if (chip->options & NAND_BUSWIDTH_16)
2854 + if (!mtk_nand_reset())
2856 + mtk_nand_set_mode(0x6000);
2857 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
2858 + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2860 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2861 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2863 + mtk_nand_set_autoformat(false);
2865 + if (!mtk_nand_set_command(NAND_CMD_READ0))
2867 + //1 FIXED ME: For Any Kind of AddrCycle
2868 + if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
2870 + if (!mtk_nand_set_command(NAND_CMD_READSTART))
2872 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2874 + read_len = min(len, spare_per_sector);
2875 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2876 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2882 + mtk_nand_stop_read();
2884 + read_len = min(len, spare_per_sector);
2885 + if (!mtk_nand_set_command(0x05))
2887 + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector);
2888 + if (chip->options & NAND_BUSWIDTH_16)
2890 + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
2891 + DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
2892 + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2893 + if (!mtk_nand_status_ready(STA_ADDR_STATE))
2895 + if (!mtk_nand_set_command(0xE0))
2897 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2899 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2900 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2904 + mtk_nand_stop_read();
2910 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2915 +mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
2917 + struct nand_chip *chip = mtd->priv;
2920 + int write_len = 0;
2922 + int sec_num = 1<<(chip->page_shift-9);
2923 + int spare_per_sector = mtd->oobsize/sec_num;
2925 + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2926 + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2931 + write_len = min(len, spare_per_sector);
2932 + col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE;
2933 + if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
2935 + if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
2937 + (void)mtk_nand_check_RW_count(write_len);
2938 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2939 + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2940 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2942 + status = chip->waitfunc(mtd, chip);
2943 + if (status & NAND_STATUS_FAIL) {
2944 + printk(KERN_INFO "status: %d\n", status);
2955 +mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
2958 + int sec_num = 1<<(chip->page_shift-9);
2959 + int spare_per_sector = mtd->oobsize/sec_num;
2961 + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
2964 + for (i = 0; i < layout->eccbytes; i++) {
2965 + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
2966 + local_oob_buf[iter] = chip->oob_poi[layout->eccpos[i]];
2970 + for (i = 0; i < sec_num; i++)
2971 + memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
2973 + return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
2976 +static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
2978 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2979 + int block = page / page_per_block;
2980 + u16 page_in_block = page % page_per_block;
2981 + int mapped_block = block;
2983 +#if defined(MTK_NAND_BMT)
2984 + mapped_block = get_mapping_block_index(block);
2985 + // write bad index into oob
2986 + if (mapped_block != block)
2987 + set_bad_index_to_oob(chip->oob_poi, block);
2989 + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2993 + mapped_block = block_remap(mtd, block);
2994 + if (mapped_block == -1)
2995 + return NAND_STATUS_FAIL;
2996 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2997 + return NAND_STATUS_FAIL;
3001 + if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
3002 + MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
3003 +#if defined(MTK_NAND_BMT)
3004 + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
3005 + UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
3007 + MSG(INIT, "Update BMT success\n");
3010 + MSG(INIT, "Update BMT fail\n");
3014 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
3015 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
3016 + if (shift_on_bbt) {
3017 + mapped_block = block_remap(mtd, mapped_block);
3018 + if (mapped_block == -1)
3019 + return NAND_STATUS_FAIL;
3020 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
3021 + return NAND_STATUS_FAIL;
3023 + return NAND_STATUS_FAIL;
3034 +mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
3036 + struct nand_chip *chip = mtd->priv;
3037 + int block = (int)offset >> chip->phys_erase_shift;
3038 + int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
3041 + memset(buf, 0xFF, 8);
3043 + return mtk_nand_write_oob_raw(mtd, buf, page, 8);
3047 +mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
3049 + struct nand_chip *chip = mtd->priv;
3050 + int block = (int)offset >> chip->phys_erase_shift;
3052 + int mapped_block = block;
3054 + nand_get_device(chip, mtd, FL_WRITING);
3056 +#if defined(MTK_NAND_BMT)
3057 + mapped_block = get_mapping_block_index(block);
3058 + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3060 + if (shift_on_bbt) {
3061 + mapped_block = block_remap(mtd, block);
3062 + if (mapped_block == -1) {
3063 + printk("NAND mark bad failed\n");
3064 + nand_release_device(mtd);
3065 + return NAND_STATUS_FAIL;
3068 + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3070 + nand_release_device(mtd);
3076 +mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
3081 + int sec_num = 1<<(chip->page_shift-9);
3082 + int spare_per_sector = mtd->oobsize/sec_num;
3084 + if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
3085 + printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
3089 + // adjust to ecc physical layout to memory layout
3090 + /*********************************************************/
3091 + /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
3092 + /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
3093 + /*********************************************************/
3095 + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
3097 + for (i = 0; i < layout->eccbytes; i++) {
3098 + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
3099 + chip->oob_poi[layout->eccpos[i]] = local_oob_buf[iter];
3103 + for (i = 0; i < sec_num; i++) {
3104 + memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
3111 +mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
3113 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3114 + int block = page / page_per_block;
3115 + u16 page_in_block = page % page_per_block;
3116 + int mapped_block = block;
3118 +#if defined (MTK_NAND_BMT)
3119 + mapped_block = get_mapping_block_index(block);
3120 + mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
3122 + if (shift_on_bbt) {
3123 + mapped_block = block_remap(mtd, block);
3124 + if (mapped_block == -1)
3125 + return NAND_STATUS_FAIL;
3126 + // allow to read oob even if the block is bad
3128 + if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0)
3135 +mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
3137 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3138 + int page_addr = (int)(ofs >> chip->page_shift);
3139 + unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3140 + unsigned char oob_buf[8];
3142 + page_addr &= ~(page_per_block - 1);
3143 + if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) {
3144 + printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n");
3148 + if (oob_buf[0] != 0xff) {
3149 + printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]);
3158 +mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
3160 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3161 + int block = (int)ofs >> chip->phys_erase_shift;
3162 + int mapped_block = block;
3165 +#if defined(MTK_NAND_BMT)
3166 + mapped_block = get_mapping_block_index(block);
3168 + if (shift_on_bbt) {
3169 + mapped_block = block_remap(mtd, block);
3173 + ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift);
3174 +#if defined (MTK_NAND_BMT)
3176 + MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
3177 + if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
3178 + MSG(INIT, "Update BMT success\n");
3181 + MSG(INIT, "Update BMT fail\n");
3190 +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
3191 +char gacBuf[4096 + 288];
3194 +mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
3196 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3197 + struct NAND_CMD *pkCMD = &g_kCMD;
3198 + u32 u4PageSize = mtd->writesize;
3202 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
3204 + pSrc = (u32 *) buf;
3205 + pDst = (u32 *) gacBuf;
3206 + len = len / sizeof(u32);
3207 + for (i = 0; i < len; ++i) {
3208 + if (*pSrc != *pDst) {
3209 + MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
3216 + pSrc = (u32 *) chip->oob_poi;
3217 + pDst = (u32 *) (gacBuf + u4PageSize);
3219 + if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
3220 + // TODO: Ask Designer Why?
3221 + //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
3222 + MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
3223 + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
3224 + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
3232 +mtk_nand_init_hw(struct mtk_nand_host *host) {
3233 + struct mtk_nand_host_hw *hw = host->hw;
3236 + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
3237 + data &= ~((0x3<<18)|(0x3<<16));
3238 + data |= ((0x2<<18) |(0x2<<16));
3239 + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
3241 + MSG(INIT, "Enable NFI Clock\n");
3242 + nand_enable_clock();
3244 + g_bInitDone = false;
3245 + g_kCMD.u4OOBRowAddr = (u32) - 1;
3247 + /* Set default NFI access timing control */
3248 + DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
3249 + DRV_WriteReg16(NFI_CNFG_REG16, 0);
3250 + DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
3252 + /* Reset the state machine and data FIFO, because flushing FIFO */
3253 + (void)mtk_nand_reset();
3255 + /* Set the ECC engine */
3256 + if (hw->nand_ecc_mode == NAND_ECC_HW) {
3257 + MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
3259 + NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
3260 + ECC_Config(host->hw,4);
3261 + mtk_nand_configure_fdm(8);
3262 + mtk_nand_configure_lock();
3265 + NFI_SET_REG16(NFI_IOCON_REG16, 0x47);
3268 +static int mtk_nand_dev_ready(struct mtd_info *mtd)
3270 + return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
3273 +#define FACT_BBT_BLOCK_NUM 32 // use the latest 32 BLOCK for factory bbt table
3274 +#define FACT_BBT_OOB_SIGNATURE 1
3275 +#define FACT_BBT_SIGNATURE_LEN 7
3276 +const u8 oob_signature[] = "mtknand";
3277 +static u8 *fact_bbt = 0;
3278 +static u32 bbt_size = 0;
3281 +read_fact_bbt(struct mtd_info *mtd, unsigned int page)
3283 + struct nand_chip *chip = mtd->priv;
3286 + if (mtk_nand_read_oob_hw(mtd, chip, page)==0)
3288 + if (chip->oob_poi[nand_badblock_offset] != 0xFF)
3290 + printk("Bad Block on Page %x\n", page);
3293 + if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0)
3295 + printk("compare signature failed %x\n", page);
3298 + if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi))
3300 + printk("Signature matched and data read!\n");
3301 + memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize);
3306 + printk("failed at page %x\n", page);
3311 +load_fact_bbt(struct mtd_info *mtd)
3313 + struct nand_chip *chip = mtd->priv;
3317 + total_block = 1 << (chip->chip_shift - chip->phys_erase_shift);
3318 + bbt_size = total_block >> 2;
3320 + if ((!fact_bbt) && (bbt_size))
3321 + fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL);
3325 + for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--)
3327 + if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0)
3329 + printk("load_fact_bbt success %d\n", i);
3334 + printk("load_fact_bbt failed\n");
3338 +static int oob_mtk_ooblayout_ecc(struct mtd_info *mtd, int section,
3339 + struct mtd_oob_region *oobregion)
3341 + oobregion->length = 8;
3342 + oobregion->offset = layout->eccpos[section * 8];
3347 +static int oob_mtk_ooblayout_free(struct mtd_info *mtd, int section,
3348 + struct mtd_oob_region *oobregion)
3350 + if (section >= (layout->eccbytes / 8)) {
3353 + oobregion->offset = layout->oobfree[section].offset;
3354 + oobregion->length = layout->oobfree[section].length;
3360 +static const struct mtd_ooblayout_ops oob_mtk_ops = {
3361 + .ecc = oob_mtk_ooblayout_ecc,
3362 + .free = oob_mtk_ooblayout_free,
3366 +mtk_nand_probe(struct platform_device *pdev)
3368 + struct mtd_part_parser_data ppdata;
3369 + struct mtk_nand_host_hw *hw;
3370 + struct nand_chip *nand_chip;
3371 + struct mtd_info *mtd;
3372 + u8 ext_id1, ext_id2, ext_id3;
3379 + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
3380 + data &= ~((0x3<<18)|(0x3<<16));
3381 + data |= ((0x2<<18) |(0x2<<16));
3382 + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
3384 + hw = &mt7621_nand_hw;
3386 + /* Allocate memory for the device structure (and zero it) */
3387 + host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
3389 + MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
3393 + /* Allocate memory for 16 byte aligned buffer */
3394 + local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16);
3395 + printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
3398 + /* init mtd data structure */
3399 + nand_chip = &host->nand_chip;
3400 + nand_chip->priv = host; /* link the private data structures */
3402 + mtd = host->mtd = &nand_chip->mtd;
3403 + mtd->priv = nand_chip;
3404 + mtd->owner = THIS_MODULE;
3405 + mtd->name = "MT7621-NAND";
3407 + hw->nand_ecc_mode = NAND_ECC_HW;
3409 + /* Set address of NAND IO lines */
3410 + nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
3411 + nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
3412 + nand_chip->chip_delay = 20; /* 20us command delay time */
3413 + nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
3414 + nand_chip->ecc.strength = 1;
3415 + nand_chip->read_byte = mtk_nand_read_byte;
3416 + nand_chip->read_buf = mtk_nand_read_buf;
3417 + nand_chip->write_buf = mtk_nand_write_buf;
3418 +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
3419 + nand_chip->verify_buf = mtk_nand_verify_buf;
3421 + nand_chip->select_chip = mtk_nand_select_chip;
3422 + nand_chip->dev_ready = mtk_nand_dev_ready;
3423 + nand_chip->cmdfunc = mtk_nand_command_bp;
3424 + nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
3425 + nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
3427 + mtd_set_ooblayout(mtd, &oob_mtk_ops);
3428 + nand_chip->ecc.size = hw->nand_ecc_size; //2048
3429 + nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
3431 + // For BMT, we need to revise driver architecture
3432 + nand_chip->write_page = mtk_nand_write_page;
3433 + nand_chip->ecc.write_oob = mtk_nand_write_oob;
3434 + nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
3435 + nand_chip->read_page = mtk_nand_read_page;
3436 + nand_chip->ecc.read_oob = mtk_nand_read_oob;
3437 + nand_chip->block_bad = mtk_nand_block_bad;
3438 + nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
3440 + //Qwert:Add for Uboot
3441 + mtk_nand_init_hw(host);
3442 + /* Select the device */
3443 + nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
3446 + * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3449 + nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
3451 + memset(&devinfo, 0 , sizeof(flashdev_info));
3453 + /* Send the command for reading device ID */
3455 + nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3457 + /* Read manufacturer and device IDs */
3458 + manu_id = nand_chip->read_byte(mtd);
3459 + dev_id = nand_chip->read_byte(mtd);
3460 + id = dev_id | (manu_id << 8);
3461 + ext_id1 = nand_chip->read_byte(mtd);
3462 + ext_id2 = nand_chip->read_byte(mtd);
3463 + ext_id3 = nand_chip->read_byte(mtd);
3464 + ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
3465 + if (!get_device_info(id, ext_id, &devinfo)) {
3466 + u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F;
3467 + MSG(INIT, "Not Support this Device! \r\n");
3468 + memset(&devinfo, 0 , sizeof(flashdev_info));
3469 + MSG(INIT, "chip_mode=%08X\n",chip_mode);
3471 + /* apply bootstrap first */
3472 + devinfo.addr_cycle = 5;
3473 + devinfo.iowidth = 8;
3475 + switch (chip_mode) {
3477 + devinfo.pagesize = 2048;
3478 + devinfo.sparesize = 128;
3479 + devinfo.totalsize = 128;
3480 + devinfo.blocksize = 128;
3483 + devinfo.pagesize = 4096;
3484 + devinfo.sparesize = 128;
3485 + devinfo.totalsize = 1024;
3486 + devinfo.blocksize = 256;
3489 + devinfo.pagesize = 4096;
3490 + devinfo.sparesize = 224;
3491 + devinfo.totalsize = 2048;
3492 + devinfo.blocksize = 512;
3496 + devinfo.pagesize = 2048;
3497 + devinfo.sparesize = 64;
3498 + devinfo.totalsize = 128;
3499 + devinfo.blocksize = 128;
3503 + devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING;
3504 + devinfo.devciename[0] = 'U';
3505 + devinfo.advancedmode = 0;
3507 + mtd->writesize = devinfo.pagesize;
3508 + mtd->erasesize = (devinfo.blocksize<<10);
3509 + mtd->oobsize = devinfo.sparesize;
3511 + nand_chip->chipsize = (devinfo.totalsize<<20);
3512 + nand_chip->page_shift = ffs(mtd->writesize) - 1;
3513 + nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1;
3514 + nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
3515 + nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;//0x1C;//ffs(nand_chip->chipsize) - 1;
3516 + nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
3518 + /* allocate buffers or call select_chip here or a bit earlier*/
3520 + struct nand_buffers *nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize + mtd->oobsize * 3, GFP_KERNEL);
3524 + nbuf->ecccalc = (uint8_t *)(nbuf + 1);
3525 + nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
3526 + nbuf->databuf = nbuf->ecccode + mtd->oobsize;
3528 + nand_chip->buffers = nbuf;
3529 + nand_chip->options |= NAND_OWN_BUFFERS;
3532 + nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize;
3533 + nand_chip->badblockpos = 0;
3535 + if (devinfo.pagesize == 4096)
3536 + layout = &nand_oob_128;
3537 + else if (devinfo.pagesize == 2048)
3538 + layout = &nand_oob_64;
3539 + else if (devinfo.pagesize == 512)
3540 + layout = &nand_oob_16;
3542 + layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE);
3543 + for (i = 0; i < layout->eccbytes; i++)
3544 + layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i;
3546 + MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
3547 + hw->nfi_bus_width = devinfo.iowidth;
3548 + DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
3550 + /* 16-bit bus width */
3551 + if (hw->nfi_bus_width == 16) {
3552 + MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
3553 + nand_chip->options |= NAND_BUSWIDTH_16;
3555 + mtd->oobsize = devinfo.sparesize;
3556 + hw->nfi_cs_num = 1;
3558 +	/* Scan to find existence of the device */
3559 + if (nand_scan(mtd, hw->nfi_cs_num)) {
3560 + MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
3565 + nand_chip->erase = mtk_nand_erase;
3567 + g_page_size = mtd->writesize;
3568 + platform_set_drvdata(pdev, host);
3569 + if (hw->nfi_bus_width == 16) {
3570 + NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
3573 + nand_chip->select_chip(mtd, 0);
3574 +#if defined(MTK_NAND_BMT)
3575 + nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
3577 + mtd->size = nand_chip->chipsize;
3579 + CFG_BLOCKSIZE = mtd->erasesize;
3581 +#if defined(MTK_NAND_BMT)
3583 + if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) {
3584 + MSG(INIT, "Error: init bmt failed\n");
3590 + nand_set_flash_node(nand_chip, pdev->dev.of_node);
3591 + err = mtd_device_parse_register(mtd, probe_types, &ppdata,
3594 + MSG(INIT, "[mtk_nand] probe successfully!\n");
3595 + nand_disable_clock();
3597 + if (load_fact_bbt(mtd) == 0) {
3599 + for (i = 0; i < 0x100; i++)
3600 + nand_chip->bbt[i] |= fact_bbt[i];
3607 + MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
3608 + nand_release(mtd);
3609 + platform_set_drvdata(pdev, NULL);
3610 + if ( NULL != nand_chip->buffers) {
3611 + kfree(nand_chip->buffers);
3614 + nand_disable_clock();
3619 +mtk_nand_remove(struct platform_device *pdev)
3621 + struct mtk_nand_host *host = platform_get_drvdata(pdev);
3622 + struct mtd_info *mtd = host->mtd;
3623 + struct nand_chip *nand_chip = &host->nand_chip;
3625 + nand_release(mtd);
3626 + if ( NULL != nand_chip->buffers) {
3627 + kfree(nand_chip->buffers);
3630 + nand_disable_clock();
3635 +static const struct of_device_id mt7621_nand_match[] = {
3636 + { .compatible = "mtk,mt7621-nand" },
3639 +MODULE_DEVICE_TABLE(of, mt7621_nand_match);
3641 +static struct platform_driver mtk_nand_driver = {
3642 + .probe = mtk_nand_probe,
3643 + .remove = mtk_nand_remove,
3645 + .name = "MT7621-NAND",
3646 + .owner = THIS_MODULE,
3647 + .of_match_table = mt7621_nand_match,
3652 +mtk_nand_init(void)
3654 + printk("MediaTek Nand driver init, version %s\n", VERSION);
3656 + return platform_driver_register(&mtk_nand_driver);
3660 +mtk_nand_exit(void)
3662 + platform_driver_unregister(&mtk_nand_driver);
3665 +module_init(mtk_nand_init);
3666 +module_exit(mtk_nand_exit);
3667 +MODULE_LICENSE("GPL");
3669 +++ b/drivers/mtd/nand/mtk_nand2.h
3671 +#ifndef __MTK_NAND_H
3672 +#define __MTK_NAND_H
3674 +#define RALINK_NAND_CTRL_BASE 0xBE003000
3675 +#define RALINK_SYSCTL_BASE 0xBE000000
3676 +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
3677 +/*******************************************************************************
3678 + * NFI Register Definition
3679 + *******************************************************************************/
3681 +#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000))
3682 +#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004))
3683 +#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008))
3684 +#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C))
3685 +#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010))
3686 +#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014))
3688 +#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020))
3690 +#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030))
3691 +#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034))
3692 +#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038))
3694 +#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040))
3696 +#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050))
3697 +#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054))
3698 +#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
3700 +#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060))
3701 +#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064))
3702 +#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068))
3704 +#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070))
3706 +#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080))
3707 +#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084))
3709 +#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090))
3710 +#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094))
3712 +#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0))
3713 +#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4))
3715 +#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100))
3716 +#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104))
3717 +#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108))
3718 +#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
3719 +#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
3720 +#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
3721 +#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
3722 +#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
3723 +#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
3724 +#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
3725 +#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
3726 +#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
3727 +#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
3728 +#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
3729 +#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
3730 +#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
3731 +#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
3732 +#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
3733 +#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
3734 +#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
3735 +#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
3736 +#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
3737 +#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
3738 +#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
3739 +#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
3740 +#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
3741 +#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
3742 +#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
3743 +#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
3744 +#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
3745 +#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
3746 +#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
3747 +#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
3748 +#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
3749 +#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
3751 +#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
3752 +#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
3753 +#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
3754 +#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
3755 +#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
3758 +/*******************************************************************************
3759 + * NFI Register Field Definition
3760 + *******************************************************************************/
3763 +#define CNFG_AHB (0x0001)
3764 +#define CNFG_READ_EN (0x0002)
3765 +#define CNFG_DMA_BURST_EN (0x0004)
3766 +#define CNFG_BYTE_RW (0x0040)
3767 +#define CNFG_HW_ECC_EN (0x0100)
3768 +#define CNFG_AUTO_FMT_EN (0x0200)
3769 +#define CNFG_OP_IDLE (0x0000)
3770 +#define CNFG_OP_READ (0x1000)
3771 +#define CNFG_OP_SRD (0x2000)
3772 +#define CNFG_OP_PRGM (0x3000)
3773 +#define CNFG_OP_ERASE (0x4000)
3774 +#define CNFG_OP_RESET (0x5000)
3775 +#define CNFG_OP_CUST (0x6000)
3776 +#define CNFG_OP_MODE_MASK (0x7000)
3777 +#define CNFG_OP_MODE_SHIFT (12)
3780 +#define PAGEFMT_512 (0x0000)
3781 +#define PAGEFMT_2K (0x0001)
3782 +#define PAGEFMT_4K (0x0002)
3784 +#define PAGEFMT_PAGE_MASK (0x0003)
3786 +#define PAGEFMT_DBYTE_EN (0x0008)
3788 +#define PAGEFMT_SPARE_16 (0x0000)
3789 +#define PAGEFMT_SPARE_26 (0x0001)
3790 +#define PAGEFMT_SPARE_27 (0x0002)
3791 +#define PAGEFMT_SPARE_28 (0x0003)
3792 +#define PAGEFMT_SPARE_MASK (0x0030)
3793 +#define PAGEFMT_SPARE_SHIFT (4)
3795 +#define PAGEFMT_FDM_MASK (0x0F00)
3796 +#define PAGEFMT_FDM_SHIFT (8)
3798 +#define PAGEFMT_FDM_ECC_MASK (0xF000)
3799 +#define PAGEFMT_FDM_ECC_SHIFT (12)
3802 +#define CON_FIFO_FLUSH (0x0001)
3803 +#define CON_NFI_RST (0x0002)
3804 +#define CON_NFI_SRD (0x0010)
3806 +#define CON_NFI_NOB_MASK (0x0060)
3807 +#define CON_NFI_NOB_SHIFT (5)
3809 +#define CON_NFI_BRD (0x0100)
3810 +#define CON_NFI_BWR (0x0200)
3812 +#define CON_NFI_SEC_MASK (0xF000)
3813 +#define CON_NFI_SEC_SHIFT (12)
3816 +#define ACCCON_SETTING ()
3819 +#define INTR_RD_DONE_EN (0x0001)
3820 +#define INTR_WR_DONE_EN (0x0002)
3821 +#define INTR_RST_DONE_EN (0x0004)
3822 +#define INTR_ERASE_DONE_EN (0x0008)
3823 +#define INTR_BSY_RTN_EN (0x0010)
3824 +#define INTR_ACC_LOCK_EN (0x0020)
3825 +#define INTR_AHB_DONE_EN (0x0040)
3826 +#define INTR_ALL_INTR_DE (0x0000)
3827 +#define INTR_ALL_INTR_EN (0x007F)
3830 +#define INTR_RD_DONE (0x0001)
3831 +#define INTR_WR_DONE (0x0002)
3832 +#define INTR_RST_DONE (0x0004)
3833 +#define INTR_ERASE_DONE (0x0008)
3834 +#define INTR_BSY_RTN (0x0010)
3835 +#define INTR_ACC_LOCK (0x0020)
3836 +#define INTR_AHB_DONE (0x0040)
3839 +#define ADDR_COL_NOB_MASK (0x0003)
3840 +#define ADDR_COL_NOB_SHIFT (0)
3841 +#define ADDR_ROW_NOB_MASK (0x0030)
3842 +#define ADDR_ROW_NOB_SHIFT (4)
3845 +#define STA_READ_EMPTY (0x00001000)
3846 +#define STA_ACC_LOCK (0x00000010)
3847 +#define STA_CMD_STATE (0x00000001)
3848 +#define STA_ADDR_STATE (0x00000002)
3849 +#define STA_DATAR_STATE (0x00000004)
3850 +#define STA_DATAW_STATE (0x00000008)
3852 +#define STA_NAND_FSM_MASK (0x1F000000)
3853 +#define STA_NAND_BUSY (0x00000100)
3854 +#define STA_NAND_BUSY_RETURN (0x00000200)
3855 +#define STA_NFI_FSM_MASK (0x000F0000)
3856 +#define STA_NFI_OP_MASK (0x0000000F)
3859 +#define FIFO_RD_EMPTY (0x0040)
3860 +#define FIFO_RD_FULL (0x0080)
3861 +#define FIFO_WR_FULL (0x8000)
3862 +#define FIFO_WR_EMPTY (0x4000)
3863 +#define FIFO_RD_REMAIN(x) (0x1F&(x))
3864 +#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8)
3867 +#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12)
3868 +#define ADDRCNTR_OFFSET(x) (0x03FF&(x))
3871 +#define NFI_LOCK_ON (0x0001)
3874 +#define PROG_RADD_NOB_MASK (0x7000)
3875 +#define PROG_RADD_NOB_SHIFT (12)
3876 +#define PROG_CADD_NOB_MASK (0x0300)
3877 +#define PROG_CADD_NOB_SHIFT (8)
3878 +#define ERASE_RADD_NOB_MASK (0x0070)
3879 +#define ERASE_RADD_NOB_SHIFT (4)
3880 +#define ERASE_CADD_NOB_MASK (0x0007)
3881 +#define ERASE_CADD_NOB_SHIFT (0)
3883 +/*******************************************************************************
3884 + * ECC Register Definition
3885 + *******************************************************************************/
3887 +#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000))
3888 +#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004))
3889 +#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008))
3890 +#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C))
3891 +#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010))
3892 +#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014))
3893 +#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018))
3894 +#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C))
3895 +#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020))
3896 +#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024))
3897 +#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028))
3898 +#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
3900 +#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100))
3901 +#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104))
3902 +#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
3903 +#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C))
3904 +#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110))
3905 +#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114))
3906 +#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118))
3907 +#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C))
3908 +#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120))
3909 +#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124))
3910 +#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128))
3911 +#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C))
3912 +#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130))
3913 +#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134))
3914 +#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138))
3915 +#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C))
3916 +#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140))
3917 +#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144))
3918 +#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148))
3919 +#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C))
3921 +/*******************************************************************************
3922 + * ECC register definition
3923 + *******************************************************************************/
3925 +#define ENC_EN (0x0001)
3926 +#define ENC_DE (0x0000)
3929 +#define ECC_CNFG_ECC4 (0x0000)
3930 +#define ECC_CNFG_ECC6 (0x0001)
3931 +#define ECC_CNFG_ECC8 (0x0002)
3932 +#define ECC_CNFG_ECC10 (0x0003)
3933 +#define ECC_CNFG_ECC12 (0x0004)
3934 +#define ECC_CNFG_ECC_MASK (0x00000007)
3936 +#define ENC_CNFG_NFI (0x0010)
3937 +#define ENC_CNFG_MODE_MASK (0x0010)
3939 +#define ENC_CNFG_META6 (0x10300000)
3940 +#define ENC_CNFG_META8 (0x10400000)
3942 +#define ENC_CNFG_MSG_MASK (0x1FFF0000)
3943 +#define ENC_CNFG_MSG_SHIFT (0x10)
3946 +#define ENC_IDLE (0x0001)
3949 +#define STA_FSM (0x001F)
3950 +#define STA_COUNT_PS (0xFF10)
3951 +#define STA_COUNT_MS (0x3FFF0000)
3954 +#define ENC_IRQEN (0x0001)
3956 +/* ECC_ENCIRQSTA */
3957 +#define ENC_IRQSTA (0x0001)
3960 +#define DEC_EN (0x0001)
3961 +#define DEC_DE (0x0000)
3964 +#define DEC_CNFG_ECC4 (0x0000)
3965 +//#define DEC_CNFG_ECC6 (0x0001)
3966 +//#define DEC_CNFG_ECC12 (0x0002)
3967 +#define DEC_CNFG_NFI (0x0010)
3968 +//#define DEC_CNFG_META6 (0x10300000)
3969 +//#define DEC_CNFG_META8 (0x10400000)
3971 +#define DEC_CNFG_FER (0x01000)
3972 +#define DEC_CNFG_EL (0x02000)
3973 +#define DEC_CNFG_CORRECT (0x03000)
3974 +#define DEC_CNFG_TYPE_MASK (0x03000)
3976 +#define DEC_CNFG_EMPTY_EN (0x80000000)
3978 +#define DEC_CNFG_CODE_MASK (0x1FFF0000)
3979 +#define DEC_CNFG_CODE_SHIFT (0x10)
3982 +#define DEC_IDLE (0x0001)
3985 +#define DEC_FER0 (0x0001)
3986 +#define DEC_FER1 (0x0002)
3987 +#define DEC_FER2 (0x0004)
3988 +#define DEC_FER3 (0x0008)
3989 +#define DEC_FER4 (0x0010)
3990 +#define DEC_FER5 (0x0020)
3991 +#define DEC_FER6 (0x0040)
3992 +#define DEC_FER7 (0x0080)
3995 +#define ERR_NUM0 (0x0000000F)
3996 +#define ERR_NUM1 (0x000000F0)
3997 +#define ERR_NUM2 (0x00000F00)
3998 +#define ERR_NUM3 (0x0000F000)
3999 +#define ERR_NUM4 (0x000F0000)
4000 +#define ERR_NUM5 (0x00F00000)
4001 +#define ERR_NUM6 (0x0F000000)
4002 +#define ERR_NUM7 (0xF0000000)
4005 +#define DEC_DONE0 (0x0001)
4006 +#define DEC_DONE1 (0x0002)
4007 +#define DEC_DONE2 (0x0004)
4008 +#define DEC_DONE3 (0x0008)
4009 +#define DEC_DONE4 (0x0010)
4010 +#define DEC_DONE5 (0x0020)
4011 +#define DEC_DONE6 (0x0040)
4012 +#define DEC_DONE7 (0x0080)
4015 +#define DEC_IRQEN (0x0001)
4017 +/* ECC_DECIRQSTA */
4018 +#define DEC_IRQSTA (0x0001)
4020 +#define CHIPVER_ECO_1 (0x8a00)
4021 +#define CHIPVER_ECO_2 (0x8a01)
4025 +/*******************************************************************************
4026 + * Data Structure Definition
4027 + *******************************************************************************/
4028 +struct mtk_nand_host
4030 + struct nand_chip nand_chip;
4031 + struct mtd_info *mtd;
4032 + struct mtk_nand_host_hw *hw;
4044 + u32 pureReadOOBNum;
4049 + * ECC layout control structure. Exported to userspace for
4050 + * diagnosis and to allow creation of raw images
4051 +struct nand_ecclayout {
4052 + uint32_t eccbytes;
4053 + uint32_t eccpos[64];
4054 + uint32_t oobavail;
4055 + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
4058 +#define __DEBUG_NAND 1 /* Debug information on/off */
4060 +/* Debug message event */
4061 +#define DBG_EVT_NONE 0x00000000 /* No event */
4062 +#define DBG_EVT_INIT 0x00000001 /* Initial related event */
4063 +#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */
4064 +#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */
4065 +#define DBG_EVT_READ 0x00000008 /* Read related event */
4066 +#define DBG_EVT_WRITE 0x00000010 /* Write related event */
4067 +#define DBG_EVT_ERASE 0x00000020 /* Erase related event */
4068 +#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */
4069 +#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */
4071 +#define DBG_EVT_ALL 0xffffffff
4073 +#define DBG_EVT_MASK (DBG_EVT_INIT)
4076 +#define MSG(evt, fmt, args...) \
4078 + if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
4079 + printk(fmt, ##args); \
4083 +#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
4085 +#define MSG(evt, fmt, args...) do{}while(0)
4086 +#define MSG_FUNC_ENTRY(f) do{}while(0)
4089 +#define RAMDOM_READ 1<<0
4090 +#define CACHE_READ 1<<1
4094 + u16 id; //deviceid+menuid
4102 + u32 timmingsetting;
4103 + char devciename[14];
4104 + u32 advancedmode; //
4105 +}flashdev_info,*pflashdev_info;
4109 +struct mtk_nand_host_hw {
4110 + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
4111 + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
4112 + unsigned int nfi_cs_num; /* NFI_CS_NUM */
4113 + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
4114 + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
4115 + unsigned int nand_ecc_size;
4116 + unsigned int nand_ecc_bytes;
4117 + unsigned int nand_ecc_mode;
4119 +extern struct mtk_nand_host_hw mt7621_nand_hw;
4120 +extern u32 CFG_BLOCKSIZE;
4123 --- a/drivers/mtd/nand/nand_base.c
4124 +++ b/drivers/mtd/nand/nand_base.c
4126 #include <linux/mtd/partitions.h>
4127 #include <linux/of.h>
4129 -static int nand_get_device(struct mtd_info *mtd, int new_state);
4130 +int nand_get_device(struct mtd_info *mtd, int new_state);
4132 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4133 struct mtd_oob_ops *ops);
4134 @@ -240,7 +240,7 @@ static int check_offs_len(struct mtd_inf
4136 * Release chip lock and wake up anyone waiting on the device.
4138 -static void nand_release_device(struct mtd_info *mtd)
4139 +void nand_release_device(struct mtd_info *mtd)
4141 struct nand_chip *chip = mtd_to_nand(mtd);
4143 @@ -968,7 +968,7 @@ static void panic_nand_get_device(struct
4145 * Get the device and lock it for exclusive access
4149 nand_get_device(struct mtd_info *mtd, int new_state)
4151 struct nand_chip *chip = mtd_to_nand(mtd);
4152 --- a/drivers/mtd/nand/nand_bbt.c
4153 +++ b/drivers/mtd/nand/nand_bbt.c
4154 @@ -1215,6 +1215,25 @@ err:
4158 +void nand_bbt_set(struct mtd_info *mtd, int page, int flag)
4160 + struct nand_chip *this = mtd->priv;
4163 + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4164 + this->bbt[block >> 3] &= ~(0x03 << (block & 0x6));
4165 + this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6);
4168 +int nand_bbt_get(struct mtd_info *mtd, int page)
4170 + struct nand_chip *this = mtd->priv;
4173 + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4174 + return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
4178 * nand_update_bbt - update bad block table(s)
4179 * @mtd: MTD device structure
4181 +++ b/drivers/mtd/nand/nand_def.h
4183 +#ifndef __NAND_DEF_H__
4184 +#define __NAND_DEF_H__
4186 +#define VERSION "v2.1 Fix AHB virt2phys error"
4187 +#define MODULE_NAME "# MTK NAND #"
4188 +#define PROCNAME "driver/nand"
4191 +//#define __UBOOT_NAND__ 1
4192 +#define __KERNEL_NAND__ 1
4193 +//#define __PRELOADER_NAND__ 1
4195 +//#define _MTK_NAND_DUMMY_DRIVER
4196 +//#define CONFIG_BADBLOCK_CHECK 1
4197 +//#ifdef CONFIG_BADBLOCK_CHECK
4198 +//#define MTK_NAND_BMT 1
4200 +#define ECC_ENABLE 1
4201 +#define MANUAL_CORRECT 1
4202 +//#define __INTERNAL_USE_AHB_MODE__ (0)
4203 +#define SKIP_BAD_BLOCK
4206 +#ifndef NAND_OTP_SUPPORT
4207 +#define NAND_OTP_SUPPORT 0
4210 +/*******************************************************************************
4211 + * Macro definition
4212 + *******************************************************************************/
4213 +//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value)))
4214 +//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
4215 +//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
4216 +//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
4218 +#if defined (__KERNEL_NAND__)
4219 +#define NFI_SET_REG32(reg, value) \
4221 + g_value = (DRV_Reg32(reg) | (value));\
4222 + DRV_WriteReg32(reg, g_value); \
4225 +#define NFI_SET_REG16(reg, value) \
4227 + g_value = (DRV_Reg16(reg) | (value));\
4228 + DRV_WriteReg16(reg, g_value); \
4231 +#define NFI_CLN_REG32(reg, value) \
4233 + g_value = (DRV_Reg32(reg) & (~(value)));\
4234 + DRV_WriteReg32(reg, g_value); \
4237 +#define NFI_CLN_REG16(reg, value) \
4239 + g_value = (DRV_Reg16(reg) & (~(value)));\
4240 + DRV_WriteReg16(reg, g_value); \
4244 +#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
4245 +#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
4248 +#define NAND_SECTOR_SIZE (512)
4249 +#define OOB_PER_SECTOR (16)
4250 +#define OOB_AVAI_PER_SECTOR (8)
4252 +#ifndef PART_SIZE_BMTPOOL
4253 +#define BMT_POOL_SIZE (80)
4255 +#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
4258 +#define PMT_POOL_SIZE (2)
4260 +#define TIMEOUT_1 0x1fff
4261 +#define TIMEOUT_2 0x8ff
4262 +#define TIMEOUT_3 0xffff
4263 +#define TIMEOUT_4 0xffff//5000 //PIO
4266 + /* temporary definition */
4267 +#if !defined (__KERNEL_NAND__)
4269 +#define KERN_WARNING
4271 +#define PAGE_SIZE (4096)
4273 +#define AddStorageTrace //AddStorageTrace
4274 +#define STORAGE_LOGGER_MSG_NAND 0
4275 +#define NFI_BASE RALINK_NAND_CTRL_BASE
4276 +#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE
4278 +#ifdef __INTERNAL_USE_AHB_MODE__
4279 +#define MT65xx_POLARITY_LOW 0
4280 +#define MT65XX_PDN_PERI_NFI 0
4281 +#define MT65xx_EDGE_SENSITIVE 0
4282 +#define MT6575_NFI_IRQ_ID (58)
4285 +#if defined (__KERNEL_NAND__)
4286 +#define RALINK_REG(x) (*((volatile u32 *)(x)))
4287 +#define __virt_to_phys(x) virt_to_phys((volatile void*)x)
4289 +#define CONFIG_MTD_NAND_VERIFY_WRITE (1)
4290 +#define printk printf
4291 +#define ra_dbg printf
4292 +#define BUG() //BUG()
4293 +#define BUG_ON(x) //BUG_ON()
4294 +#define NUM_PARTITIONS 1
4297 +#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333)
4299 +//uboot only supports 1 cs
4300 +#define NFI_CS_NUM (1)
4301 +#define NFI_DEFAULT_CS (0)
4303 +#include "mt6575_typedefs.h"
4305 +#endif /* __NAND_DEF_H__ */
4307 +++ b/drivers/mtd/nand/nand_device_list.h
4309 +/* Copyright Statement:
4311 + * This software/firmware and related documentation ("MediaTek Software") are
4312 + * protected under relevant copyright laws. The information contained herein
4313 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4314 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4315 + * any reproduction, modification, use or disclosure of MediaTek Software,
4316 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4318 +/* MediaTek Inc. (C) 2010. All rights reserved.
4320 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4321 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4322 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4323 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4324 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4325 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4326 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4327 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4328 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4329 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4330 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4331 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4332 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4333 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4334 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4335 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4336 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4337 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4339 + * The following software/firmware and/or related documentation ("MediaTek Software")
4340 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4341 + * applicable license agreements with MediaTek Inc.
4344 +#ifndef __NAND_DEVICE_LIST_H__
4345 +#define __NAND_DEVICE_LIST_H__
4347 +static const flashdev_info gen_FlashTable[]={
4348 + {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0},
4349 + {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0},
4350 + {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0},
4351 + {0x2CDA, 0x909506, 5, 8, 256, 128, 2048, 64, 0x30C77fff, "MT29F2G08ABAE", 0},
4352 + {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
4353 + {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
4354 + {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
4355 + {0xC8D1, 0x809540, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81MA", 0},
4356 + {0xC8DA, 0x909544, 5, 8, 256, 128, 2048, 64, 0x30C77fff, "F59L2G81A", 0},
4357 + {0xC8DC, 0x909554, 5, 8, 512, 128, 2048, 64, 0x30C77fff, "F59L4G81A", 0},
4358 + {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
4359 + {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
4360 + {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
4361 + {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0},
4362 + {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0},
4363 + {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0},
4369 +++ b/drivers/mtd/nand/partition.h
4371 +/* Copyright Statement:
4373 + * This software/firmware and related documentation ("MediaTek Software") are
4374 + * protected under relevant copyright laws. The information contained herein
4375 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4376 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4377 + * any reproduction, modification, use or disclosure of MediaTek Software,
4378 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4380 +/* MediaTek Inc. (C) 2010. All rights reserved.
4382 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4383 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4384 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4385 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4386 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4387 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4388 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4389 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4390 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4391 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4392 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4393 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4394 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4395 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4396 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4397 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4398 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4399 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4401 + * The following software/firmware and/or related documentation ("MediaTek Software")
4402 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4403 + * applicable license agreements with MediaTek Inc.
4406 +#include <linux/mtd/mtd.h>
4407 +#include <linux/mtd/rawnand.h>
4408 +#include <linux/mtd/partitions.h>
4410 +#define RECONFIG_PARTITION_SIZE 1
4412 +#define MTD_BOOT_PART_SIZE 0x80000
4413 +#define MTD_CONFIG_PART_SIZE 0x20000
4414 +#define MTD_FACTORY_PART_SIZE 0x20000
4416 +extern unsigned int CFG_BLOCKSIZE;
4417 +#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2)
4418 +#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2)
4419 +#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1)
4421 +/*=======================================================================*/
4422 +/* NAND PARTITION Mapping */
4423 +/*=======================================================================*/
4424 +//#ifdef CONFIG_MTD_PARTITIONS
4425 +static struct mtd_partition g_pasStatic_Partition[] = {
4428 + size: MTDPART_SIZ_FULL,
4431 + /* Put your own partition definitions here */
4433 + name: "Bootloader",
4434 + size: MTD_BOOT_PART_SIZE,
4438 + size: MTD_CONFIG_PART_SIZE,
4439 + offset: MTDPART_OFS_APPEND
4442 + size: MTD_FACTORY_PART_SIZE,
4443 + offset: MTDPART_OFS_APPEND
4444 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4447 + size: MTD_KERN_PART_SIZE,
4448 + offset: MTDPART_OFS_APPEND,
4451 + size: MTD_ROOTFS_PART_SIZE,
4452 + offset: MTDPART_OFS_APPEND,
4453 +#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING
4455 + name: "Kernel_RootFS",
4456 + size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE,
4457 + offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE,
4459 +#else //CONFIG_RT2880_ROOTFS_IN_RAM
4463 + offset: MTDPART_OFS_APPEND,
4465 +#ifdef CONFIG_DUAL_IMAGE
4468 + size: MTD_KERN2_PART_SIZE,
4469 + offset: MTD_KERN2_PART_OFFSET,
4470 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4473 + size: MTD_ROOTFS2_PART_SIZE,
4474 + offset: MTD_ROOTFS2_PART_OFFSET,
4481 +#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition)
4482 +extern int part_num; // = NUM_PARTITIONS;
4484 +#undef RECONFIG_PARTITION_SIZE