ramips: backport series of patches that ensure GCRs of other CPUs are accessed properly
[openwrt/openwrt.git] / target / linux / ramips / patches-4.3 / 0039-mtd-add-mt7621-nand-support.patch
1 From 0e1c4e3c97b83b4e7da65b1c56f0a7d40736ac53 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Sun, 27 Jul 2014 11:05:17 +0100
4 Subject: [PATCH 39/53] mtd: add mt7621 nand support
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
8 drivers/mtd/nand/Kconfig | 6 +
9 drivers/mtd/nand/Makefile | 1 +
10 drivers/mtd/nand/bmt.c | 750 ++++++++++++
11 drivers/mtd/nand/bmt.h | 80 ++
12 drivers/mtd/nand/dev-nand.c | 63 +
13 drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++
14 drivers/mtd/nand/mtk_nand.c | 2304 +++++++++++++++++++++++++++++++++++
15 drivers/mtd/nand/mtk_nand.h | 452 +++++++
16 drivers/mtd/nand/nand_base.c | 6 +-
17 drivers/mtd/nand/nand_bbt.c | 19 +
18 drivers/mtd/nand/nand_def.h | 123 ++
19 drivers/mtd/nand/nand_device_list.h | 55 +
20 drivers/mtd/nand/partition.h | 115 ++
21 13 files changed, 4311 insertions(+), 3 deletions(-)
22 create mode 100644 drivers/mtd/nand/bmt.c
23 create mode 100644 drivers/mtd/nand/bmt.h
24 create mode 100644 drivers/mtd/nand/dev-nand.c
25 create mode 100644 drivers/mtd/nand/mt6575_typedefs.h
26 create mode 100644 drivers/mtd/nand/mtk_nand.c
27 create mode 100644 drivers/mtd/nand/mtk_nand.h
28 create mode 100644 drivers/mtd/nand/nand_def.h
29 create mode 100644 drivers/mtd/nand/nand_device_list.h
30 create mode 100644 drivers/mtd/nand/partition.h
31
32 diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
33 index 3324281..76cfc97 100644
34 --- a/drivers/mtd/nand/Kconfig
35 +++ b/drivers/mtd/nand/Kconfig
36 @@ -535,4 +535,10 @@ config MTD_NAND_HISI504
37 help
38 Enables support for NAND controller on Hisilicon SoC Hip04.
39
40 +config MTK_MTD_NAND
41 + tristate "Support for MTK SoC NAND controller"
42 + depends on SOC_MT7621
43 + select MTD_NAND_IDS
44 + select MTD_NAND_ECC
45 +
46 endif # MTD_NAND
47 diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
48 index 075a027..ec349e3 100644
49 --- a/drivers/mtd/nand/Makefile
50 +++ b/drivers/mtd/nand/Makefile
51 @@ -54,5 +54,6 @@ obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
52 obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
53 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
54 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
55 +obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand.o bmt.o
56
57 nand-objs := nand_base.o nand_bbt.o nand_timings.o
58 diff --git a/drivers/mtd/nand/bmt.c b/drivers/mtd/nand/bmt.c
59 new file mode 100644
60 index 0000000..0462871
61 --- /dev/null
62 +++ b/drivers/mtd/nand/bmt.c
63 @@ -0,0 +1,750 @@
64 +#include "bmt.h"
65 +
66 +typedef struct
67 +{
68 + char signature[3];
69 + u8 version;
70 + u8 bad_count; // bad block count in pool
71 + u8 mapped_count; // mapped block count in pool
72 + u8 checksum;
73 + u8 reseverd[13];
74 +} phys_bmt_header;
75 +
76 +typedef struct
77 +{
78 + phys_bmt_header header;
79 + bmt_entry table[MAX_BMT_SIZE];
80 +} phys_bmt_struct;
81 +
82 +typedef struct
83 +{
84 + char signature[3];
85 +} bmt_oob_data;
86 +
87 +static char MAIN_SIGNATURE[] = "BMT";
88 +static char OOB_SIGNATURE[] = "bmt";
89 +#define SIGNATURE_SIZE (3)
90 +
91 +#define MAX_DAT_SIZE 0x1000
92 +#define MAX_OOB_SIZE 0x80
93 +
94 +static struct mtd_info *mtd_bmt;
95 +static struct nand_chip *nand_chip_bmt;
96 +#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
97 +#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
98 +
99 +#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
100 +#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
101 +
102 +/*********************************************************************
103 +* Flash is split into 2 parts, system part is for normal          *
104 +* system usage, size is system_block_count, another is replace pool *
105 +* +-------------------------------------------------+ *
106 +* | system_block_count | bmt_block_count | *
107 +* +-------------------------------------------------+ *
108 +*********************************************************************/
109 +static u32 total_block_count; // block number in flash
110 +static u32 system_block_count;
111 +static int bmt_block_count; // bmt table size
112 +// static int bmt_count; // block used in bmt
113 +static int page_per_block; // pages per block
114 +
115 +static u32 bmt_block_index; // bmt block index
116 +static bmt_struct bmt; // dynamic created global bmt table
117 +
118 +static u8 dat_buf[MAX_DAT_SIZE];
119 +static u8 oob_buf[MAX_OOB_SIZE];
120 +static bool pool_erased;
121 +
122 +/***************************************************************
123 +*
124 +* Interface adaptor for preloader/uboot/kernel
125 +* These interfaces operate on physical address, read/write
126 +* physical data.
127 +*
128 +***************************************************************/
129 +int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
130 +{
131 + return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
132 +}
133 +
134 +bool nand_block_bad_bmt(u32 offset)
135 +{
136 + return mtk_nand_block_bad_hw(mtd_bmt, offset);
137 +}
138 +
139 +bool nand_erase_bmt(u32 offset)
140 +{
141 + int status;
142 + if (offset < 0x20000)
143 + {
144 + MSG(INIT, "erase offset: 0x%x\n", offset);
145 + }
146 +
147 + status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have an erase function defined
148 + if (status & NAND_STATUS_FAIL)
149 + return false;
150 + else
151 + return true;
152 +}
153 +
154 +int mark_block_bad_bmt(u32 offset)
155 +{
156 + return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset);
157 +}
158 +
159 +bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
160 +{
161 + if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
162 + return false;
163 + else
164 + return true;
165 +}
166 +
167 +/***************************************************************
168 +* *
169 +* static internal function *
170 +* *
171 +***************************************************************/
172 +static void dump_bmt_info(bmt_struct * bmt)
173 +{
174 + int i;
175 +
176 + MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
177 + for (i = 0; i < bmt->mapped_count; i++)
178 + {
179 + MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
180 + }
181 +}
182 +
183 +static bool match_bmt_signature(u8 * dat, u8 * oob)
184 +{
185 +
186 + if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
187 + {
188 + return false;
189 + }
190 +
191 + if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
192 + {
193 + MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
194 + }
195 + return true;
196 +}
197 +
198 +static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
199 +{
200 + int i;
201 + u8 checksum = 0;
202 + u8 *dat = (u8 *) phys_table;
203 +
204 + checksum += phys_table->header.version;
205 + checksum += phys_table->header.mapped_count;
206 +
207 + dat += sizeof(phys_bmt_header);
208 + for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
209 + {
210 + checksum += dat[i];
211 + }
212 +
213 + return checksum;
214 +}
215 +
216 +
217 +static int is_block_mapped(int index)
218 +{
219 + int i;
220 + for (i = 0; i < bmt.mapped_count; i++)
221 + {
222 + if (index == bmt.table[i].mapped_index)
223 + return i;
224 + }
225 + return -1;
226 +}
227 +
228 +static bool is_page_used(u8 * dat, u8 * oob)
229 +{
230 + return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
231 +}
232 +
233 +static bool valid_bmt_data(phys_bmt_struct * phys_table)
234 +{
235 + int i;
236 + u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
237 +
238 + // checksum correct?
239 + if (phys_table->header.checksum != checksum)
240 + {
241 + MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
242 + return false;
243 + }
244 +
245 + MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
246 +
247 + // block index correct?
248 + for (i = 0; i < phys_table->header.mapped_count; i++)
249 + {
250 + if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
251 + {
252 + MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
253 + return false;
254 + }
255 + }
256 +
257 + // pass check, valid bmt.
258 + MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
259 + return true;
260 +}
261 +
262 +static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
263 +{
264 + phys_bmt_struct phys_bmt;
265 +
266 + dump_bmt_info(bmt);
267 +
268 + // fill phys_bmt_struct structure with bmt_struct
269 + memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
270 +
271 + memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
272 + phys_bmt.header.version = BMT_VERSION;
273 + // phys_bmt.header.bad_count = bmt->bad_count;
274 + phys_bmt.header.mapped_count = bmt->mapped_count;
275 + memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
276 +
277 + phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
278 +
279 + memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
280 + memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
281 +}
282 +
283 +// return valid index if found BMT, else return 0
284 +static int load_bmt_data(int start, int pool_size)
285 +{
286 + int bmt_index = start + pool_size - 1; // find from the end
287 + phys_bmt_struct phys_table;
288 + int i;
289 +
290 + MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
291 +
292 + for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
293 + {
294 + if (nand_block_bad_bmt(OFFSET(bmt_index)))
295 + {
296 + MSG(INIT, "Skip bad block: %d\n", bmt_index);
297 + continue;
298 + }
299 +
300 + if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
301 + {
302 + MSG(INIT, "Error found when read block %d\n", bmt_index);
303 + continue;
304 + }
305 +
306 + if (!match_bmt_signature(dat_buf, oob_buf))
307 + {
308 + continue;
309 + }
310 +
311 + MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
312 +
313 + memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
314 +
315 + if (!valid_bmt_data(&phys_table))
316 + {
317 + MSG(INIT, "BMT data is not correct %d\n", bmt_index);
318 + continue;
319 + } else
320 + {
321 + bmt.mapped_count = phys_table.header.mapped_count;
322 + bmt.version = phys_table.header.version;
323 + // bmt.bad_count = phys_table.header.bad_count;
324 + memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
325 +
326 + MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
327 +
328 + for (i = 0; i < bmt.mapped_count; i++)
329 + {
330 + if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
331 + {
332 + MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
333 + mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
334 + }
335 + }
336 +
337 + return bmt_index;
338 + }
339 + }
340 +
341 + MSG(INIT, "bmt block not found!\n");
342 + return 0;
343 +}
344 +
345 +/*************************************************************************
346 +* Find an available block and erase. *
347 +* start_from_end: if true, find available block from end of flash. *
348 +* else, find from the beginning of the pool *
349 +* need_erase: if true, all unmapped blocks in the pool will be erased *
350 +*************************************************************************/
351 +static int find_available_block(bool start_from_end)
352 +{
353 + int i; // , j;
354 + int block = system_block_count;
355 + int direction;
356 + // int avail_index = 0;
357 + MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
358 +
359 + // erase all un-mapped blocks in pool when finding available block
360 + if (!pool_erased)
361 + {
362 + MSG(INIT, "Erase all un-mapped blocks in pool\n");
363 + for (i = 0; i < bmt_block_count; i++)
364 + {
365 + if (block == bmt_block_index)
366 + {
367 + MSG(INIT, "Skip bmt block 0x%x\n", block);
368 + continue;
369 + }
370 +
371 + if (nand_block_bad_bmt(OFFSET(block + i)))
372 + {
373 + MSG(INIT, "Skip bad block 0x%x\n", block + i);
374 + continue;
375 + }
376 +//if(block==4095)
377 +//{
378 +// continue;
379 +//}
380 +
381 + if (is_block_mapped(block + i) >= 0)
382 + {
383 + MSG(INIT, "Skip mapped block 0x%x\n", block + i);
384 + continue;
385 + }
386 +
387 + if (!nand_erase_bmt(OFFSET(block + i)))
388 + {
389 + MSG(INIT, "Erase block 0x%x failed\n", block + i);
390 + mark_block_bad_bmt(OFFSET(block + i));
391 + }
392 + }
393 +
394 + pool_erased = 1;
395 + }
396 +
397 + if (start_from_end)
398 + {
399 + block = total_block_count - 1;
400 + direction = -1;
401 + } else
402 + {
403 + block = system_block_count;
404 + direction = 1;
405 + }
406 +
407 + for (i = 0; i < bmt_block_count; i++, block += direction)
408 + {
409 + if (block == bmt_block_index)
410 + {
411 + MSG(INIT, "Skip bmt block 0x%x\n", block);
412 + continue;
413 + }
414 +
415 + if (nand_block_bad_bmt(OFFSET(block)))
416 + {
417 + MSG(INIT, "Skip bad block 0x%x\n", block);
418 + continue;
419 + }
420 +
421 + if (is_block_mapped(block) >= 0)
422 + {
423 + MSG(INIT, "Skip mapped block 0x%x\n", block);
424 + continue;
425 + }
426 +
427 + MSG(INIT, "Find block 0x%x available\n", block);
428 + return block;
429 + }
430 +
431 + return 0;
432 +}
433 +
434 +static unsigned short get_bad_index_from_oob(u8 * oob_buf)
435 +{
436 + unsigned short index;
437 + memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
438 +
439 + return index;
440 +}
441 +
442 +void set_bad_index_to_oob(u8 * oob, u16 index)
443 +{
444 + memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
445 +}
446 +
447 +static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
448 +{
449 + int page;
450 + int error_block = offset / BLOCK_SIZE_BMT;
451 + int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
452 + int to_index;
453 +
454 + memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
455 +
456 + to_index = find_available_block(false);
457 +
458 + if (!to_index)
459 + {
460 + MSG(INIT, "Cannot find an available block for BMT\n");
461 + return 0;
462 + }
463 +
464 + { // migrate error page first
465 + MSG(INIT, "Write error page: 0x%x\n", error_page);
466 + if (!write_dat)
467 + {
468 + nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
469 + write_dat = dat_buf;
470 + }
471 + // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
472 +
473 + if (error_block < system_block_count)
474 + set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
475 +
476 + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
477 + {
478 + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
479 + mark_block_bad_bmt(to_index);
480 + return migrate_from_bad(offset, write_dat, write_oob);
481 + }
482 + }
483 +
484 + for (page = 0; page < page_per_block; page++)
485 + {
486 + if (page != error_page)
487 + {
488 + nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
489 + if (is_page_used(dat_buf, oob_buf))
490 + {
491 + if (error_block < system_block_count)
492 + {
493 + set_bad_index_to_oob(oob_buf, error_block);
494 + }
495 + MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
496 + if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
497 + {
498 + MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
499 + mark_block_bad_bmt(to_index);
500 + return migrate_from_bad(offset, write_dat, write_oob);
501 + }
502 + }
503 + }
504 + }
505 +
506 + MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
507 +
508 + return to_index;
509 +}
510 +
511 +static bool write_bmt_to_flash(u8 * dat, u8 * oob)
512 +{
513 + bool need_erase = true;
514 + MSG(INIT, "Try to write BMT\n");
515 +
516 + if (bmt_block_index == 0)
517 + {
518 + // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
519 + need_erase = false;
520 + if (!(bmt_block_index = find_available_block(true)))
521 + {
522 + MSG(INIT, "Cannot find an available block for BMT\n");
523 + return false;
524 + }
525 + }
526 +
527 + MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
528 +
529 + // write bmt to flash
530 + if (need_erase)
531 + {
532 + if (!nand_erase_bmt(OFFSET(bmt_block_index)))
533 + {
534 + MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
535 + mark_block_bad_bmt(OFFSET(bmt_block_index));
536 + // bmt.bad_count++;
537 +
538 + bmt_block_index = 0;
539 + return write_bmt_to_flash(dat, oob); // recursive call
540 + }
541 + }
542 +
543 + if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
544 + {
545 + MSG(INIT, "Write BMT data fail, need to write again\n");
546 + mark_block_bad_bmt(OFFSET(bmt_block_index));
547 + // bmt.bad_count++;
548 +
549 + bmt_block_index = 0;
550 + return write_bmt_to_flash(dat, oob); // recursive call
551 + }
552 +
553 + MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
554 + return true;
555 +}
556 +
557 +/*******************************************************************
558 +* Reconstruct bmt, called when found bmt info doesn't match bad
559 +* block info in flash.
560 +*
561 +* Return NULL for failure
562 +*******************************************************************/
563 +bmt_struct *reconstruct_bmt(bmt_struct * bmt)
564 +{
565 + int i;
566 + int index = system_block_count;
567 + unsigned short bad_index;
568 + int mapped;
569 +
570 + // init everything in BMT struct
571 + bmt->version = BMT_VERSION;
572 + bmt->bad_count = 0;
573 + bmt->mapped_count = 0;
574 +
575 + memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
576 +
577 + for (i = 0; i < bmt_block_count; i++, index++)
578 + {
579 + if (nand_block_bad_bmt(OFFSET(index)))
580 + {
581 + MSG(INIT, "Skip bad block: 0x%x\n", index);
582 + // bmt->bad_count++;
583 + continue;
584 + }
585 +
586 + MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
587 + nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
588 + /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
589 + {
590 + MSG(INIT, "Error when read block %d\n", bmt_block_index);
591 + continue;
592 + } */
593 +
594 + if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
595 + {
596 + MSG(INIT, "get bad index: 0x%x\n", bad_index);
597 + if (bad_index != 0xFFFF)
598 + MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
599 + continue;
600 + }
601 +
602 + MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
603 +
604 + if (!nand_block_bad_bmt(OFFSET(bad_index)))
605 + {
606 + MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
607 + continue; // no need to erase here, it will be erased later when trying to write BMT
608 + }
609 +
610 + if ((mapped = is_block_mapped(bad_index)) >= 0)
611 + {
612 + MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
613 + bmt->table[mapped].mapped_index = index; // use new one instead.
614 + } else
615 + {
616 + // add mapping to BMT
617 + bmt->table[bmt->mapped_count].bad_index = bad_index;
618 + bmt->table[bmt->mapped_count].mapped_index = index;
619 + bmt->mapped_count++;
620 + }
621 +
622 + MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
623 +
624 + }
625 +
626 + MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
627 + // dump_bmt_info(bmt);
628 +
629 + // fill NAND BMT buffer
630 + memset(oob_buf, 0xFF, sizeof(oob_buf));
631 + fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
632 +
633 + // write BMT back
634 + if (!write_bmt_to_flash(dat_buf, oob_buf))
635 + {
636 + MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
637 + }
638 +
639 + return bmt;
640 +}
641 +
642 +/*******************************************************************
643 +* [BMT Interface]
644 +*
645 +* Description:
646 +* Init bmt from nand. Reconstruct if not found or data error
647 +*
648 +* Parameter:
649 +* size: size of bmt and replace pool
650 +*
651 +* Return:
652 +* NULL for failure, and a bmt struct for success
653 +*******************************************************************/
654 +bmt_struct *init_bmt(struct nand_chip * chip, int size)
655 +{
656 + struct mtk_nand_host *host;
657 +
658 + if (size > 0 && size < MAX_BMT_SIZE)
659 + {
660 + MSG(INIT, "Init bmt table, size: %d\n", size);
661 + bmt_block_count = size;
662 + } else
663 + {
664 + MSG(INIT, "Invalid bmt table size: %d\n", size);
665 + return NULL;
666 + }
667 + nand_chip_bmt = chip;
668 + system_block_count = chip->chipsize >> chip->phys_erase_shift;
669 + total_block_count = bmt_block_count + system_block_count;
670 + page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
671 + host = (struct mtk_nand_host *)chip->priv;
672 + mtd_bmt = &host->mtd;
673 +
674 + MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
675 + MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
676 +
677 + // set this flag, and unmapped block in pool will be erased.
678 + pool_erased = 0;
679 + memset(bmt.table, 0, size * sizeof(bmt_entry));
680 + if ((bmt_block_index = load_bmt_data(system_block_count, size)))
681 + {
682 + MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
683 + dump_bmt_info(&bmt);
684 + return &bmt;
685 + } else
686 + {
687 + MSG(INIT, "Load bmt data fail, need re-construct!\n");
688 +#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT.
689 + if (reconstruct_bmt(&bmt))
690 + return &bmt;
691 + else
692 +#endif
693 + return NULL;
694 + }
695 +}
696 +
697 +/*******************************************************************
698 +* [BMT Interface]
699 +*
700 +* Description:
701 +* Update BMT.
702 +*
703 +* Parameter:
704 +* offset: update block/page offset.
705 +* reason: update reason, see update_reason_t for reason.
706 +* dat/oob: data and oob buffer for write fail.
707 +*
708 +* Return:
709 +* Return true for success, and false for failure.
710 +*******************************************************************/
711 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob)
712 +{
713 + int map_index;
714 + int orig_bad_block = -1;
715 + // int bmt_update_index;
716 + int i;
717 + int bad_index = offset / BLOCK_SIZE_BMT;
718 +
719 +#ifndef MTK_NAND_BMT
720 + return false;
721 +#endif
722 + if (reason == UPDATE_WRITE_FAIL)
723 + {
724 + MSG(INIT, "Write fail, need to migrate\n");
725 + if (!(map_index = migrate_from_bad(offset, dat, oob)))
726 + {
727 + MSG(INIT, "migrate fail\n");
728 + return false;
729 + }
730 + } else
731 + {
732 + if (!(map_index = find_available_block(false)))
733 + {
734 + MSG(INIT, "Cannot find block in pool\n");
735 + return false;
736 + }
737 + }
738 +
739 + // now let's update BMT
740 + if (bad_index >= system_block_count) // mapped block become bad, find original bad block
741 + {
742 + for (i = 0; i < bmt_block_count; i++)
743 + {
744 + if (bmt.table[i].mapped_index == bad_index)
745 + {
746 + orig_bad_block = bmt.table[i].bad_index;
747 + break;
748 + }
749 + }
750 + // bmt.bad_count++;
751 + MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
752 +
753 + bmt.table[i].mapped_index = map_index;
754 + } else
755 + {
756 + bmt.table[bmt.mapped_count].mapped_index = map_index;
757 + bmt.table[bmt.mapped_count].bad_index = bad_index;
758 + bmt.mapped_count++;
759 + }
760 +
761 + memset(oob_buf, 0xFF, sizeof(oob_buf));
762 + fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
763 + if (!write_bmt_to_flash(dat_buf, oob_buf))
764 + return false;
765 +
766 + mark_block_bad_bmt(offset);
767 +
768 + return true;
769 +}
770 +
771 +/*******************************************************************
772 +* [BMT Interface]
773 +*
774 +* Description:
775 +* Given an block index, return mapped index if it's mapped, else
776 +* return given index.
777 +*
778 +* Parameter:
779 +* index: given an block index. This value cannot exceed
780 +* system_block_count.
781 +*
782 +* Return NULL for failure
783 +*******************************************************************/
784 +u16 get_mapping_block_index(int index)
785 +{
786 + int i;
787 +#ifndef MTK_NAND_BMT
788 + return index;
789 +#endif
790 + if (index > system_block_count)
791 + {
792 + return index;
793 + }
794 +
795 + for (i = 0; i < bmt.mapped_count; i++)
796 + {
797 + if (bmt.table[i].bad_index == index)
798 + {
799 + return bmt.table[i].mapped_index;
800 + }
801 + }
802 +
803 + return index;
804 +}
805 +#ifdef __KERNEL_NAND__
806 +EXPORT_SYMBOL_GPL(init_bmt);
807 +EXPORT_SYMBOL_GPL(update_bmt);
808 +EXPORT_SYMBOL_GPL(get_mapping_block_index);
809 +
810 +MODULE_LICENSE("GPL");
811 +MODULE_AUTHOR("MediaTek");
812 +MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
813 +#endif
814 diff --git a/drivers/mtd/nand/bmt.h b/drivers/mtd/nand/bmt.h
815 new file mode 100644
816 index 0000000..2d30ea9
817 --- /dev/null
818 +++ b/drivers/mtd/nand/bmt.h
819 @@ -0,0 +1,80 @@
820 +#ifndef __BMT_H__
821 +#define __BMT_H__
822 +
823 +#include "nand_def.h"
824 +
825 +#if defined(__PRELOADER_NAND__)
826 +
827 +#include "nand.h"
828 +
829 +#elif defined(__UBOOT_NAND__)
830 +
831 +#include <linux/mtd/nand.h>
832 +#include "mtk_nand.h"
833 +
834 +#elif defined(__KERNEL_NAND__)
835 +
836 +#include <linux/mtd/mtd.h>
837 +#include <linux/mtd/nand.h>
838 +#include <linux/module.h>
839 +#include "mtk_nand.h"
840 +
841 +#endif
842 +
843 +
844 +#define MAX_BMT_SIZE (0x80)
845 +#define BMT_VERSION (1) // initial version
846 +
847 +#define MAIN_SIGNATURE_OFFSET (0)
848 +#define OOB_SIGNATURE_OFFSET (1)
849 +#define OOB_INDEX_OFFSET (29)
850 +#define OOB_INDEX_SIZE (2)
851 +#define FAKE_INDEX (0xAAAA)
852 +
853 +typedef struct _bmt_entry_
854 +{
855 + u16 bad_index; // bad block index
856 + u16 mapped_index; // mapping block index in the replace pool
857 +} bmt_entry;
858 +
859 +typedef enum
860 +{
861 + UPDATE_ERASE_FAIL,
862 + UPDATE_WRITE_FAIL,
863 + UPDATE_UNMAPPED_BLOCK,
864 + UPDATE_REASON_COUNT,
865 +} update_reason_t;
866 +
867 +typedef struct
868 +{
869 + bmt_entry table[MAX_BMT_SIZE];
870 + u8 version;
871 + u8 mapped_count; // mapped block count in pool
872 + u8 bad_count; // bad block count in pool. Not used in V1
873 +} bmt_struct;
874 +
875 +/***************************************************************
876 +* *
877 +* Interface BMT need to use *
878 +* *
879 +***************************************************************/
880 +extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
881 +extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
882 +extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page);
883 +extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
884 +extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
885 +
886 +
887 +/***************************************************************
888 +* *
889 +* Different function interface for preloader/uboot/kernel *
890 +* *
891 +***************************************************************/
892 +void set_bad_index_to_oob(u8 * oob, u16 index);
893 +
894 +
895 +bmt_struct *init_bmt(struct nand_chip *nand, int size);
896 +bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob);
897 +unsigned short get_mapping_block_index(int index);
898 +
899 +#endif // #ifndef __BMT_H__
900 diff --git a/drivers/mtd/nand/dev-nand.c b/drivers/mtd/nand/dev-nand.c
901 new file mode 100644
902 index 0000000..9fb5235
903 --- /dev/null
904 +++ b/drivers/mtd/nand/dev-nand.c
905 @@ -0,0 +1,63 @@
906 +#include <linux/init.h>
907 +#include <linux/kernel.h>
908 +#include <linux/platform_device.h>
909 +
910 +#include "mt6575_typedefs.h"
911 +
912 +#define RALINK_NAND_CTRL_BASE 0xBE003000
913 +#define NFI_base RALINK_NAND_CTRL_BASE
914 +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
915 +#define NFIECC_base RALINK_NANDECC_CTRL_BASE
916 +#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND
917 +#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC
918 +
919 +#define SURFBOARDINT_NAND 22
920 +#define SURFBOARDINT_NAND_ECC 23
921 +
922 +static struct resource MT7621_resource_nand[] = {
923 + {
924 + .start = NFI_base,
925 + .end = NFI_base + 0x1A0,
926 + .flags = IORESOURCE_MEM,
927 + },
928 + {
929 + .start = NFIECC_base,
930 + .end = NFIECC_base + 0x150,
931 + .flags = IORESOURCE_MEM,
932 + },
933 + {
934 + .start = MT7621_NFI_IRQ_ID,
935 + .flags = IORESOURCE_IRQ,
936 + },
937 + {
938 + .start = MT7621_NFIECC_IRQ_ID,
939 + .flags = IORESOURCE_IRQ,
940 + },
941 +};
942 +
943 +static struct platform_device MT7621_nand_dev = {
944 + .name = "MT7621-NAND",
945 + .id = 0,
946 + .num_resources = ARRAY_SIZE(MT7621_resource_nand),
947 + .resource = MT7621_resource_nand,
948 + .dev = {
949 + .platform_data = &mt7621_nand_hw,
950 + },
951 +};
952 +
953 +
954 +int __init mtk_nand_register(void)
955 +{
956 +
957 + int retval = 0;
958 +
959 + retval = platform_device_register(&MT7621_nand_dev);
960 + if (retval != 0) {
961 + printk(KERN_ERR "register nand device fail\n");
962 + return retval;
963 + }
964 +
965 +
966 + return retval;
967 +}
968 +arch_initcall(mtk_nand_register);
969 diff --git a/drivers/mtd/nand/mt6575_typedefs.h b/drivers/mtd/nand/mt6575_typedefs.h
970 new file mode 100644
971 index 0000000..a7b9647
972 --- /dev/null
973 +++ b/drivers/mtd/nand/mt6575_typedefs.h
974 @@ -0,0 +1,340 @@
975 +/* Copyright Statement:
976 + *
977 + * This software/firmware and related documentation ("MediaTek Software") are
978 + * protected under relevant copyright laws. The information contained herein
979 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
980 + * Without the prior written permission of MediaTek inc. and/or its licensors,
981 + * any reproduction, modification, use or disclosure of MediaTek Software,
982 + * and information contained herein, in whole or in part, shall be strictly prohibited.
983 + */
984 +/* MediaTek Inc. (C) 2010. All rights reserved.
985 + *
986 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
987 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
988 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
989 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
990 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
991 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
992 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
993 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
994 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
995 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
996 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
997 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
998 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
999 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
1000 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
1001 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
1002 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
1003 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
1004 + *
1005 + * The following software/firmware and/or related documentation ("MediaTek Software")
1006 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
1007 + * applicable license agreements with MediaTek Inc.
1008 + */
1009 +
1010 +/*****************************************************************************
1011 +* Copyright Statement:
1012 +* --------------------
1013 +* This software is protected by Copyright and the information contained
1014 +* herein is confidential. The software may not be copied and the information
1015 +* contained herein may not be used or disclosed except with the written
1016 +* permission of MediaTek Inc. (C) 2008
1017 +*
1018 +* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
1019 +* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
1020 +* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
1021 +* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
1022 +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
1023 +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
1024 +* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
1025 +* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
1026 +* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
1027 +* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
1028 +* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
1029 +* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
1030 +*
1031 +* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
1032 +* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
1033 +* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
1034 +* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
1035 +* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
1036 +*
1037 +* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
1038 +* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
1039 +* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
1040 +* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
1041 +* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
1042 +*
1043 +*****************************************************************************/
1044 +
1045 +#ifndef _MT6575_TYPEDEFS_H
1046 +#define _MT6575_TYPEDEFS_H
1047 +
1048 +#if defined (__KERNEL_NAND__)
1049 +#include <linux/bug.h>
1050 +#else
1051 +#define true 1
1052 +#define false 0
1053 +#define bool u8
1054 +#endif
1055 +
1056 +// ---------------------------------------------------------------------------
1057 +// Basic Type Definitions
1058 +// ---------------------------------------------------------------------------
1059 +
1060 +typedef volatile unsigned char *P_kal_uint8;
1061 +typedef volatile unsigned short *P_kal_uint16;
1062 +typedef volatile unsigned int *P_kal_uint32;
1063 +
1064 +typedef long LONG;
1065 +typedef unsigned char UBYTE;
1066 +typedef short SHORT;
1067 +
1068 +typedef signed char kal_int8;
1069 +typedef signed short kal_int16;
1070 +typedef signed int kal_int32;
1071 +typedef long long kal_int64;
1072 +typedef unsigned char kal_uint8;
1073 +typedef unsigned short kal_uint16;
1074 +typedef unsigned int kal_uint32;
1075 +typedef unsigned long long kal_uint64;
1076 +typedef char kal_char;
1077 +
1078 +typedef unsigned int *UINT32P;
1079 +typedef volatile unsigned short *UINT16P;
1080 +typedef volatile unsigned char *UINT8P;
1081 +typedef unsigned char *U8P;
1082 +
1083 +typedef volatile unsigned char *P_U8;
1084 +typedef volatile signed char *P_S8;
1085 +typedef volatile unsigned short *P_U16;
1086 +typedef volatile signed short *P_S16;
1087 +typedef volatile unsigned int *P_U32;
1088 +typedef volatile signed int *P_S32;
1089 +typedef unsigned long long *P_U64;
1090 +typedef signed long long *P_S64;
1091 +
1092 +typedef unsigned char U8;
1093 +typedef signed char S8;
1094 +typedef unsigned short U16;
1095 +typedef signed short S16;
1096 +typedef unsigned int U32;
1097 +typedef signed int S32;
1098 +typedef unsigned long long U64;
1099 +typedef signed long long S64;
1100 +//typedef unsigned char bool;
1101 +
1102 +typedef unsigned char UINT8;
1103 +typedef unsigned short UINT16;
1104 +typedef unsigned int UINT32;
1105 +typedef unsigned short USHORT;
1106 +typedef signed char INT8;
1107 +typedef signed short INT16;
1108 +typedef signed int INT32;
1109 +typedef unsigned int DWORD;
1110 +typedef void VOID;
1111 +typedef unsigned char BYTE;
1112 +typedef float FLOAT;
1113 +
1114 +typedef char *LPCSTR;
1115 +typedef short *LPWSTR;
1116 +
1117 +
1118 +// ---------------------------------------------------------------------------
1119 +// Constants
1120 +// ---------------------------------------------------------------------------
1121 +
1122 +#define IMPORT EXTERN
1123 +#ifndef __cplusplus
1124 + #define EXTERN extern
1125 +#else
1126 + #define EXTERN extern "C"
1127 +#endif
1128 +#define LOCAL static
1129 +#define GLOBAL
1130 +#define EXPORT GLOBAL
1131 +
1132 +#define EQ ==
1133 +#define NEQ !=
1134 +#define AND &&
1135 +#define OR ||
1136 +#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B)))
1137 +
1138 +#ifndef FALSE
1139 + #define FALSE (0)
1140 +#endif
1141 +
1142 +#ifndef TRUE
1143 + #define TRUE (1)
1144 +#endif
1145 +
1146 +#ifndef NULL
1147 + #define NULL (0)
1148 +#endif
1149 +
1150 +//enum boolean {false, true};
1151 +enum {RX, TX, NONE};
1152 +
1153 +#ifndef BOOL
1154 +typedef unsigned char BOOL;
1155 +#endif
1156 +
1157 +typedef enum {
1158 + KAL_FALSE = 0,
1159 + KAL_TRUE = 1,
1160 +} kal_bool;
1161 +
1162 +
1163 +// ---------------------------------------------------------------------------
1164 +// Type Casting
1165 +// ---------------------------------------------------------------------------
1166 +
1167 +#define AS_INT32(x) (*(INT32 *)((void*)x))
1168 +#define AS_INT16(x) (*(INT16 *)((void*)x))
1169 +#define AS_INT8(x) (*(INT8 *)((void*)x))
1170 +
1171 +#define AS_UINT32(x) (*(UINT32 *)((void*)x))
1172 +#define AS_UINT16(x) (*(UINT16 *)((void*)x))
1173 +#define AS_UINT8(x) (*(UINT8 *)((void*)x))
1174 +
1175 +
1176 +// ---------------------------------------------------------------------------
1177 +// Register Manipulations
1178 +// ---------------------------------------------------------------------------
1179 +
1180 +#define READ_REGISTER_UINT32(reg) \
1181 + (*(volatile UINT32 * const)(reg))
1182 +
1183 +#define WRITE_REGISTER_UINT32(reg, val) \
1184 + (*(volatile UINT32 * const)(reg)) = (val)
1185 +
1186 +#define READ_REGISTER_UINT16(reg) \
1187 + (*(volatile UINT16 * const)(reg))
1188 +
1189 +#define WRITE_REGISTER_UINT16(reg, val) \
1190 + (*(volatile UINT16 * const)(reg)) = (val)
1191 +
1192 +#define READ_REGISTER_UINT8(reg) \
1193 + (*(volatile UINT8 * const)(reg))
1194 +
1195 +#define WRITE_REGISTER_UINT8(reg, val) \
1196 + (*(volatile UINT8 * const)(reg)) = (val)
1197 +
1198 +#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x)))
1199 +#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
1200 +#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y))
1201 +#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y))
1202 +#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z))
1203 +
1204 +#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x)))
1205 +#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
1206 +#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y))
1207 +#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y))
1208 +#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z))
1209 +
1210 +#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x)))
1211 +#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y))
1212 +#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
1213 +#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
1214 +#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
1215 +
1216 +
1217 +#define DRV_Reg8(addr) INREG8(addr)
1218 +#define DRV_WriteReg8(addr, data) OUTREG8(addr, data)
1219 +#define DRV_SetReg8(addr, data) SETREG8(addr, data)
1220 +#define DRV_ClrReg8(addr, data) CLRREG8(addr, data)
1221 +
1222 +#define DRV_Reg16(addr) INREG16(addr)
1223 +#define DRV_WriteReg16(addr, data) OUTREG16(addr, data)
1224 +#define DRV_SetReg16(addr, data) SETREG16(addr, data)
1225 +#define DRV_ClrReg16(addr, data) CLRREG16(addr, data)
1226 +
1227 +#define DRV_Reg32(addr) INREG32(addr)
1228 +#define DRV_WriteReg32(addr, data) OUTREG32(addr, data)
1229 +#define DRV_SetReg32(addr, data) SETREG32(addr, data)
1230 +#define DRV_ClrReg32(addr, data) CLRREG32(addr, data)
1231 +
1232 +// !!! DEPRECATED, WILL BE REMOVED LATER !!!
1233 +#define DRV_Reg(addr) DRV_Reg16(addr)
1234 +#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data)
1235 +#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data)
1236 +#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data)
1237 +
1238 +
1239 +// ---------------------------------------------------------------------------
1240 +// Compiler Time Deduction Macros
1241 +// ---------------------------------------------------------------------------
1242 +
1243 +#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? (n) :
1244 +#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
1245 +#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
1246 +#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
1247 +#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
1248 +#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
1249 +
1250 +#define MASK_OFFSET_ERROR (0xFFFFFFFF)
1251 +
1252 +#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
1253 +
1254 +
1255 +// ---------------------------------------------------------------------------
1256 +// Assertions
1257 +// ---------------------------------------------------------------------------
1258 +
1259 +#ifndef ASSERT
1260 + #define ASSERT(expr) BUG_ON(!(expr))
1261 +#endif
1262 +
1263 +#ifndef NOT_IMPLEMENTED
1264 + #define NOT_IMPLEMENTED() BUG_ON(1)
1265 +#endif
1266 +
1267 +#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__)
1268 +#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
1269 +#define STATIC_ASSERT_XX(pred, line) \
1270 + extern char assertion_failed_at_##line[(pred) ? 1 : -1]
1271 +
1272 +// ---------------------------------------------------------------------------
1273 +// Resolve Compiler Warnings
1274 +// ---------------------------------------------------------------------------
1275 +
1276 +#define NOT_REFERENCED(x) { (x) = (x); }
1277 +
1278 +
1279 +// ---------------------------------------------------------------------------
1280 +// Utilities
1281 +// ---------------------------------------------------------------------------
1282 +
1283 +#define MAXIMUM(A,B) (((A)>(B))?(A):(B))
1284 +#define MINIMUM(A,B) (((A)<(B))?(A):(B))
1285 +
1286 +#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
1287 +#define DVT_DELAYMACRO(u4Num) \
1288 +{ \
1289 + UINT32 u4Count = 0 ; \
1290 + for (u4Count = 0; u4Count < u4Num; u4Count++ ); \
1291 +} \
1292 +
1293 +#define A68351B 0
1294 +#define B68351B 1
1295 +#define B68351D 2
1296 +#define B68351E 3
1297 +#define UNKNOWN_IC_VERSION 0xFF
1298 +
1299 +/* NAND driver */
1300 +struct mtk_nand_host_hw {
1301 + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
1302 + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
1303 + unsigned int nfi_cs_num; /* NFI_CS_NUM */
1304 + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
1305 + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
1306 + unsigned int nand_ecc_size;
1307 + unsigned int nand_ecc_bytes;
1308 + unsigned int nand_ecc_mode;
1309 +};
1310 +extern struct mtk_nand_host_hw mt7621_nand_hw;
1311 +extern unsigned int CFG_BLOCKSIZE;
1312 +
1313 +#endif // _MT6575_TYPEDEFS_H
1314 +
1315 diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
1316 new file mode 100644
1317 index 0000000..00e150c
1318 --- /dev/null
1319 +++ b/drivers/mtd/nand/mtk_nand.c
1320 @@ -0,0 +1,2304 @@
1321 +/******************************************************************************
1322 +* mtk_nand.c - MTK NAND Flash Device Driver
1323 + *
1324 +* Copyright 2009-2012 MediaTek Co.,Ltd.
1325 + *
1326 +* DESCRIPTION:
1327 +* This file provides NAND-related functions to the other drivers
1328 + *
1329 +* modification history
1330 +* ----------------------------------------
1331 +* v3.0, 11 Feb 2010, mtk
1332 +* ----------------------------------------
1333 +******************************************************************************/
1334 +#include "nand_def.h"
1335 +#include <linux/slab.h>
1336 +#include <linux/init.h>
1337 +#include <linux/module.h>
1338 +#include <linux/delay.h>
1339 +#include <linux/errno.h>
1340 +#include <linux/sched.h>
1341 +#include <linux/types.h>
1342 +#include <linux/wait.h>
1343 +#include <linux/spinlock.h>
1344 +#include <linux/interrupt.h>
1345 +#include <linux/mtd/mtd.h>
1346 +#include <linux/mtd/nand.h>
1347 +#include <linux/mtd/partitions.h>
1348 +#include <linux/mtd/nand_ecc.h>
1349 +#include <linux/dma-mapping.h>
1350 +#include <linux/jiffies.h>
1351 +#include <linux/platform_device.h>
1352 +#include <linux/proc_fs.h>
1353 +#include <linux/time.h>
1354 +#include <linux/mm.h>
1355 +#include <asm/io.h>
1356 +#include <asm/cacheflush.h>
1357 +#include <asm/uaccess.h>
1358 +#include <linux/miscdevice.h>
1359 +#include "mtk_nand.h"
1360 +#include "nand_device_list.h"
1361 +
1362 +#include "bmt.h"
1363 +#include "partition.h"
1364 +
1365 +unsigned int CFG_BLOCKSIZE;
1366 +
1367 +static int shift_on_bbt = 0;
1368 +extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag);
1369 +extern int nand_bbt_get(struct mtd_info *mtd, int page);
1370 +int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page);
1371 +
1372 +static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
1373 +
1374 +#define NAND_CMD_STATUS_MULTI 0x71
1375 +
1376 +void show_stack(struct task_struct *tsk, unsigned long *sp);
1377 +extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
1378 +extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
1379 +
1380 +struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */
1381 +struct mtk_nand_host_hw mt7621_nand_hw = {
1382 + .nfi_bus_width = 8,
1383 + .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
1384 + .nfi_cs_num = NFI_CS_NUM,
1385 + .nand_sec_size = 512,
1386 + .nand_sec_shift = 9,
1387 + .nand_ecc_size = 2048,
1388 + .nand_ecc_bytes = 32,
1389 + .nand_ecc_mode = NAND_ECC_HW,
1390 +};
1391 +
1392 +
1393 +/*******************************************************************************
1394 + * Global Variable Definition
1395 + *******************************************************************************/
1396 +
1397 +#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
1398 +	do { \
1399 +		DRV_WriteReg(NFI_CMD_REG16,cmd);\
1400 +		while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
1401 +		DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
1402 +		DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
1403 +		DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
1404 +		while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
1405 +	}while(0) /* no trailing ';' so the macro expands safely in if/else */
1406 +
1407 +//-------------------------------------------------------------------------------
1408 +static struct NAND_CMD g_kCMD;
1409 +static u32 g_u4ChipVer;
1410 +bool g_bInitDone;
1411 +static bool g_bcmdstatus;
1412 +static u32 g_value = 0;
1413 +static int g_page_size;
1414 +
1415 +BOOL g_bHwEcc = true;
1416 +
1417 +
1418 +static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue
1419 +static u8 local_buffer[4096 + 512];
1420 +
1421 +extern void nand_release_device(struct mtd_info *mtd);
1422 +extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
1423 +
1424 +#if defined(MTK_NAND_BMT)
1425 +static bmt_struct *g_bmt;
1426 +#endif
1427 +struct mtk_nand_host *host;
1428 +extern struct mtd_partition g_pasStatic_Partition[];
1429 +int part_num = NUM_PARTITIONS;
1430 +int manu_id;
1431 +int dev_id;
1432 +
1433 +static u8 local_oob_buf[NAND_MAX_OOBSIZE];
1434 +
1435 +static u8 nand_badblock_offset = 0;
1436 +
1437 +void nand_enable_clock(void)
1438 +{
1439 + //enable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1440 +}
1441 +
1442 +void nand_disable_clock(void)
1443 +{
1444 + //disable_clock(MT65XX_PDN_PERI_NFI, "NAND");
1445 +}
1446 +
1447 +static struct nand_ecclayout nand_oob_16 = {
1448 + .eccbytes = 8,
1449 + .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
1450 + .oobfree = {{1, 6}, {0, 0}}
1451 +};
1452 +
1453 +struct nand_ecclayout nand_oob_64 = {
1454 + .eccbytes = 32,
1455 + .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
1456 + 40, 41, 42, 43, 44, 45, 46, 47,
1457 + 48, 49, 50, 51, 52, 53, 54, 55,
1458 + 56, 57, 58, 59, 60, 61, 62, 63},
1459 + .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
1460 +};
1461 +
1462 +struct nand_ecclayout nand_oob_128 = {
1463 +	.eccbytes = 64,
1464 +	.eccpos = {
1465 +		64, 65, 66, 67, 68, 69, 70, 71,
1466 +		72, 73, 74, 75, 76, 77, 78, 79,
1467 +		80, 81, 82, 83, 84, 85, 86, 87,
1468 +		88, 89, 90, 91, 92, 93, 94, 95,
1469 +		96, 97, 98, 99, 100, 101, 102, 103,
1470 +		104, 105, 106, 107, 108, 109, 110, 111,
1471 +		112, 113, 114, 115, 116, 117, 118, 119,
1472 +		120, 121, 122, 123, 124, 125, 126, 127},
1473 +	.oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
1474 +};
1475 +
1476 +flashdev_info devinfo;
1477 +
1478 +void dump_nfi(void)
1479 +{
1480 +}
1481 +
1482 +void dump_ecc(void)
1483 +{
1484 +}
1485 +
1486 +u32
1487 +nand_virt_to_phys_add(u32 va)
1488 +{
1489 + u32 pageOffset = (va & (PAGE_SIZE - 1));
1490 + pgd_t *pgd;
1491 + pmd_t *pmd;
1492 + pte_t *pte;
1493 + u32 pa;
1494 +
1495 + if (virt_addr_valid(va))
1496 + return __virt_to_phys(va);
1497 +
1498 + if (NULL == current) {
1499 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
1500 + return 0;
1501 + }
1502 +
1503 + if (NULL == current->mm) {
1504 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
1505 + return 0;
1506 + }
1507 +
1508 + pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
1509 + if (pgd_none(*pgd) || pgd_bad(*pgd)) {
1510 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
1511 + return 0;
1512 + }
1513 +
1514 + pmd = pmd_offset((pud_t *)pgd, va);
1515 + if (pmd_none(*pmd) || pmd_bad(*pmd)) {
1516 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
1517 + return 0;
1518 + }
1519 +
1520 + pte = pte_offset_map(pmd, va);
1521 + if (pte_present(*pte)) {
1522 + pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
1523 + return pa;
1524 + }
1525 +
1526 + printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
1527 + return 0;
1528 +}
1529 +EXPORT_SYMBOL(nand_virt_to_phys_add);
1530 +
1531 +bool
1532 +get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo)
1533 +{
1534 + u32 index;
1535 + for (index = 0; gen_FlashTable[index].id != 0; index++) {
1536 + if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) {
1537 + pdevinfo->id = gen_FlashTable[index].id;
1538 + pdevinfo->ext_id = gen_FlashTable[index].ext_id;
1539 + pdevinfo->blocksize = gen_FlashTable[index].blocksize;
1540 + pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
1541 + pdevinfo->iowidth = gen_FlashTable[index].iowidth;
1542 + pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
1543 + pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
1544 + pdevinfo->pagesize = gen_FlashTable[index].pagesize;
1545 + pdevinfo->sparesize = gen_FlashTable[index].sparesize;
1546 + pdevinfo->totalsize = gen_FlashTable[index].totalsize;
1547 + memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
1548 + printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id);
1549 +
1550 + goto find;
1551 + }
1552 + }
1553 +
1554 +find:
1555 + if (0 == pdevinfo->id) {
1556 + printk(KERN_INFO "Device not found, ID: %x\n", id);
1557 + return false;
1558 + } else {
1559 + return true;
1560 + }
1561 +}
1562 +
1563 +static void
1564 +ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
1565 +{
1566 + u32 u4ENCODESize;
1567 + u32 u4DECODESize;
1568 + u32 ecc_bit_cfg = ECC_CNFG_ECC4;
1569 +
1570 + switch(ecc_bit){
1571 + case 4:
1572 + ecc_bit_cfg = ECC_CNFG_ECC4;
1573 + break;
1574 + case 8:
1575 + ecc_bit_cfg = ECC_CNFG_ECC8;
1576 + break;
1577 + case 10:
1578 + ecc_bit_cfg = ECC_CNFG_ECC10;
1579 + break;
1580 + case 12:
1581 + ecc_bit_cfg = ECC_CNFG_ECC12;
1582 + break;
1583 + default:
1584 + break;
1585 + }
1586 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1587 + do {
1588 + } while (!DRV_Reg16(ECC_DECIDLE_REG16));
1589 +
1590 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1591 + do {
1592 + } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
1593 +
1594 + /* setup FDM register base */
1595 + DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
1596 +
1597 + /* Sector + FDM */
1598 + u4ENCODESize = (hw->nand_sec_size + 8) << 3;
1599 + /* Sector + FDM + YAFFS2 meta data bits */
1600 + u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13;
1601 +
1602 + /* configure ECC decoder && encoder */
1603 + DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
1604 +
1605 + DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
1606 + NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
1607 +}
1608 +
1609 +static void
1610 +ECC_Decode_Start(void)
1611 +{
1612 + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1613 + ;
1614 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
1615 +}
1616 +
1617 +static void
1618 +ECC_Decode_End(void)
1619 +{
1620 + while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
1621 + ;
1622 + DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
1623 +}
1624 +
1625 +static void
1626 +ECC_Encode_Start(void)
1627 +{
1628 + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
1629 + ;
1630 + mb();
1631 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
1632 +}
1633 +
1634 +static void
1635 +ECC_Encode_End(void)
1636 +{
1637 + /* wait for device returning idle */
1638 + while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
1639 + mb();
1640 + DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
1641 +}
1642 +
1643 +static bool
1644 +mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
1645 +{
1646 + bool bRet = true;
1647 + u16 u2SectorDoneMask = 1 << u4SecIndex;
1648 + u32 u4ErrorNumDebug, i, u4ErrNum;
1649 + u32 timeout = 0xFFFF;
1650 + // int el;
1651 + u32 au4ErrBitLoc[6];
1652 + u32 u4ErrByteLoc, u4BitOffset;
1653 + u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
1654 +
1655 + //4 // Wait for Decode Done
1656 + while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
1657 + timeout--;
1658 + if (0 == timeout)
1659 + return false;
1660 + }
1661 + /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
1662 + memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
1663 + u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
1664 + u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2);
1665 + u4ErrNum &= 0xF;
1666 +
1667 + if (u4ErrNum) {
1668 + if (0xF == u4ErrNum) {
1669 + mtd->ecc_stats.failed++;
1670 + bRet = false;
1671 + //printk(KERN_ERR"UnCorrectable at PageAddr=%d\n", u4PageAddr);
1672 + } else {
1673 + for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
1674 + au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
1675 + u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
1676 + if (u4ErrBitLoc1th < 0x1000) {
1677 + u4ErrByteLoc = u4ErrBitLoc1th / 8;
1678 + u4BitOffset = u4ErrBitLoc1th % 8;
1679 + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1680 + mtd->ecc_stats.corrected++;
1681 + } else {
1682 + mtd->ecc_stats.failed++;
1683 + }
1684 + u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
1685 + if (0 != u4ErrBitLoc2nd) {
1686 + if (u4ErrBitLoc2nd < 0x1000) {
1687 + u4ErrByteLoc = u4ErrBitLoc2nd / 8;
1688 + u4BitOffset = u4ErrBitLoc2nd % 8;
1689 + pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
1690 + mtd->ecc_stats.corrected++;
1691 + } else {
1692 + mtd->ecc_stats.failed++;
1693 + //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
1694 + }
1695 + }
1696 + }
1697 + }
1698 + if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
1699 + bRet = false;
1700 + }
1701 + return bRet;
1702 +}
1703 +
1704 +static bool
1705 +mtk_nand_RFIFOValidSize(u16 u2Size)
1706 +{
1707 + u32 timeout = 0xFFFF;
1708 + while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
1709 + timeout--;
1710 + if (0 == timeout)
1711 + return false;
1712 + }
1713 + return true;
1714 +}
1715 +
1716 +static bool
1717 +mtk_nand_WFIFOValidSize(u16 u2Size)
1718 +{
1719 + u32 timeout = 0xFFFF;
1720 +
1721 + while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
1722 + timeout--;
1723 + if (0 == timeout)
1724 + return false;
1725 + }
1726 + return true;
1727 +}
1728 +
1729 +static bool
1730 +mtk_nand_status_ready(u32 u4Status)
1731 +{
1732 + u32 timeout = 0xFFFF;
1733 +
1734 + while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
1735 + timeout--;
1736 + if (0 == timeout)
1737 + return false;
1738 + }
1739 + return true;
1740 +}
1741 +
1742 +static bool
1743 +mtk_nand_reset(void)
1744 +{
1745 + int timeout = 0xFFFF;
1746 + if (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1747 + mb();
1748 + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1749 + while (DRV_Reg16(NFI_MASTERSTA_REG16)) {
1750 + timeout--;
1751 + if (!timeout)
1752 + MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
1753 + }
1754 + }
1755 + /* issue reset operation */
1756 + mb();
1757 + DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
1758 +
1759 + return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
1760 +}
1761 +
1762 +static void
1763 +mtk_nand_set_mode(u16 u2OpMode)
1764 +{
1765 + u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
1766 + u2Mode &= ~CNFG_OP_MODE_MASK;
1767 + u2Mode |= u2OpMode;
1768 + DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
1769 +}
1770 +
1771 +static void
1772 +mtk_nand_set_autoformat(bool bEnable)
1773 +{
1774 + if (bEnable)
1775 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1776 + else
1777 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
1778 +}
1779 +
1780 +static void
1781 +mtk_nand_configure_fdm(u16 u2FDMSize)
1782 +{
1783 + NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
1784 + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
1785 + NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
1786 +}
1787 +
1788 +static void
1789 +mtk_nand_configure_lock(void)
1790 +{
1791 + u32 u4WriteColNOB = 2;
1792 + u32 u4WriteRowNOB = 3;
1793 + u32 u4EraseColNOB = 0;
1794 + u32 u4EraseRowNOB = 3;
1795 + DRV_WriteReg16(NFI_LOCKANOB_REG16,
1796 + (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
1797 +
1798 + if (CHIPVER_ECO_1 == g_u4ChipVer) {
1799 + int i;
1800 + for (i = 0; i < 16; ++i) {
1801 + DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
1802 + DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
1803 + }
1804 + //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
1805 + DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
1806 + DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
1807 + }
1808 +}
1809 +
1810 +static bool
1811 +mtk_nand_pio_ready(void)
1812 +{
1813 + int count = 0;
1814 + while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
1815 + count++;
1816 + if (count > 0xffff) {
1817 + printk("PIO_DIRDY timeout\n");
1818 + return false;
1819 + }
1820 + }
1821 +
1822 + return true;
1823 +}
1824 +
1825 +static bool
1826 +mtk_nand_set_command(u16 command)
1827 +{
1828 + mb();
1829 + DRV_WriteReg16(NFI_CMD_REG16, command);
1830 + return mtk_nand_status_ready(STA_CMD_STATE);
1831 +}
1832 +
1833 +static bool
1834 +mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
1835 +{
1836 + mb();
1837 + DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
1838 + DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
1839 + DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
1840 + return mtk_nand_status_ready(STA_ADDR_STATE);
1841 +}
1842 +
1843 +static bool
1844 +mtk_nand_check_RW_count(u16 u2WriteSize)
1845 +{
1846 + u32 timeout = 0xFFFF;
1847 + u16 u2SecNum = u2WriteSize >> 9;
1848 +
1849 + while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) {
1850 + timeout--;
1851 + if (0 == timeout) {
1852 + printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
1853 + return false;
1854 + }
1855 + }
1856 + return true;
1857 +}
1858 +
1859 +static bool
1860 +mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf)
1861 +{
1862 + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1863 + bool bRet = false;
1864 + u16 sec_num = 1 << (nand->page_shift - 9);
1865 + u32 col_addr = u4ColAddr;
1866 + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1867 + if (nand->options & NAND_BUSWIDTH_16)
1868 + col_addr /= 2;
1869 +
1870 + if (!mtk_nand_reset())
1871 + goto cleanup;
1872 + if (g_bHwEcc) {
1873 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1874 + } else {
1875 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1876 + }
1877 +
1878 + mtk_nand_set_mode(CNFG_OP_READ);
1879 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1880 + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1881 +
1882 + if (full) {
1883 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1884 +
1885 + if (g_bHwEcc)
1886 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1887 + else
1888 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1889 + } else {
1890 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1891 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1892 + }
1893 +
1894 + mtk_nand_set_autoformat(full);
1895 + if (full)
1896 + if (g_bHwEcc)
1897 + ECC_Decode_Start();
1898 + if (!mtk_nand_set_command(NAND_CMD_READ0))
1899 + goto cleanup;
1900 + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1901 + goto cleanup;
1902 + if (!mtk_nand_set_command(NAND_CMD_READSTART))
1903 + goto cleanup;
1904 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
1905 + goto cleanup;
1906 +
1907 + bRet = true;
1908 +
1909 +cleanup:
1910 + return bRet;
1911 +}
1912 +
1913 +static bool
1914 +mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
1915 +{
1916 + bool bRet = false;
1917 + u32 sec_num = 1 << (nand->page_shift - 9);
1918 + u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
1919 + if (nand->options & NAND_BUSWIDTH_16)
1920 + col_addr /= 2;
1921 +
1922 + /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
1923 + if (!mtk_nand_reset())
1924 + return false;
1925 +
1926 + mtk_nand_set_mode(CNFG_OP_PRGM);
1927 +
1928 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
1929 +
1930 + DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
1931 +
1932 + if (full) {
1933 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1934 + if (g_bHwEcc)
1935 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1936 + else
1937 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1938 + } else {
1939 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
1940 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
1941 + }
1942 +
1943 + mtk_nand_set_autoformat(full);
1944 +
1945 + if (full)
1946 + if (g_bHwEcc)
1947 + ECC_Encode_Start();
1948 +
1949 + if (!mtk_nand_set_command(NAND_CMD_SEQIN))
1950 + goto cleanup;
1951 + // FIXME: handle any kind of address cycle
1952 + if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
1953 + goto cleanup;
1954 +
1955 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
1956 + goto cleanup;
1957 +
1958 + bRet = true;
1959 +
1960 +cleanup:
1961 + return bRet;
1962 +}
1963 +
1964 +static bool
1965 +mtk_nand_check_dececc_done(u32 u4SecNum)
1966 +{
1967 + u32 timeout, dec_mask;
1968 +
1969 + timeout = 0xffff;
1970 + dec_mask = (1 << u4SecNum) - 1;
1971 + while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0)
1972 + timeout--;
1973 + if (timeout == 0) {
1974 + MSG(VERIFY, "ECC_DECDONE: timeout\n");
1975 + return false;
1976 + }
1977 + return true;
1978 +}
1979 +
1980 +static bool
1981 +mtk_nand_mcu_read_data(u8 * buf, u32 length)
1982 +{
1983 + int timeout = 0xffff;
1984 + u32 i;
1985 + u32 *buf32 = (u32 *) buf;
1986 + if ((u32) buf % 4 || length % 4)
1987 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
1988 + else
1989 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
1990 +
1991 + //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
1992 + mb();
1993 + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
1994 +
1995 + if ((u32) buf % 4 || length % 4) {
1996 + for (i = 0; (i < (length)) && (timeout > 0);) {
1997 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
1998 + *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
1999 + i++;
2000 + } else {
2001 + timeout--;
2002 + }
2003 + if (0 == timeout) {
2004 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2005 + dump_nfi();
2006 + return false;
2007 + }
2008 + }
2009 + } else {
2010 + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2011 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2012 + *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
2013 + i++;
2014 + } else {
2015 + timeout--;
2016 + }
2017 + if (0 == timeout) {
2018 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2019 + dump_nfi();
2020 + return false;
2021 + }
2022 + }
2023 + }
2024 + return true;
2025 +}
2026 +
2027 +static bool
2028 +mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
2029 +{
2030 + return mtk_nand_mcu_read_data(pDataBuf, u4Size);
2031 +}
2032 +
2033 +static bool
2034 +mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
2035 +{
2036 + u32 timeout = 0xFFFF;
2037 + u32 i;
2038 + u32 *pBuf32;
2039 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2040 + mb();
2041 + NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
2042 + pBuf32 = (u32 *) buf;
2043 +
2044 + if ((u32) buf % 4 || length % 4)
2045 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2046 + else
2047 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2048 +
2049 + if ((u32) buf % 4 || length % 4) {
2050 + for (i = 0; (i < (length)) && (timeout > 0);) {
2051 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2052 + DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
2053 + i++;
2054 + } else {
2055 + timeout--;
2056 + }
2057 + if (0 == timeout) {
2058 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2059 + dump_nfi();
2060 + return false;
2061 + }
2062 + }
2063 + } else {
2064 + for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
2065 + if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
2066 + DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
2067 + i++;
2068 + } else {
2069 + timeout--;
2070 + }
2071 + if (0 == timeout) {
2072 + printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
2073 + dump_nfi();
2074 + return false;
2075 + }
2076 + }
2077 + }
2078 +
2079 + return true;
2080 +}
2081 +
2082 +static bool
2083 +mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
2084 +{
2085 + return mtk_nand_mcu_write_data(mtd, buf, size);
2086 +}
2087 +
2088 +static void
2089 +mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
2090 +{
2091 + u32 i;
2092 + u32 *pBuf32 = (u32 *) pDataBuf;
2093 +
2094 + if (pBuf32) {
2095 + for (i = 0; i < u4SecNum; ++i) {
2096 + *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
2097 + *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
2098 + }
2099 + }
2100 +}
2101 +
2102 +static u8 fdm_buf[64];
2103 +static void
2104 +mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
2105 +{
2106 + u32 i, j;
2107 + u8 checksum = 0;
2108 + bool empty = true;
2109 + struct nand_oobfree *free_entry;
2110 + u32 *pBuf32;
2111 +
2112 + memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
2113 +
2114 + free_entry = chip->ecc.layout->oobfree;
2115 + for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
2116 + for (j = 0; j < free_entry[i].length; j++) {
2117 + if (pDataBuf[free_entry[i].offset + j] != 0xFF)
2118 + empty = false;
2119 + checksum ^= pDataBuf[free_entry[i].offset + j];
2120 + }
2121 + }
2122 +
2123 + if (!empty) {
2124 + fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
2125 + }
2126 +
2127 + pBuf32 = (u32 *) fdm_buf;
2128 + for (i = 0; i < u4SecNum; ++i) {
2129 + DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
2130 + DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
2131 + }
2132 +}
2133 +
2134 +static void
2135 +mtk_nand_stop_read(void)
2136 +{
2137 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2138 + mtk_nand_reset();
2139 + if (g_bHwEcc)
2140 + ECC_Decode_End();
2141 + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2142 +}
2143 +
2144 +static void
2145 +mtk_nand_stop_write(void)
2146 +{
2147 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2148 + if (g_bHwEcc)
2149 + ECC_Encode_End();
2150 + DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
2151 +}
2152 +
2153 +bool
2154 +mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2155 +{
2156 + u8 *buf;
2157 + bool bRet = true;
2158 + struct nand_chip *nand = mtd->priv;
2159 + u32 u4SecNum = u4PageSize >> 9;
2160 +
2161 + if (((u32) pPageBuf % 16) && local_buffer_16_align)
2162 + buf = local_buffer_16_align;
2163 + else
2164 + buf = pPageBuf;
2165 + if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) {
2166 + int j;
2167 + for (j = 0 ; j < u4SecNum; j++) {
2168 + if (!mtk_nand_read_page_data(mtd, buf+j*512, 512))
2169 + bRet = false;
2170 + if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1))
2171 + bRet = false;
2172 + if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr))
2173 + bRet = false;
2174 + }
2175 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2176 + bRet = false;
2177 +
2178 + mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
2179 + mtk_nand_stop_read();
2180 + }
2181 +
2182 + if (buf == local_buffer_16_align)
2183 + memcpy(pPageBuf, buf, u4PageSize);
2184 +
2185 + return bRet;
2186 +}
2187 +
2188 +int
2189 +mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
2190 +{
2191 + struct nand_chip *chip = mtd->priv;
2192 + u32 u4SecNum = u4PageSize >> 9;
2193 + u8 *buf;
2194 + u8 status;
2195 +
2196 + MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
2197 +
2198 + if (((u32) pPageBuf % 16) && local_buffer_16_align) {
2199 + printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
2200 + memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
2201 + buf = local_buffer_16_align;
2202 + } else
2203 + buf = pPageBuf;
2204 +
2205 + if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) {
2206 + mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
2207 + (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
2208 + (void)mtk_nand_check_RW_count(u4PageSize);
2209 + mtk_nand_stop_write();
2210 + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2211 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
2212 + }
2213 +
2214 + status = chip->waitfunc(mtd, chip);
2215 + if (status & NAND_STATUS_FAIL)
2216 + return -EIO;
2217 + return 0;
2218 +}
2219 +
2220 +static int
2221 +get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk)
2222 +{
2223 + struct nand_chip *chip = mtd->priv;
2224 + int i;
2225 +
2226 + *start_blk = 0;
2227 + for (i = 0; i <= part_num; i++)
2228 + {
2229 + if (i == part_num)
2230 + {
2231 + // try the remaining region after the last listed partition
2232 + *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1;
2233 + if (*start_blk <= *end_blk)
2234 + {
2235 + if ((block >= *start_blk) && (block <= *end_blk))
2236 + break;
2237 + }
2238 + }
2239 + // skip the full-size (MTDPART_SIZ_FULL) partition entry
2240 + else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL)
2241 + {
2242 + continue;
2243 + }
2244 + *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1;
2245 + if ((block >= *start_blk) && (block <= *end_blk))
2246 + break;
2247 + *start_blk = *end_blk + 1;
2248 + }
2249 + if (*start_blk > *end_blk)
2250 + {
2251 + return -1;
2252 + }
2253 + return 0;
2254 +}
2255 +
2256 +static int
2257 +block_remap(struct mtd_info *mtd, int block)
2258 +{
2259 + struct nand_chip *chip = mtd->priv;
2260 + int start_blk, end_blk;
2261 + int j, block_offset;
2262 + int bad_block = 0;
2263 +
2264 + if (chip->bbt == NULL) {
2265 + printk("ERROR!! no bbt table for block_remap\n");
2266 + return -1;
2267 + }
2268 +
2269 + if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) {
2270 + printk("ERROR!! can not find start_blk and end_blk\n");
2271 + return -1;
2272 + }
2273 +
2274 + block_offset = block - start_blk;
2275 + for (j = start_blk; j <= end_blk;j++) {
2276 + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) {
2277 + if (!block_offset)
2278 + break;
2279 + block_offset--;
2280 + } else {
2281 + bad_block++;
2282 + }
2283 + }
2284 + if (j <= end_blk) {
2285 + return j;
2286 + } else {
2287 + // remap to the bad block
2288 + for (j = end_blk; bad_block > 0; j--)
2289 + {
2290 + if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0)
2291 + {
2292 + bad_block--;
2293 + if (bad_block <= block_offset)
2294 + return j;
2295 + }
2296 + }
2297 + }
2298 +
2299 + printk("Error!! block_remap error\n");
2300 + return -1;
2301 +}
2302 +
2303 +int
2304 +check_block_remap(struct mtd_info *mtd, int block)
2305 +{
2306 + if (shift_on_bbt)
2307 + return block_remap(mtd, block);
2308 + else
2309 + return block;
2310 +}
2311 +EXPORT_SYMBOL(check_block_remap);
2312 +
2313 +
2314 +static int
2315 +write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk)
2316 +{
2317 + struct nand_chip *chip = mtd->priv;
2318 + int i, j, to_page = 0, first_page;
2319 + char *buf, *oob;
2320 + int start_blk = 0, end_blk;
2321 + int mapped_block;
2322 + int page_per_block_bit = chip->phys_erase_shift - chip->page_shift;
2323 + int block = page >> page_per_block_bit;
2324 +
2325 + // find next available block in the same MTD partition
2326 + mapped_block = block_remap(mtd, block);
2327 + if (mapped_block == -1)
2328 + return NAND_STATUS_FAIL;
2329 +
2330 + get_start_end_block(mtd, block, &start_blk, &end_blk);
2331 +
2332 + buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA);
2333 + if (buf == NULL)
2334 + return -1;
2335 +
2336 + oob = buf + mtd->writesize;
2337 + for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) {
2338 + if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) {
2339 + int status;
2340 + status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit);
2341 + if (status & NAND_STATUS_FAIL) {
2342 + mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift);
2343 + nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3);
2344 + } else {
2345 + /* good block */
2346 + to_page = (*to_blk) << page_per_block_bit;
2347 + break;
2348 + }
2349 + }
2350 + }
2351 +
2352 + if (!to_page) {
2353 + kfree(buf);
2354 + return -1;
2355 + }
2356 +
2357 + first_page = (page >> page_per_block_bit) << page_per_block_bit;
2358 + for (i = 0; i < (1 << page_per_block_bit); i++) {
2359 + if ((first_page + i) != page) {
2360 + mtk_nand_read_oob_hw(mtd, chip, (first_page+i));
2361 + for (j = 0; j < mtd->oobsize; j++)
2362 + if (chip->oob_poi[j] != (unsigned char)0xff)
2363 + break;
2364 + if (j < mtd->oobsize) {
2365 + mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob);
2366 + memset(oob, 0xff, mtd->oobsize);
2367 + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) {
2368 + int ret, new_blk = 0;
2369 + nand_bbt_set(mtd, to_page, 0x3);
2370 + ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk);
2371 + if (ret) {
2372 + kfree(buf);
2373 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2374 + return ret;
2375 + }
2376 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2377 + *to_blk = new_blk;
2378 + to_page = ((*to_blk) << page_per_block_bit);
2379 + }
2380 + }
2381 + } else {
2382 + memset(chip->oob_poi, 0xff, mtd->oobsize);
2383 + if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) {
2384 + int ret, new_blk = 0;
2385 + nand_bbt_set(mtd, to_page, 0x3);
2386 + ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk);
2387 + if (ret) {
2388 + kfree(buf);
2389 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2390 + return ret;
2391 + }
2392 + mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
2393 + *to_blk = new_blk;
2394 + to_page = ((*to_blk) << page_per_block_bit);
2395 + }
2396 + }
2397 + }
2398 +
2399 + kfree(buf);
2400 +
2401 + return 0;
2402 +}
2403 +
2404 +static int
2405 +mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset,
2406 + int data_len, const u8 * buf, int oob_required, int page, int cached, int raw)
2407 +{
2408 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2409 + int block = page / page_per_block;
2410 + u16 page_in_block = page % page_per_block;
2411 + int mapped_block = block;
2412 +
2413 +#if defined(MTK_NAND_BMT)
2414 + mapped_block = get_mapping_block_index(block);
2415 + // write bad index into oob
2416 + if (mapped_block != block)
2417 + set_bad_index_to_oob(chip->oob_poi, block);
2418 + else
2419 + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2420 +#else
2421 + if (shift_on_bbt) {
2422 + mapped_block = block_remap(mtd, block);
2423 + if (mapped_block == -1)
2424 + return NAND_STATUS_FAIL;
2425 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2426 + return NAND_STATUS_FAIL;
2427 + }
2428 +#endif
2429 + do {
2430 + if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) {
2431 + MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
2432 +#if defined(MTK_NAND_BMT)
2433 + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
2434 + MSG(INIT, "Update BMT success\n");
2435 + return 0;
2436 + } else {
2437 + MSG(INIT, "Update BMT fail\n");
2438 + return -EIO;
2439 + }
2440 +#else
2441 + {
2442 + int new_blk;
2443 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2444 + if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0)
2445 + {
2446 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2447 + return NAND_STATUS_FAIL;
2448 + }
2449 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2450 + break;
2451 + }
2452 +#endif
2453 + } else
2454 + break;
2455 + } while(1);
2456 +
2457 + return 0;
2458 +}
2459 +
2460 +static void
2461 +mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
2462 +{
2463 + struct nand_chip *nand = mtd->priv;
2464 +
2465 + switch (command) {
2466 + case NAND_CMD_SEQIN:
2467 + memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
2468 + g_kCMD.pDataBuf = NULL;
2469 + g_kCMD.u4RowAddr = page_addr;
2470 + g_kCMD.u4ColAddr = column;
2471 + break;
2472 +
2473 + case NAND_CMD_PAGEPROG:
2474 + if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) {
2475 + u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
2476 + mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
2477 + g_kCMD.u4RowAddr = (u32) - 1;
2478 + g_kCMD.u4OOBRowAddr = (u32) - 1;
2479 + }
2480 + break;
2481 +
2482 + case NAND_CMD_READOOB:
2483 + g_kCMD.u4RowAddr = page_addr;
2484 + g_kCMD.u4ColAddr = column + mtd->writesize;
2485 + break;
2486 +
2487 + case NAND_CMD_READ0:
2488 + g_kCMD.u4RowAddr = page_addr;
2489 + g_kCMD.u4ColAddr = column;
2490 + break;
2491 +
2492 + case NAND_CMD_ERASE1:
2493 + nand->state=FL_ERASING;
2494 + (void)mtk_nand_reset();
2495 + mtk_nand_set_mode(CNFG_OP_ERASE);
2496 + (void)mtk_nand_set_command(NAND_CMD_ERASE1);
2497 + (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
2498 + break;
2499 +
2500 + case NAND_CMD_ERASE2:
2501 + (void)mtk_nand_set_command(NAND_CMD_ERASE2);
2502 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2503 + ;
2504 + break;
2505 +
2506 + case NAND_CMD_STATUS:
2507 + (void)mtk_nand_reset();
2508 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
2509 + mtk_nand_set_mode(CNFG_OP_SRD);
2510 + mtk_nand_set_mode(CNFG_READ_EN);
2511 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2512 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2513 + (void)mtk_nand_set_command(NAND_CMD_STATUS);
2514 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2515 + mb();
2516 + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
2517 + g_bcmdstatus = true;
2518 + break;
2519 +
2520 + case NAND_CMD_RESET:
2521 + (void)mtk_nand_reset();
2522 + DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN);
2523 + (void)mtk_nand_set_command(NAND_CMD_RESET);
2524 + DRV_WriteReg16(NFI_BASE+0x44, 0xF1);
2525 + while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN))
2526 + ;
2527 + break;
2528 +
2529 + case NAND_CMD_READID:
2530 + mtk_nand_reset();
2531 + /* Disable HW ECC */
2532 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2533 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2534 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
2535 + (void)mtk_nand_reset();
2536 + mb();
2537 + mtk_nand_set_mode(CNFG_OP_SRD);
2538 + (void)mtk_nand_set_command(NAND_CMD_READID);
2539 + (void)mtk_nand_set_address(0, 0, 1, 0);
2540 + DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
2541 + while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
2542 + ;
2543 + break;
2544 +
2545 + default:
2546 + BUG();
2547 + break;
2548 + }
2549 +}
2550 +
2551 +static void
2552 +mtk_nand_select_chip(struct mtd_info *mtd, int chip)
2553 +{
2554 + if ((chip == -1) && (false == g_bInitDone)) {
2555 + struct nand_chip *nand = mtd->priv;
2556 + struct mtk_nand_host *host = nand->priv;
2557 + struct mtk_nand_host_hw *hw = host->hw;
2558 + u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512);
2559 + u32 ecc_bit = 4;
2560 + u32 spare_bit = PAGEFMT_SPARE_16;
2561 +
2562 + if (spare_per_sector >= 28) {
2563 + spare_bit = PAGEFMT_SPARE_28;
2564 + ecc_bit = 12;
2565 + spare_per_sector = 28;
2566 + } else if (spare_per_sector >= 27) {
2567 + spare_bit = PAGEFMT_SPARE_27;
2568 + ecc_bit = 8;
2569 + spare_per_sector = 27;
2570 + } else if (spare_per_sector >= 26) {
2571 + spare_bit = PAGEFMT_SPARE_26;
2572 + ecc_bit = 8;
2573 + spare_per_sector = 26;
2574 + } else if (spare_per_sector >= 16) {
2575 + spare_bit = PAGEFMT_SPARE_16;
2576 + ecc_bit = 4;
2577 + spare_per_sector = 16;
2578 + } else {
2579 + MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector);
2580 + ASSERT(0);
2581 + }
2582 + mtd->oobsize = spare_per_sector*(mtd->writesize/512);
2583 + MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector);
2584 + /* Setup PageFormat */
2585 + if (4096 == mtd->writesize) {
2586 + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
2587 + nand->cmdfunc = mtk_nand_command_bp;
2588 + } else if (2048 == mtd->writesize) {
2589 + NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
2590 + nand->cmdfunc = mtk_nand_command_bp;
2591 + }
2592 + ECC_Config(hw,ecc_bit);
2593 + g_bInitDone = true;
2594 + }
2595 + switch (chip) {
2596 + case -1:
2597 + break;
2598 + case 0:
2599 + case 1:
2600 + /* Jun Shen, 2011.04.13 */
2601 + /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */
2602 + DRV_WriteReg16(NFI_CSEL_REG16, chip);
2603 + /* Jun Shen, 2011.04.13 */
2604 + break;
2605 + }
2606 +}
2607 +
2608 +static uint8_t
2609 +mtk_nand_read_byte(struct mtd_info *mtd)
2610 +{
2611 + uint8_t retval = 0;
2612 +
2613 + if (!mtk_nand_pio_ready()) {
2614 + printk("pio ready timeout\n");
2615 + retval = false;
2616 + }
2617 +
2618 + if (g_bcmdstatus) {
2619 + retval = DRV_Reg8(NFI_DATAR_REG32);
2620 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
2621 + mtk_nand_reset();
2622 + if (g_bHwEcc) {
2623 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2624 + } else {
2625 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2626 + }
2627 + g_bcmdstatus = false;
2628 + } else
2629 + retval = DRV_Reg8(NFI_DATAR_REG32);
2630 +
2631 + return retval;
2632 +}
2633 +
2634 +static void
2635 +mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
2636 +{
2637 + struct nand_chip *nand = (struct nand_chip *)mtd->priv;
2638 + struct NAND_CMD *pkCMD = &g_kCMD;
2639 + u32 u4ColAddr = pkCMD->u4ColAddr;
2640 + u32 u4PageSize = mtd->writesize;
2641 +
2642 + if (u4ColAddr < u4PageSize) {
2643 + if ((u4ColAddr == 0) && (len >= u4PageSize)) {
2644 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
2645 + if (len > u4PageSize) {
2646 + u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
2647 + memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
2648 + }
2649 + } else {
2650 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2651 + memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
2652 + }
2653 + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2654 + } else {
2655 + u32 u4Offset = u4ColAddr - u4PageSize;
2656 + u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
2657 + if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
2658 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
2659 + pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
2660 + }
2661 + memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
2662 + }
2663 + pkCMD->u4ColAddr += len;
2664 +}
2665 +
2666 +static void
2667 +mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
2668 +{
2669 + struct NAND_CMD *pkCMD = &g_kCMD;
2670 + u32 u4ColAddr = pkCMD->u4ColAddr;
2671 + u32 u4PageSize = mtd->writesize;
2672 + int i4Size, i;
2673 +
2674 + if (u4ColAddr >= u4PageSize) {
2675 + u32 u4Offset = u4ColAddr - u4PageSize;
2676 + u8 *pOOB = pkCMD->au1OOB + u4Offset;
2677 + i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
2678 + for (i = 0; i < i4Size; i++) {
2679 + pOOB[i] &= buf[i];
2680 + }
2681 + } else {
2682 + pkCMD->pDataBuf = (u8 *) buf;
2683 + }
2684 +
2685 + pkCMD->u4ColAddr += len;
2686 +}
2687 +
2688 +static int
2689 +mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required)
2690 +{
2691 + mtk_nand_write_buf(mtd, buf, mtd->writesize);
2692 + mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
2693 + return 0;
2694 +}
2695 +
2696 +static int
2697 +mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page)
2698 +{
2699 + struct NAND_CMD *pkCMD = &g_kCMD;
2700 + u32 u4ColAddr = pkCMD->u4ColAddr;
2701 + u32 u4PageSize = mtd->writesize;
2702 +
2703 + if (u4ColAddr == 0) {
2704 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
2705 + pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
2706 + }
2707 +
2708 + return 0;
2709 +}
2710 +
2711 +static int
2712 +mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
2713 +{
2714 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2715 + int block = page / page_per_block;
2716 + u16 page_in_block = page % page_per_block;
2717 + int mapped_block = block;
2718 +
2719 +#if defined (MTK_NAND_BMT)
2720 + mapped_block = get_mapping_block_index(block);
2721 + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
2722 + mtd->writesize, buf, chip->oob_poi))
2723 + return 0;
2724 +#else
2725 + if (shift_on_bbt) {
2726 + mapped_block = block_remap(mtd, block);
2727 + if (mapped_block == -1)
2728 + return NAND_STATUS_FAIL;
2729 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2730 + return NAND_STATUS_FAIL;
2731 + }
2732 +
2733 + if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi))
2734 + return 0;
2735 + else
2736 + return -EIO;
2737 +#endif
2738 +}
2739 +
2740 +int
2741 +mtk_nand_erase_hw(struct mtd_info *mtd, int page)
2742 +{
2743 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2744 +
2745 + chip->erase_cmd(mtd, page);
2746 +
2747 + return chip->waitfunc(mtd, chip);
2748 +}
2749 +
2750 +static int
2751 +mtk_nand_erase(struct mtd_info *mtd, int page)
2752 +{
2753 + // get mapping
2754 + struct nand_chip *chip = mtd->priv;
2755 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2756 + int page_in_block = page % page_per_block;
2757 + int block = page / page_per_block;
2758 + int mapped_block = block;
2759 +
2760 +#if defined(MTK_NAND_BMT)
2761 + mapped_block = get_mapping_block_index(block);
2762 +#else
2763 + if (shift_on_bbt) {
2764 + mapped_block = block_remap(mtd, block);
2765 + if (mapped_block == -1)
2766 + return NAND_STATUS_FAIL;
2767 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2768 + return NAND_STATUS_FAIL;
2769 + }
2770 +#endif
2771 +
2772 + do {
2773 + int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
2774 +
2775 + if (status & NAND_STATUS_FAIL) {
2776 +#if defined (MTK_NAND_BMT)
2777 + if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
2778 + UPDATE_ERASE_FAIL, NULL, NULL))
2779 + {
2780 + MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
2781 + return 0;
2782 + } else {
2783 + MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
2784 + return NAND_STATUS_FAIL;
2785 + }
2786 +#else
2787 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
2788 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
2789 + if (shift_on_bbt) {
2790 + mapped_block = block_remap(mtd, block);
2791 + if (mapped_block == -1)
2792 + return NAND_STATUS_FAIL;
2793 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2794 + return NAND_STATUS_FAIL;
2795 + } else
2796 + return NAND_STATUS_FAIL;
2797 +#endif
2798 + } else
2799 + break;
2800 + } while(1);
2801 +
2802 + return 0;
2803 +}
2804 +
2805 +static int
2806 +mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
2807 +{
2808 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
2809 + u32 col_addr = 0;
2810 + u32 sector = 0;
2811 + int res = 0;
2812 + u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
2813 + int randomread = 0;
2814 + int read_len = 0;
2815 + int sec_num = 1<<(chip->page_shift-9);
2816 + int spare_per_sector = mtd->oobsize/sec_num;
2817 +
2818 + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2819 + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2820 + return -EINVAL;
2821 + }
2822 + if (len > spare_per_sector)
2823 + randomread = 1;
2824 + if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) {
2825 + while (len > 0) {
2826 + read_len = min(len, spare_per_sector);
2827 + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // column of this sector's spare area
2828 + if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) {
2829 + printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n");
2830 + res = -EIO;
2831 + goto error;
2832 + }
2833 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2834 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n");
2835 + res = -EIO;
2836 + goto error;
2837 + }
2838 + mtk_nand_check_RW_count(read_len);
2839 + mtk_nand_stop_read();
2840 + sector++;
2841 + len -= read_len;
2842 + }
2843 + } else {
2844 + col_addr = NAND_SECTOR_SIZE;
2845 + if (chip->options & NAND_BUSWIDTH_16)
2846 + col_addr /= 2;
2847 + if (!mtk_nand_reset())
2848 + goto error;
2849 + mtk_nand_set_mode(0x6000);
2850 + NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
2851 + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2852 +
2853 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
2854 + NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
2855 +
2856 + mtk_nand_set_autoformat(false);
2857 +
2858 + if (!mtk_nand_set_command(NAND_CMD_READ0))
2859 + goto error;
2860 + // FIXME: handle any kind of address cycle
2861 + if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
2862 + goto error;
2863 + if (!mtk_nand_set_command(NAND_CMD_READSTART))
2864 + goto error;
2865 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2866 + goto error;
2867 + read_len = min(len, spare_per_sector);
2868 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2869 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2870 + res = -EIO;
2871 + goto error;
2872 + }
2873 + sector++;
2874 + len -= read_len;
2875 + mtk_nand_stop_read();
2876 + while (len > 0) {
2877 + read_len = min(len, spare_per_sector);
2878 + if (!mtk_nand_set_command(0x05))
2879 + goto error;
2880 + col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector);
2881 + if (chip->options & NAND_BUSWIDTH_16)
2882 + col_addr /= 2;
2883 + DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
2884 + DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
2885 + DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
2886 + if (!mtk_nand_status_ready(STA_ADDR_STATE))
2887 + goto error;
2888 + if (!mtk_nand_set_command(0xE0))
2889 + goto error;
2890 + if (!mtk_nand_status_ready(STA_NAND_BUSY))
2891 + goto error;
2892 + if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
2893 + printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
2894 + res = -EIO;
2895 + goto error;
2896 + }
2897 + mtk_nand_stop_read();
2898 + sector++;
2899 + len -= read_len;
2900 + }
2901 + }
2902 +error:
2903 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
2904 + return res;
2905 +}
2906 +
2907 +static int
2908 +mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
2909 +{
2910 + struct nand_chip *chip = mtd->priv;
2911 + u32 col_addr = 0;
2912 + u32 sector = 0;
2913 + int write_len = 0;
2914 + int status;
2915 + int sec_num = 1<<(chip->page_shift-9);
2916 + int spare_per_sector = mtd->oobsize/sec_num;
2917 +
2918 + if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
2919 + printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
2920 + return -EINVAL;
2921 + }
2922 +
2923 + while (len > 0) {
2924 + write_len = min(len, spare_per_sector);
2925 + col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE;
2926 + if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
2927 + return -EIO;
2928 + if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
2929 + return -EIO;
2930 + (void)mtk_nand_check_RW_count(write_len);
2931 + NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
2932 + (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
2933 + while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
2934 + ;
2935 + status = chip->waitfunc(mtd, chip);
2936 + if (status & NAND_STATUS_FAIL) {
2937 + printk(KERN_INFO "status: %d\n", status);
2938 + return -EIO;
2939 + }
2940 + len -= write_len;
2941 + sector++;
2942 + }
2943 +
2944 + return 0;
2945 +}
2946 +
2947 +static int
2948 +mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
2949 +{
2950 + int i, iter;
2951 + int sec_num = 1<<(chip->page_shift-9);
2952 + int spare_per_sector = mtd->oobsize/sec_num;
2953 +
2954 + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
2955 +
2956 + // copy ecc data
2957 + for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
2958 + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
2959 + local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]];
2960 + }
2961 +
2962 + // copy FDM data
2963 + for (i = 0; i < sec_num; i++)
2964 + memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
2965 +
2966 + return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
2967 +}
2968 +
2969 +static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
2970 +{
2971 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2972 + int block = page / page_per_block;
2973 + u16 page_in_block = page % page_per_block;
2974 + int mapped_block = block;
2975 +
2976 +#if defined(MTK_NAND_BMT)
2977 + mapped_block = get_mapping_block_index(block);
2978 + // write bad index into oob
2979 + if (mapped_block != block)
2980 + set_bad_index_to_oob(chip->oob_poi, block);
2981 + else
2982 + set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
2983 +#else
2984 + if (shift_on_bbt)
2985 + {
2986 + mapped_block = block_remap(mtd, block);
2987 + if (mapped_block == -1)
2988 + return NAND_STATUS_FAIL;
2989 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
2990 + return NAND_STATUS_FAIL;
2991 + }
2992 +#endif
2993 + do {
2994 + if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
2995 + MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
2996 +#if defined(MTK_NAND_BMT)
2997 + if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
2998 + UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
2999 + {
3000 + MSG(INIT, "Update BMT success\n");
3001 + return 0;
3002 + } else {
3003 + MSG(INIT, "Update BMT fail\n");
3004 + return -EIO;
3005 + }
3006 +#else
3007 + mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
3008 + nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
3009 + if (shift_on_bbt) {
3010 + mapped_block = block_remap(mtd, mapped_block);
3011 + if (mapped_block == -1)
3012 + return NAND_STATUS_FAIL;
3013 + if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
3014 + return NAND_STATUS_FAIL;
3015 + } else {
3016 + return NAND_STATUS_FAIL;
3017 + }
3018 +#endif
3019 + } else
3020 + break;
3021 + } while (1);
3022 +
3023 + return 0;
3024 +}
3025 +
3026 +int
3027 +mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
3028 +{
3029 + struct nand_chip *chip = mtd->priv;
3030 +	int block = (int)(offset >> chip->phys_erase_shift);
3031 + int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
3032 + u8 buf[8];
3033 +
3034 + memset(buf, 0xFF, 8);
3035 + buf[0] = 0;
3036 + return mtk_nand_write_oob_raw(mtd, buf, page, 8);
3037 +}
3038 +
3039 +static int
3040 +mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
3041 +{
3042 + struct nand_chip *chip = mtd->priv;
3043 +	int block = (int)(offset >> chip->phys_erase_shift);
3044 + int ret;
3045 + int mapped_block = block;
3046 +
3047 + nand_get_device(chip, mtd, FL_WRITING);
3048 +
3049 +#if defined(MTK_NAND_BMT)
3050 + mapped_block = get_mapping_block_index(block);
3051 + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3052 +#else
3053 + if (shift_on_bbt) {
3054 + mapped_block = block_remap(mtd, block);
3055 + if (mapped_block == -1) {
3056 + printk("NAND mark bad failed\n");
3057 + nand_release_device(mtd);
3058 + return NAND_STATUS_FAIL;
3059 + }
3060 + }
3061 + ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
3062 +#endif
3063 + nand_release_device(mtd);
3064 +
3065 + return ret;
3066 +}
3067 +
3068 +int
3069 +mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
3070 +{
3071 + int i;
3072 + u8 iter = 0;
3073 +
3074 + int sec_num = 1<<(chip->page_shift-9);
3075 + int spare_per_sector = mtd->oobsize/sec_num;
3076 +
3077 + if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
3078 + printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
3079 + return -EIO;
3080 + }
3081 +
3082 + // adjust to ecc physical layout to memory layout
3083 + /*********************************************************/
3084 + /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
3085 + /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
3086 + /*********************************************************/
3087 +
3088 + memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
3089 + // copy ecc data
3090 + for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
3091 + iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
3092 + chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter];
3093 + }
3094 +
3095 + // copy FDM data
3096 + for (i = 0; i < sec_num; i++) {
3097 + memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
3098 + }
3099 +
3100 + return 0;
3101 +}
3102 +
3103 +static int
3104 +mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
3105 +{
3106 + int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3107 + int block = page / page_per_block;
3108 + u16 page_in_block = page % page_per_block;
3109 + int mapped_block = block;
3110 +
3111 +#if defined (MTK_NAND_BMT)
3112 + mapped_block = get_mapping_block_index(block);
3113 + mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
3114 +#else
3115 + if (shift_on_bbt) {
3116 + mapped_block = block_remap(mtd, block);
3117 + if (mapped_block == -1)
3118 + return NAND_STATUS_FAIL;
3119 + // allow to read oob even if the block is bad
3120 + }
3121 + if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0)
3122 + return -1;
3123 +#endif
3124 + return 0;
3125 +}
3126 +
3127 +int
3128 +mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
3129 +{
3130 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3131 + int page_addr = (int)(ofs >> chip->page_shift);
3132 + unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3133 + unsigned char oob_buf[8];
3134 +
3135 + page_addr &= ~(page_per_block - 1);
3136 + if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) {
3137 + printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n");
3138 + return 1;
3139 + }
3140 +
3141 + if (oob_buf[0] != 0xff) {
3142 + printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]);
3143 + // dump_nfi();
3144 + return 1;
3145 + }
3146 +
3147 + return 0;
3148 +}
3149 +
3150 +static int
3151 +mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
3152 +{
3153 + int chipnr = 0;
3154 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3155 +	int block = (int)(ofs >> chip->phys_erase_shift);
3156 + int mapped_block = block;
3157 + int ret;
3158 +
3159 + if (getchip) {
3160 + chipnr = (int)(ofs >> chip->chip_shift);
3161 + nand_get_device(chip, mtd, FL_READING);
3162 + /* Select the NAND device */
3163 + chip->select_chip(mtd, chipnr);
3164 + }
3165 +
3166 +#if defined(MTK_NAND_BMT)
3167 + mapped_block = get_mapping_block_index(block);
3168 +#else
3169 + if (shift_on_bbt) {
3170 + mapped_block = block_remap(mtd, block);
3171 + if (mapped_block == -1) {
3172 + if (getchip)
3173 + nand_release_device(mtd);
3174 + return NAND_STATUS_FAIL;
3175 + }
3176 + }
3177 +#endif
3178 +
3179 + ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift);
3180 +#if defined (MTK_NAND_BMT)
3181 + if (ret) {
3182 + MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
3183 + if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
3184 + MSG(INIT, "Update BMT success\n");
3185 + ret = 0;
3186 + } else {
3187 + MSG(INIT, "Update BMT fail\n");
3188 + ret = 1;
3189 + }
3190 + }
3191 +#endif
3192 +
3193 + if (getchip)
3194 + nand_release_device(mtd);
3195 +
3196 + return ret;
3197 +}
3198 +
3199 +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
3200 +char gacBuf[4096 + 288];
3201 +
3202 +static int
3203 +mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
3204 +{
3205 + struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3206 + struct NAND_CMD *pkCMD = &g_kCMD;
3207 + u32 u4PageSize = mtd->writesize;
3208 + u32 *pSrc, *pDst;
3209 + int i;
3210 +
3211 + mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
3212 +
3213 + pSrc = (u32 *) buf;
3214 + pDst = (u32 *) gacBuf;
3215 + len = len / sizeof(u32);
3216 + for (i = 0; i < len; ++i) {
3217 + if (*pSrc != *pDst) {
3218 + MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
3219 + return -1;
3220 + }
3221 + pSrc++;
3222 + pDst++;
3223 + }
3224 +
3225 + pSrc = (u32 *) chip->oob_poi;
3226 + pDst = (u32 *) (gacBuf + u4PageSize);
3227 +
3228 + if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
3229 + // TODO: Ask Designer Why?
3230 + //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
3231 + MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
3232 + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
3233 + MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
3234 + return -1;
3235 + }
3236 + return 0;
3237 +}
3238 +#endif
3239 +
3240 +static void
3241 +mtk_nand_init_hw(struct mtk_nand_host *host) {
3242 + struct mtk_nand_host_hw *hw = host->hw;
3243 + u32 data;
3244 +
3245 + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
3246 + data &= ~((0x3<<18)|(0x3<<16));
3247 + data |= ((0x2<<18) |(0x2<<16));
3248 + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
3249 +
3250 + MSG(INIT, "Enable NFI Clock\n");
3251 + nand_enable_clock();
3252 +
3253 + g_bInitDone = false;
3254 + g_kCMD.u4OOBRowAddr = (u32) - 1;
3255 +
3256 + /* Set default NFI access timing control */
3257 + DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
3258 + DRV_WriteReg16(NFI_CNFG_REG16, 0);
3259 + DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
3260 +
3261 + /* Reset the state machine and data FIFO, because flushing FIFO */
3262 + (void)mtk_nand_reset();
3263 +
3264 + /* Set the ECC engine */
3265 + if (hw->nand_ecc_mode == NAND_ECC_HW) {
3266 + MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
3267 + if (g_bHwEcc)
3268 + NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
3269 + ECC_Config(host->hw,4);
3270 + mtk_nand_configure_fdm(8);
3271 + mtk_nand_configure_lock();
3272 + }
3273 +
3274 + NFI_SET_REG16(NFI_IOCON_REG16, 0x47);
3275 +}
3276 +
3277 +static int mtk_nand_dev_ready(struct mtd_info *mtd)
3278 +{
3279 + return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
3280 +}
3281 +
3282 +#define FACT_BBT_BLOCK_NUM 32 // use the latest 32 BLOCK for factory bbt table
3283 +#define FACT_BBT_OOB_SIGNATURE 1
3284 +#define FACT_BBT_SIGNATURE_LEN 7
3285 +const u8 oob_signature[] = "mtknand";
3286 +static u8 *fact_bbt = 0;
3287 +static u32 bbt_size = 0;
3288 +
3289 +static int
3290 +read_fact_bbt(struct mtd_info *mtd, unsigned int page)
3291 +{
3292 + struct nand_chip *chip = mtd->priv;
3293 +
3294 + // read oob
3295 + if (mtk_nand_read_oob_hw(mtd, chip, page)==0)
3296 + {
3297 + if (chip->oob_poi[nand_badblock_offset] != 0xFF)
3298 + {
3299 + printk("Bad Block on Page %x\n", page);
3300 + return -1;
3301 + }
3302 + if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0)
3303 + {
3304 + printk("compare signature failed %x\n", page);
3305 + return -1;
3306 + }
3307 + if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi))
3308 + {
3309 + printk("Signature matched and data read!\n");
3310 + memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize);
3311 + return 0;
3312 + }
3313 +
3314 + }
3315 + printk("failed at page %x\n", page);
3316 + return -1;
3317 +}
3318 +
3319 +static int
3320 +load_fact_bbt(struct mtd_info *mtd)
3321 +{
3322 + struct nand_chip *chip = mtd->priv;
3323 + int i;
3324 + u32 total_block;
3325 +
3326 + total_block = 1 << (chip->chip_shift - chip->phys_erase_shift);
3327 + bbt_size = total_block >> 2;
3328 +
3329 + if ((!fact_bbt) && (bbt_size))
3330 + fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL);
3331 + if (!fact_bbt)
3332 + return -1;
3333 +
3334 + for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--)
3335 + {
3336 + if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0)
3337 + {
3338 + printk("load_fact_bbt success %d\n", i);
3339 + return 0;
3340 + }
3341 +
3342 + }
3343 + printk("load_fact_bbt failed\n");
3344 + return -1;
3345 +}
3346 +
3347 +static int
3348 +mtk_nand_probe(struct platform_device *pdev)
3349 +{
3350 + struct mtd_part_parser_data ppdata;
3351 + struct mtk_nand_host_hw *hw;
3352 + struct mtd_info *mtd;
3353 + struct nand_chip *nand_chip;
3354 + u8 ext_id1, ext_id2, ext_id3;
3355 + int err = 0;
3356 + int id;
3357 + u32 ext_id;
3358 + int i;
3359 + u32 data;
3360 +
3361 + data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
3362 + data &= ~((0x3<<18)|(0x3<<16));
3363 + data |= ((0x2<<18) |(0x2<<16));
3364 + DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
3365 +
3366 +	hw = &mt7621_nand_hw;
3367 + BUG_ON(!hw);
3368 + /* Allocate memory for the device structure (and zero it) */
3369 + host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
3370 + if (!host) {
3371 + MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
3372 + return -ENOMEM;
3373 + }
3374 +
3375 + /* Allocate memory for 16 byte aligned buffer */
3376 + local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16);
3377 + printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
3378 + host->hw = hw;
3379 +
3380 + /* init mtd data structure */
3381 + nand_chip = &host->nand_chip;
3382 + nand_chip->priv = host; /* link the private data structures */
3383 +
3384 + mtd = &host->mtd;
3385 + mtd->priv = nand_chip;
3386 + mtd->owner = THIS_MODULE;
3387 + mtd->name = "MT7621-NAND";
3388 +
3389 + hw->nand_ecc_mode = NAND_ECC_HW;
3390 +
3391 + /* Set address of NAND IO lines */
3392 + nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
3393 + nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
3394 + nand_chip->chip_delay = 20; /* 20us command delay time */
3395 + nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
3396 + nand_chip->ecc.strength = 1;
3397 + nand_chip->read_byte = mtk_nand_read_byte;
3398 + nand_chip->read_buf = mtk_nand_read_buf;
3399 + nand_chip->write_buf = mtk_nand_write_buf;
3400 +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
3401 + nand_chip->verify_buf = mtk_nand_verify_buf;
3402 +#endif
3403 + nand_chip->select_chip = mtk_nand_select_chip;
3404 + nand_chip->dev_ready = mtk_nand_dev_ready;
3405 + nand_chip->cmdfunc = mtk_nand_command_bp;
3406 + nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
3407 + nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
3408 +
3409 + nand_chip->ecc.layout = &nand_oob_64;
3410 + nand_chip->ecc.size = hw->nand_ecc_size; //2048
3411 + nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
3412 +
3413 + // For BMT, we need to revise driver architecture
3414 + nand_chip->write_page = mtk_nand_write_page;
3415 + nand_chip->ecc.write_oob = mtk_nand_write_oob;
3416 + nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
3417 + // nand_chip->erase = mtk_nand_erase;
3418 + // nand_chip->read_page = mtk_nand_read_page;
3419 + nand_chip->ecc.read_oob = mtk_nand_read_oob;
3420 + nand_chip->block_bad = mtk_nand_block_bad;
3421 +
3422 + //Qwert:Add for Uboot
3423 + mtk_nand_init_hw(host);
3424 + /* Select the device */
3425 + nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
3426 +
3427 + /*
3428 + * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
3429 + * after power-up
3430 + */
3431 + nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
3432 +
3433 + memset(&devinfo, 0 , sizeof(flashdev_info));
3434 +
3435 + /* Send the command for reading device ID */
3436 +
3437 + nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
3438 +
3439 + /* Read manufacturer and device IDs */
3440 + manu_id = nand_chip->read_byte(mtd);
3441 + dev_id = nand_chip->read_byte(mtd);
3442 + id = dev_id | (manu_id << 8);
3443 + ext_id1 = nand_chip->read_byte(mtd);
3444 + ext_id2 = nand_chip->read_byte(mtd);
3445 + ext_id3 = nand_chip->read_byte(mtd);
3446 + ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
3447 + if (!get_device_info(id, ext_id, &devinfo)) {
3448 + u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F;
3449 + MSG(INIT, "Not Support this Device! \r\n");
3450 + memset(&devinfo, 0 , sizeof(flashdev_info));
3451 + MSG(INIT, "chip_mode=%08X\n",chip_mode);
3452 +
3453 + /* apply bootstrap first */
3454 + devinfo.addr_cycle = 5;
3455 + devinfo.iowidth = 8;
3456 +
3457 + switch (chip_mode) {
3458 + case 10:
3459 + devinfo.pagesize = 2048;
3460 + devinfo.sparesize = 128;
3461 + devinfo.totalsize = 128;
3462 + devinfo.blocksize = 128;
3463 + break;
3464 + case 11:
3465 + devinfo.pagesize = 4096;
3466 + devinfo.sparesize = 128;
3467 + devinfo.totalsize = 1024;
3468 + devinfo.blocksize = 256;
3469 + break;
3470 + case 12:
3471 + devinfo.pagesize = 4096;
3472 + devinfo.sparesize = 224;
3473 + devinfo.totalsize = 2048;
3474 + devinfo.blocksize = 512;
3475 + break;
3476 + default:
3477 + case 1:
3478 + devinfo.pagesize = 2048;
3479 + devinfo.sparesize = 64;
3480 + devinfo.totalsize = 128;
3481 + devinfo.blocksize = 128;
3482 + break;
3483 + }
3484 +
3485 + devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING;
3486 + devinfo.devciename[0] = 'U';
3487 + devinfo.advancedmode = 0;
3488 + }
3489 + mtd->writesize = devinfo.pagesize;
3490 + mtd->erasesize = (devinfo.blocksize<<10);
3491 + mtd->oobsize = devinfo.sparesize;
3492 +
3493 + nand_chip->chipsize = (devinfo.totalsize<<20);
3494 + nand_chip->page_shift = ffs(mtd->writesize) - 1;
3495 + nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1;
3496 + nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
3497 + nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;//0x1C;//ffs(nand_chip->chipsize) - 1;
3498 + nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize;
3499 + nand_chip->badblockpos = 0;
3500 +
3501 + if (devinfo.pagesize == 4096)
3502 + nand_chip->ecc.layout = &nand_oob_128;
3503 + else if (devinfo.pagesize == 2048)
3504 + nand_chip->ecc.layout = &nand_oob_64;
3505 + else if (devinfo.pagesize == 512)
3506 + nand_chip->ecc.layout = &nand_oob_16;
3507 +
3508 + nand_chip->ecc.layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE);
3509 + for (i = 0; i < nand_chip->ecc.layout->eccbytes; i++)
3510 + nand_chip->ecc.layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i;
3511 +
3512 + MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
3513 + hw->nfi_bus_width = devinfo.iowidth;
3514 + DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
3515 +
3516 + /* 16-bit bus width */
3517 + if (hw->nfi_bus_width == 16) {
3518 + MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
3519 + nand_chip->options |= NAND_BUSWIDTH_16;
3520 + }
3521 + mtd->oobsize = devinfo.sparesize;
3522 + hw->nfi_cs_num = 1;
3523 +
3524 + /* Scan to find existance of the device */
3525 + if (nand_scan(mtd, hw->nfi_cs_num)) {
3526 + MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
3527 + err = -ENXIO;
3528 + goto out;
3529 + }
3530 +
3531 + g_page_size = mtd->writesize;
3532 + platform_set_drvdata(pdev, host);
3533 + if (hw->nfi_bus_width == 16) {
3534 + NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
3535 + }
3536 +
3537 + nand_chip->select_chip(mtd, 0);
3538 +#if defined(MTK_NAND_BMT)
3539 + nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
3540 +#endif
3541 + mtd->size = nand_chip->chipsize;
3542 +
3543 + CFG_BLOCKSIZE = mtd->erasesize;
3544 +
3545 +#if defined(MTK_NAND_BMT)
3546 + if (!g_bmt) {
3547 + if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) {
3548 + MSG(INIT, "Error: init bmt failed\n");
3549 +			err = -ENOMEM; goto out;
3550 + }
3551 + }
3552 +#endif
3553 +
3554 + ppdata.of_node = pdev->dev.of_node;
3555 + err = mtd_device_parse_register(mtd, probe_types, &ppdata,
3556 + NULL, 0);
3557 + if (!err) {
3558 + MSG(INIT, "[mtk_nand] probe successfully!\n");
3559 + nand_disable_clock();
3560 + shift_on_bbt = 1;
3561 + if (load_fact_bbt(mtd) == 0) {
3562 + int i;
3563 +			for (i = 0; i < bbt_size; i++)
3564 + nand_chip->bbt[i] |= fact_bbt[i];
3565 + }
3566 +
3567 + return err;
3568 + }
3569 +
3570 +out:
3571 + MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
3572 + nand_release(mtd);
3573 + platform_set_drvdata(pdev, NULL);
3574 + kfree(host);
3575 + nand_disable_clock();
3576 + return err;
3577 +}
3578 +
3579 +static int
3580 +mtk_nand_remove(struct platform_device *pdev)
3581 +{
3582 + struct mtk_nand_host *host = platform_get_drvdata(pdev);
3583 + struct mtd_info *mtd = &host->mtd;
3584 +
3585 + nand_release(mtd);
3586 + kfree(host);
3587 + nand_disable_clock();
3588 +
3589 + return 0;
3590 +}
3591 +
3592 +static const struct of_device_id mt7621_nand_match[] = {
3593 + { .compatible = "mtk,mt7621-nand" },
3594 + {},
3595 +};
3596 +MODULE_DEVICE_TABLE(of, mt7621_nand_match);
3597 +
3598 +static struct platform_driver mtk_nand_driver = {
3599 + .probe = mtk_nand_probe,
3600 + .remove = mtk_nand_remove,
3601 + .driver = {
3602 + .name = "MT7621-NAND",
3603 + .owner = THIS_MODULE,
3604 + .of_match_table = mt7621_nand_match,
3605 + },
3606 +};
3607 +
3608 +static int __init
3609 +mtk_nand_init(void)
3610 +{
3611 + printk("MediaTek Nand driver init, version %s\n", VERSION);
3612 +
3613 + return platform_driver_register(&mtk_nand_driver);
3614 +}
3615 +
3616 +static void __exit
3617 +mtk_nand_exit(void)
3618 +{
3619 + platform_driver_unregister(&mtk_nand_driver);
3620 +}
3621 +
3622 +module_init(mtk_nand_init);
3623 +module_exit(mtk_nand_exit);
3624 +MODULE_LICENSE("GPL");
3625 diff --git a/drivers/mtd/nand/mtk_nand.h b/drivers/mtd/nand/mtk_nand.h
3626 new file mode 100644
3627 index 0000000..6db88c4
3628 --- /dev/null
3629 +++ b/drivers/mtd/nand/mtk_nand.h
3630 @@ -0,0 +1,452 @@
3631 +#ifndef __MTK_NAND_H
3632 +#define __MTK_NAND_H
3633 +
3634 +#define RALINK_NAND_CTRL_BASE 0xBE003000
3635 +#define RALINK_SYSCTL_BASE 0xBE000000
3636 +#define RALINK_NANDECC_CTRL_BASE 0xBE003800
3637 +/*******************************************************************************
3638 + * NFI Register Definition
3639 + *******************************************************************************/
3640 +
3641 +#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000))
3642 +#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004))
3643 +#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008))
3644 +#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C))
3645 +#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010))
3646 +#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014))
3647 +
3648 +#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020))
3649 +
3650 +#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030))
3651 +#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034))
3652 +#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038))
3653 +
3654 +#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040))
3655 +
3656 +#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050))
3657 +#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054))
3658 +#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
3659 +
3660 +#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060))
3661 +#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064))
3662 +#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068))
3663 +
3664 +#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070))
3665 +
3666 +#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080))
3667 +#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084))
3668 +
3669 +#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090))
3670 +#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094))
3671 +
3672 +#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0))
3673 +#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4))
3674 +
3675 +#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100))
3676 +#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104))
3677 +#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108))
3678 +#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
3679 +#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
3680 +#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
3681 +#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
3682 +#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
3683 +#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
3684 +#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
3685 +#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
3686 +#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
3687 +#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
3688 +#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
3689 +#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
3690 +#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
3691 +#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
3692 +#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
3693 +#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
3694 +#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
3695 +#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
3696 +#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
3697 +#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
3698 +#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
3699 +#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
3700 +#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
3701 +#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
3702 +#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
3703 +#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
3704 +#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
3705 +#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
3706 +#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
3707 +#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
3708 +#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
3709 +#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
3710 +
3711 +#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
3712 +#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
3713 +#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
3714 +#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
3715 +#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
3716 +
3717 +
3718 +/*******************************************************************************
3719 + * NFI Register Field Definition
3720 + *******************************************************************************/
3721 +
3722 +/* NFI_CNFG */
3723 +#define CNFG_AHB (0x0001)
3724 +#define CNFG_READ_EN (0x0002)
3725 +#define CNFG_DMA_BURST_EN (0x0004)
3726 +#define CNFG_BYTE_RW (0x0040)
3727 +#define CNFG_HW_ECC_EN (0x0100)
3728 +#define CNFG_AUTO_FMT_EN (0x0200)
3729 +#define CNFG_OP_IDLE (0x0000)
3730 +#define CNFG_OP_READ (0x1000)
3731 +#define CNFG_OP_SRD (0x2000)
3732 +#define CNFG_OP_PRGM (0x3000)
3733 +#define CNFG_OP_ERASE (0x4000)
3734 +#define CNFG_OP_RESET (0x5000)
3735 +#define CNFG_OP_CUST (0x6000)
3736 +#define CNFG_OP_MODE_MASK (0x7000)
3737 +#define CNFG_OP_MODE_SHIFT (12)
3738 +
3739 +/* NFI_PAGEFMT */
3740 +#define PAGEFMT_512 (0x0000)
3741 +#define PAGEFMT_2K (0x0001)
3742 +#define PAGEFMT_4K (0x0002)
3743 +
3744 +#define PAGEFMT_PAGE_MASK (0x0003)
3745 +
3746 +#define PAGEFMT_DBYTE_EN (0x0008)
3747 +
3748 +#define PAGEFMT_SPARE_16 (0x0000)
3749 +#define PAGEFMT_SPARE_26 (0x0001)
3750 +#define PAGEFMT_SPARE_27 (0x0002)
3751 +#define PAGEFMT_SPARE_28 (0x0003)
3752 +#define PAGEFMT_SPARE_MASK (0x0030)
3753 +#define PAGEFMT_SPARE_SHIFT (4)
3754 +
3755 +#define PAGEFMT_FDM_MASK (0x0F00)
3756 +#define PAGEFMT_FDM_SHIFT (8)
3757 +
3758 +#define PAGEFMT_FDM_ECC_MASK (0xF000)
3759 +#define PAGEFMT_FDM_ECC_SHIFT (12)
3760 +
3761 +/* NFI_CON */
3762 +#define CON_FIFO_FLUSH (0x0001)
3763 +#define CON_NFI_RST (0x0002)
3764 +#define CON_NFI_SRD (0x0010)
3765 +
3766 +#define CON_NFI_NOB_MASK (0x0060)
3767 +#define CON_NFI_NOB_SHIFT (5)
3768 +
3769 +#define CON_NFI_BRD (0x0100)
3770 +#define CON_NFI_BWR (0x0200)
3771 +
3772 +#define CON_NFI_SEC_MASK (0xF000)
3773 +#define CON_NFI_SEC_SHIFT (12)
3774 +
3775 +/* NFI_ACCCON */
3776 +#define ACCCON_SETTING ()
3777 +
3778 +/* NFI_INTR_EN */
3779 +#define INTR_RD_DONE_EN (0x0001)
3780 +#define INTR_WR_DONE_EN (0x0002)
3781 +#define INTR_RST_DONE_EN (0x0004)
3782 +#define INTR_ERASE_DONE_EN (0x0008)
3783 +#define INTR_BSY_RTN_EN (0x0010)
3784 +#define INTR_ACC_LOCK_EN (0x0020)
3785 +#define INTR_AHB_DONE_EN (0x0040)
3786 +#define INTR_ALL_INTR_DE (0x0000)
3787 +#define INTR_ALL_INTR_EN (0x007F)
3788 +
3789 +/* NFI_INTR */
3790 +#define INTR_RD_DONE (0x0001)
3791 +#define INTR_WR_DONE (0x0002)
3792 +#define INTR_RST_DONE (0x0004)
3793 +#define INTR_ERASE_DONE (0x0008)
3794 +#define INTR_BSY_RTN (0x0010)
3795 +#define INTR_ACC_LOCK (0x0020)
3796 +#define INTR_AHB_DONE (0x0040)
3797 +
3798 +/* NFI_ADDRNOB */
3799 +#define ADDR_COL_NOB_MASK (0x0003)
3800 +#define ADDR_COL_NOB_SHIFT (0)
3801 +#define ADDR_ROW_NOB_MASK (0x0030)
3802 +#define ADDR_ROW_NOB_SHIFT (4)
3803 +
3804 +/* NFI_STA */
3805 +#define STA_READ_EMPTY (0x00001000)
3806 +#define STA_ACC_LOCK (0x00000010)
3807 +#define STA_CMD_STATE (0x00000001)
3808 +#define STA_ADDR_STATE (0x00000002)
3809 +#define STA_DATAR_STATE (0x00000004)
3810 +#define STA_DATAW_STATE (0x00000008)
3811 +
3812 +#define STA_NAND_FSM_MASK (0x1F000000)
3813 +#define STA_NAND_BUSY (0x00000100)
3814 +#define STA_NAND_BUSY_RETURN (0x00000200)
3815 +#define STA_NFI_FSM_MASK (0x000F0000)
3816 +#define STA_NFI_OP_MASK (0x0000000F)
3817 +
3818 +/* NFI_FIFOSTA */
3819 +#define FIFO_RD_EMPTY (0x0040)
3820 +#define FIFO_RD_FULL (0x0080)
3821 +#define FIFO_WR_FULL (0x8000)
3822 +#define FIFO_WR_EMPTY (0x4000)
3823 +#define FIFO_RD_REMAIN(x) (0x1F&(x))
3824 +#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8)
3825 +
3826 +/* NFI_ADDRCNTR */
3827 +#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12)
3828 +#define ADDRCNTR_OFFSET(x) (0x03FF&(x))
3829 +
3830 +/* NFI_LOCK */
3831 +#define NFI_LOCK_ON (0x0001)
3832 +
3833 +/* NFI_LOCKANOB */
3834 +#define PROG_RADD_NOB_MASK (0x7000)
3835 +#define PROG_RADD_NOB_SHIFT (12)
3836 +#define PROG_CADD_NOB_MASK (0x0300)
3837 +#define PROG_CADD_NOB_SHIFT (8)
3838 +#define ERASE_RADD_NOB_MASK (0x0070)
3839 +#define ERASE_RADD_NOB_SHIFT (4)
3840 +#define ERASE_CADD_NOB_MASK (0x0007)
3841 +#define ERASE_CADD_NOB_SHIFT (0)
3842 +
3843 +/*******************************************************************************
3844 + * ECC Register Definition
3845 + *******************************************************************************/
3846 +
3847 +#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000))
3848 +#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004))
3849 +#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008))
3850 +#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C))
3851 +#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010))
3852 +#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014))
3853 +#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018))
3854 +#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C))
3855 +#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020))
3856 +#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024))
3857 +#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028))
3858 +#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
3859 +
3860 +#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100))
3861 +#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104))
3862 +#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
3863 +#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C))
3864 +#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110))
3865 +#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114))
3866 +#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118))
3867 +#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C))
3868 +#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120))
3869 +#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124))
3870 +#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128))
3871 +#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C))
3872 +#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130))
3873 +#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134))
3874 +#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138))
3875 +#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C))
3876 +#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140))
3877 +#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144))
3878 +#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148))
3879 +#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C))
3880 +
3881 +/*******************************************************************************
3882 + * ECC register definition
3883 + *******************************************************************************/
3884 +/* ECC_ENCON */
3885 +#define ENC_EN (0x0001)
3886 +#define ENC_DE (0x0000)
3887 +
3888 +/* ECC_ENCCNFG */
3889 +#define ECC_CNFG_ECC4 (0x0000)
3890 +#define ECC_CNFG_ECC6 (0x0001)
3891 +#define ECC_CNFG_ECC8 (0x0002)
3892 +#define ECC_CNFG_ECC10 (0x0003)
3893 +#define ECC_CNFG_ECC12 (0x0004)
3894 +#define ECC_CNFG_ECC_MASK (0x00000007)
3895 +
3896 +#define ENC_CNFG_NFI (0x0010)
3897 +#define ENC_CNFG_MODE_MASK (0x0010)
3898 +
3899 +#define ENC_CNFG_META6 (0x10300000)
3900 +#define ENC_CNFG_META8 (0x10400000)
3901 +
3902 +#define ENC_CNFG_MSG_MASK (0x1FFF0000)
3903 +#define ENC_CNFG_MSG_SHIFT (0x10)
3904 +
3905 +/* ECC_ENCIDLE */
3906 +#define ENC_IDLE (0x0001)
3907 +
3908 +/* ECC_ENCSTA */
3909 +#define STA_FSM (0x001F)
3910 +#define STA_COUNT_PS (0xFF10)
3911 +#define STA_COUNT_MS (0x3FFF0000)
3912 +
3913 +/* ECC_ENCIRQEN */
3914 +#define ENC_IRQEN (0x0001)
3915 +
3916 +/* ECC_ENCIRQSTA */
3917 +#define ENC_IRQSTA (0x0001)
3918 +
3919 +/* ECC_DECCON */
3920 +#define DEC_EN (0x0001)
3921 +#define DEC_DE (0x0000)
3922 +
3923 +/* ECC_ENCCNFG */
3924 +#define DEC_CNFG_ECC4 (0x0000)
3925 +//#define DEC_CNFG_ECC6 (0x0001)
3926 +//#define DEC_CNFG_ECC12 (0x0002)
3927 +#define DEC_CNFG_NFI (0x0010)
3928 +//#define DEC_CNFG_META6 (0x10300000)
3929 +//#define DEC_CNFG_META8 (0x10400000)
3930 +
3931 +#define DEC_CNFG_FER (0x01000)
3932 +#define DEC_CNFG_EL (0x02000)
3933 +#define DEC_CNFG_CORRECT (0x03000)
3934 +#define DEC_CNFG_TYPE_MASK (0x03000)
3935 +
3936 +#define DEC_CNFG_EMPTY_EN (0x80000000)
3937 +
3938 +#define DEC_CNFG_CODE_MASK (0x1FFF0000)
3939 +#define DEC_CNFG_CODE_SHIFT (0x10)
3940 +
3941 +/* ECC_DECIDLE */
3942 +#define DEC_IDLE (0x0001)
3943 +
3944 +/* ECC_DECFER */
3945 +#define DEC_FER0 (0x0001)
3946 +#define DEC_FER1 (0x0002)
3947 +#define DEC_FER2 (0x0004)
3948 +#define DEC_FER3 (0x0008)
3949 +#define DEC_FER4 (0x0010)
3950 +#define DEC_FER5 (0x0020)
3951 +#define DEC_FER6 (0x0040)
3952 +#define DEC_FER7 (0x0080)
3953 +
3954 +/* ECC_DECENUM */
3955 +#define ERR_NUM0 (0x0000000F)
3956 +#define ERR_NUM1 (0x000000F0)
3957 +#define ERR_NUM2 (0x00000F00)
3958 +#define ERR_NUM3 (0x0000F000)
3959 +#define ERR_NUM4 (0x000F0000)
3960 +#define ERR_NUM5 (0x00F00000)
3961 +#define ERR_NUM6 (0x0F000000)
3962 +#define ERR_NUM7 (0xF0000000)
3963 +
3964 +/* ECC_DECDONE */
3965 +#define DEC_DONE0 (0x0001)
3966 +#define DEC_DONE1 (0x0002)
3967 +#define DEC_DONE2 (0x0004)
3968 +#define DEC_DONE3 (0x0008)
3969 +#define DEC_DONE4 (0x0010)
3970 +#define DEC_DONE5 (0x0020)
3971 +#define DEC_DONE6 (0x0040)
3972 +#define DEC_DONE7 (0x0080)
3973 +
3974 +/* ECC_DECIRQEN */
3975 +#define DEC_IRQEN (0x0001)
3976 +
3977 +/* ECC_DECIRQSTA */
3978 +#define DEC_IRQSTA (0x0001)
3979 +
3980 +#define CHIPVER_ECO_1 (0x8a00)
3981 +#define CHIPVER_ECO_2 (0x8a01)
3982 +
3983 +//#define NAND_PFM
3984 +
3985 +/*******************************************************************************
3986 + * Data Structure Definition
3987 + *******************************************************************************/
3988 +struct mtk_nand_host
3989 +{
3990 + struct nand_chip nand_chip;
3991 + struct mtd_info mtd;
3992 + struct mtk_nand_host_hw *hw;
3993 +};
3994 +
3995 +struct NAND_CMD
3996 +{
3997 + u32 u4ColAddr;
3998 + u32 u4RowAddr;
3999 + u32 u4OOBRowAddr;
4000 + u8 au1OOB[288];
4001 + u8* pDataBuf;
4002 +#ifdef NAND_PFM
4003 + u32 pureReadOOB;
4004 + u32 pureReadOOBNum;
4005 +#endif
4006 +};
4007 +
4008 +/*
4009 + * ECC layout control structure. Exported to userspace for
4010 + * diagnosis and to allow creation of raw images
4011 +struct nand_ecclayout {
4012 + uint32_t eccbytes;
4013 + uint32_t eccpos[64];
4014 + uint32_t oobavail;
4015 + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
4016 +};
4017 +*/
4018 +#define __DEBUG_NAND 1 /* Debug information on/off */
4019 +
4020 +/* Debug message event */
4021 +#define DBG_EVT_NONE 0x00000000 /* No event */
4022 +#define DBG_EVT_INIT 0x00000001 /* Initial related event */
4023 +#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */
4024 +#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */
4025 +#define DBG_EVT_READ 0x00000008 /* Read related event */
4026 +#define DBG_EVT_WRITE 0x00000010 /* Write related event */
4027 +#define DBG_EVT_ERASE 0x00000020 /* Erase related event */
4028 +#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */
4029 +#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */
4030 +
4031 +#define DBG_EVT_ALL 0xffffffff
4032 +
4033 +#define DBG_EVT_MASK (DBG_EVT_INIT)
4034 +
4035 +#if __DEBUG_NAND
4036 +#define MSG(evt, fmt, args...) \
4037 +do { \
4038 + if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
4039 + printk(fmt, ##args); \
4040 + } \
4041 +} while(0)
4042 +
4043 +#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
4044 +#else
4045 +#define MSG(evt, fmt, args...) do{}while(0)
4046 +#define MSG_FUNC_ENTRY(f) do{}while(0)
4047 +#endif
4048 +
4049 +#define RAMDOM_READ 1<<0
4050 +#define CACHE_READ 1<<1
4051 +
4052 +typedef struct
4053 +{
4054 + u16 id; // device code + manufacturer (maker) code
4055 + u32 ext_id;
4056 + u8 addr_cycle;
4057 + u8 iowidth;
4058 + u16 totalsize;
4059 + u16 blocksize;
4060 + u16 pagesize;
4061 + u16 sparesize;
4062 + u32 timmingsetting;
4063 + char devciename[14];
4064 + u32 advancedmode; //
4065 +}flashdev_info,*pflashdev_info;
4066 +
4067 +/* NAND driver */
4068 +#if 0
4069 +struct mtk_nand_host_hw {
4070 + unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
4071 + unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
4072 + unsigned int nfi_cs_num; /* NFI_CS_NUM */
4073 + unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
4074 + unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
4075 + unsigned int nand_ecc_size;
4076 + unsigned int nand_ecc_bytes;
4077 + unsigned int nand_ecc_mode;
4078 +};
4079 +extern struct mtk_nand_host_hw mt7621_nand_hw;
4080 +extern u32 CFG_BLOCKSIZE;
4081 +#endif
4082 +#endif
4083 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
4084 index ceb68ca..04dbc69 100644
4085 --- a/drivers/mtd/nand/nand_base.c
4086 +++ b/drivers/mtd/nand/nand_base.c
4087 @@ -92,7 +92,7 @@ static struct nand_ecclayout nand_oob_128 = {
4088 .length = 78} }
4089 };
4090
4091 -static int nand_get_device(struct mtd_info *mtd, int new_state);
4092 +int nand_get_device(struct mtd_info *mtd, int new_state);
4093
4094 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
4095 struct mtd_oob_ops *ops);
4096 @@ -130,7 +130,7 @@ static int check_offs_len(struct mtd_info *mtd,
4097 *
4098 * Release chip lock and wake up anyone waiting on the device.
4099 */
4100 -static void nand_release_device(struct mtd_info *mtd)
4101 +void nand_release_device(struct mtd_info *mtd)
4102 {
4103 struct nand_chip *chip = mtd->priv;
4104
4105 @@ -820,7 +820,7 @@ static void panic_nand_get_device(struct nand_chip *chip,
4106 *
4107 * Get the device and lock it for exclusive access
4108 */
4109 -static int
4110 +int
4111 nand_get_device(struct mtd_info *mtd, int new_state)
4112 {
4113 struct nand_chip *chip = mtd->priv;
4114 diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
4115 index 63a1a36..d036b9a 100644
4116 --- a/drivers/mtd/nand/nand_bbt.c
4117 +++ b/drivers/mtd/nand/nand_bbt.c
4118 @@ -1374,4 +1374,23 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
4119 return ret;
4120 }
4121
4122 +void nand_bbt_set(struct mtd_info *mtd, int page, int flag)
4123 +{
4124 + struct nand_chip *this = mtd->priv;
4125 + int block;
4126 +
4127 + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4128 + this->bbt[block >> 3] &= ~(0x03 << (block & 0x6));
4129 + this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6);
4130 +}
4131 +
4132 +int nand_bbt_get(struct mtd_info *mtd, int page)
4133 +{
4134 + struct nand_chip *this = mtd->priv;
4135 + int block;
4136 +
4137 + block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
4138 + return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
4139 +}
4140 +
4141 EXPORT_SYMBOL(nand_scan_bbt);
4142 diff --git a/drivers/mtd/nand/nand_def.h b/drivers/mtd/nand/nand_def.h
4143 new file mode 100644
4144 index 0000000..82e957d
4145 --- /dev/null
4146 +++ b/drivers/mtd/nand/nand_def.h
4147 @@ -0,0 +1,123 @@
4148 +#ifndef __NAND_DEF_H__
4149 +#define __NAND_DEF_H__
4150 +
4151 +#define VERSION "v2.1 Fix AHB virt2phys error"
4152 +#define MODULE_NAME "# MTK NAND #"
4153 +#define PROCNAME "driver/nand"
4154 +
4155 +#undef TESTTIME
4156 +//#define __UBOOT_NAND__ 1
4157 +#define __KERNEL_NAND__ 1
4158 +//#define __PRELOADER_NAND__ 1
4159 +//#define PMT 1
4160 +//#define _MTK_NAND_DUMMY_DRIVER
4161 +//#define CONFIG_BADBLOCK_CHECK 1
4162 +//#ifdef CONFIG_BADBLOCK_CHECK
4163 +//#define MTK_NAND_BMT 1
4164 +//#endif
4165 +#define ECC_ENABLE 1
4166 +#define MANUAL_CORRECT 1
4167 +//#define __INTERNAL_USE_AHB_MODE__ (0)
4168 +#define SKIP_BAD_BLOCK
4169 +#define FACT_BBT
4170 +
4171 +#ifndef NAND_OTP_SUPPORT
4172 +#define NAND_OTP_SUPPORT 0
4173 +#endif
4174 +
4175 +/*******************************************************************************
4176 + * Macro definition
4177 + *******************************************************************************/
4178 +//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value)))
4179 +//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
4180 +//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
4181 +//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
4182 +
4183 +#if defined (__KERNEL_NAND__)
4184 +#define NFI_SET_REG32(reg, value) \
4185 +do { \
4186 + g_value = (DRV_Reg32(reg) | (value));\
4187 + DRV_WriteReg32(reg, g_value); \
4188 +} while(0)
4189 +
4190 +#define NFI_SET_REG16(reg, value) \
4191 +do { \
4192 + g_value = (DRV_Reg16(reg) | (value));\
4193 + DRV_WriteReg16(reg, g_value); \
4194 +} while(0)
4195 +
4196 +#define NFI_CLN_REG32(reg, value) \
4197 +do { \
4198 + g_value = (DRV_Reg32(reg) & (~(value)));\
4199 + DRV_WriteReg32(reg, g_value); \
4200 +} while(0)
4201 +
4202 +#define NFI_CLN_REG16(reg, value) \
4203 +do { \
4204 + g_value = (DRV_Reg16(reg) & (~(value)));\
4205 + DRV_WriteReg16(reg, g_value); \
4206 +} while(0)
4207 +#endif
4208 +
4209 +#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
4210 +#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
4211 +
4212 +
4213 +#define NAND_SECTOR_SIZE (512)
4214 +#define OOB_PER_SECTOR (16)
4215 +#define OOB_AVAI_PER_SECTOR (8)
4216 +
4217 +#ifndef PART_SIZE_BMTPOOL
4218 +#define BMT_POOL_SIZE (80)
4219 +#else
4220 +#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
4221 +#endif
4222 +
4223 +#define PMT_POOL_SIZE (2)
4224 +
4225 +#define TIMEOUT_1 0x1fff
4226 +#define TIMEOUT_2 0x8ff
4227 +#define TIMEOUT_3 0xffff
4228 +#define TIMEOUT_4 0xffff//5000 //PIO
4229 +
4230 +
4231 +/* temporary definitions */
4232 +#if !defined (__KERNEL_NAND__)
4233 +#define KERN_INFO
4234 +#define KERN_WARNING
4235 +#define KERN_ERR
4236 +#define PAGE_SIZE (4096)
4237 +#endif
4238 +#define AddStorageTrace //AddStorageTrace
4239 +#define STORAGE_LOGGER_MSG_NAND 0
4240 +#define NFI_BASE RALINK_NAND_CTRL_BASE
4241 +#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE
4242 +
4243 +#ifdef __INTERNAL_USE_AHB_MODE__
4244 +#define MT65xx_POLARITY_LOW 0
4245 +#define MT65XX_PDN_PERI_NFI 0
4246 +#define MT65xx_EDGE_SENSITIVE 0
4247 +#define MT6575_NFI_IRQ_ID (58)
4248 +#endif
4249 +
4250 +#if defined (__KERNEL_NAND__)
4251 +#define RALINK_REG(x) (*((volatile u32 *)(x)))
4252 +#define __virt_to_phys(x) virt_to_phys((volatile void*)x)
4253 +#else
4254 +#define CONFIG_MTD_NAND_VERIFY_WRITE (1)
4255 +#define printk printf
4256 +#define ra_dbg printf
4257 +#define BUG() //BUG()
4258 +#define BUG_ON(x) //BUG_ON()
4259 +#define NUM_PARTITIONS 1
4260 +#endif
4261 +
4262 +#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333)
4263 +
4264 +// U-Boot only supports 1 chip select
4265 +#define NFI_CS_NUM (1)
4266 +#define NFI_DEFAULT_CS (0)
4267 +
4268 +#include "mt6575_typedefs.h"
4269 +
4270 +#endif /* __NAND_DEF_H__ */
4271 diff --git a/drivers/mtd/nand/nand_device_list.h b/drivers/mtd/nand/nand_device_list.h
4272 new file mode 100644
4273 index 0000000..4c36b3a
4274 --- /dev/null
4275 +++ b/drivers/mtd/nand/nand_device_list.h
4276 @@ -0,0 +1,55 @@
4277 +/* Copyright Statement:
4278 + *
4279 + * This software/firmware and related documentation ("MediaTek Software") are
4280 + * protected under relevant copyright laws. The information contained herein
4281 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4282 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4283 + * any reproduction, modification, use or disclosure of MediaTek Software,
4284 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4285 + */
4286 +/* MediaTek Inc. (C) 2010. All rights reserved.
4287 + *
4288 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4289 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4290 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4291 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4292 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4293 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4294 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4295 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4296 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4297 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4298 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4299 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4300 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4301 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4302 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4303 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4304 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4305 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4306 + *
4307 + * The following software/firmware and/or related documentation ("MediaTek Software")
4308 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4309 + * applicable license agreements with MediaTek Inc.
4310 + */
4311 +
4312 +#ifndef __NAND_DEVICE_LIST_H__
4313 +#define __NAND_DEVICE_LIST_H__
4314 +
4315 +static const flashdev_info gen_FlashTable[]={
4316 + {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0},
4317 + {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0},
4318 + {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0},
4319 + {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
4320 + {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
4321 + {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
4322 + {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
4323 + {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
4324 + {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
4325 + {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0},
4326 + {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0},
4327 + {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0},
4328 +};
4329 +
4330 +
4331 +#endif
4332 diff --git a/drivers/mtd/nand/partition.h b/drivers/mtd/nand/partition.h
4333 new file mode 100644
4334 index 0000000..034e1af
4335 --- /dev/null
4336 +++ b/drivers/mtd/nand/partition.h
4337 @@ -0,0 +1,115 @@
4338 +/* Copyright Statement:
4339 + *
4340 + * This software/firmware and related documentation ("MediaTek Software") are
4341 + * protected under relevant copyright laws. The information contained herein
4342 + * is confidential and proprietary to MediaTek Inc. and/or its licensors.
4343 + * Without the prior written permission of MediaTek inc. and/or its licensors,
4344 + * any reproduction, modification, use or disclosure of MediaTek Software,
4345 + * and information contained herein, in whole or in part, shall be strictly prohibited.
4346 + */
4347 +/* MediaTek Inc. (C) 2010. All rights reserved.
4348 + *
4349 + * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
4350 + * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
4351 + * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
4352 + * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
4353 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
4354 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
4355 + * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
4356 + * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
4357 + * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
4358 + * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
4359 + * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
4360 + * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
4361 + * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
4362 + * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
4363 + * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
4364 + * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
4365 + * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
4366 + * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
4367 + *
4368 + * The following software/firmware and/or related documentation ("MediaTek Software")
4369 + * have been modified by MediaTek Inc. All revisions are subject to any receiver's
4370 + * applicable license agreements with MediaTek Inc.
4371 + */
4372 +
4373 +#include <linux/mtd/mtd.h>
4374 +#include <linux/mtd/nand.h>
4375 +#include <linux/mtd/partitions.h>
4376 +
4377 +#define RECONFIG_PARTITION_SIZE 1
4378 +
4379 +#define MTD_BOOT_PART_SIZE 0x80000
4380 +#define MTD_CONFIG_PART_SIZE 0x20000
4381 +#define MTD_FACTORY_PART_SIZE 0x20000
4382 +
4383 +extern unsigned int CFG_BLOCKSIZE;
4384 +#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2)
4385 +#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2)
4386 +#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1)
4387 +
4388 +/*=======================================================================*/
4389 +/* NAND PARTITION Mapping */
4390 +/*=======================================================================*/
4391 +//#ifdef CONFIG_MTD_PARTITIONS
4392 +static struct mtd_partition g_pasStatic_Partition[] = {
4393 + {
4394 + name: "ALL",
4395 + size: MTDPART_SIZ_FULL,
4396 + offset: 0,
4397 + },
4398 + /* Put your own partition definitions here */
4399 + {
4400 + name: "Bootloader",
4401 + size: MTD_BOOT_PART_SIZE,
4402 + offset: 0,
4403 + }, {
4404 + name: "Config",
4405 + size: MTD_CONFIG_PART_SIZE,
4406 + offset: MTDPART_OFS_APPEND
4407 + }, {
4408 + name: "Factory",
4409 + size: MTD_FACTORY_PART_SIZE,
4410 + offset: MTDPART_OFS_APPEND
4411 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4412 + }, {
4413 + name: "Kernel",
4414 + size: MTD_KERN_PART_SIZE,
4415 + offset: MTDPART_OFS_APPEND,
4416 + }, {
4417 + name: "RootFS",
4418 + size: MTD_ROOTFS_PART_SIZE,
4419 + offset: MTDPART_OFS_APPEND,
4420 +#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING
4421 + }, {
4422 + name: "Kernel_RootFS",
4423 + size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE,
4424 + offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE,
4425 +#endif
4426 +#else //CONFIG_RT2880_ROOTFS_IN_RAM
4427 + }, {
4428 + name: "Kernel",
4429 + size: 0x10000,
4430 + offset: MTDPART_OFS_APPEND,
4431 +#endif
4432 +#ifdef CONFIG_DUAL_IMAGE
4433 + }, {
4434 + name: "Kernel2",
4435 + size: MTD_KERN2_PART_SIZE,
4436 + offset: MTD_KERN2_PART_OFFSET,
4437 +#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
4438 + }, {
4439 + name: "RootFS2",
4440 + size: MTD_ROOTFS2_PART_SIZE,
4441 + offset: MTD_ROOTFS2_PART_OFFSET,
4442 +#endif
4443 +#endif
4444 + }
4445 +
4446 +};
4447 +
4448 +#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition)
4449 +extern int part_num; // = NUM_PARTITIONS;
4450 +//#endif
4451 +#undef RECONFIG_PARTITION_SIZE
4452 +
4453 --
4454 1.7.10.4
4455