1 From a369af5149e6eb442b22ce89b564dd7a76e03638 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Tue, 26 Apr 2016 19:05:01 +0200
4 Subject: [PATCH 072/102] mtd: backport v4.7-0day patches from Boris
6 Signed-off-by: John Crispin <blogic@openwrt.org>
8 drivers/mtd/Kconfig | 4 +-
9 drivers/mtd/cmdlinepart.c | 3 +-
10 drivers/mtd/devices/m25p80.c | 44 +--
11 drivers/mtd/maps/physmap_of.c | 6 +-
12 drivers/mtd/mtdchar.c | 123 ++++++--
13 drivers/mtd/mtdconcat.c | 2 +-
14 drivers/mtd/mtdcore.c | 428 ++++++++++++++++++++++++--
15 drivers/mtd/mtdcore.h | 7 +-
16 drivers/mtd/mtdpart.c | 161 ++++++----
17 drivers/mtd/mtdswap.c | 24 +-
18 drivers/mtd/nand/Kconfig | 21 +-
19 drivers/mtd/nand/Makefile | 2 +
20 drivers/mtd/nand/nand_base.c | 571 +++++++++++++++++++----------------
21 drivers/mtd/nand/nand_bbt.c | 34 +--
22 drivers/mtd/nand/nand_bch.c | 52 ++--
23 drivers/mtd/nand/nand_ecc.c | 6 +-
24 drivers/mtd/nand/nand_ids.c | 4 +-
25 drivers/mtd/nand/nandsim.c | 43 +--
26 drivers/mtd/ofpart.c | 53 ++--
27 drivers/mtd/spi-nor/Kconfig | 10 +-
28 drivers/mtd/spi-nor/Makefile | 1 +
29 drivers/mtd/spi-nor/mtk-quadspi.c | 485 +++++++++++++++++++++++++++++
30 drivers/mtd/spi-nor/spi-nor.c | 321 +++++++++++++-------
31 drivers/mtd/tests/mtd_nandecctest.c | 2 +-
32 drivers/mtd/tests/oobtest.c | 49 ++-
33 drivers/mtd/tests/pagetest.c | 3 +-
34 drivers/mtd/ubi/cdev.c | 4 +-
35 drivers/mtd/ubi/misc.c | 49 +++
36 drivers/mtd/ubi/ubi.h | 16 +-
37 drivers/mtd/ubi/upd.c | 2 +-
38 drivers/mtd/ubi/wl.c | 21 +-
39 include/linux/mtd/bbm.h | 1 -
40 include/linux/mtd/fsmc.h | 18 --
41 include/linux/mtd/inftl.h | 1 -
42 include/linux/mtd/map.h | 9 +-
43 include/linux/mtd/mtd.h | 80 ++++-
44 include/linux/mtd/nand.h | 94 ++++--
45 include/linux/mtd/nand_bch.h | 10 +-
46 include/linux/mtd/nftl.h | 1 -
47 include/linux/mtd/onenand.h | 2 -
48 include/linux/mtd/partitions.h | 27 +-
49 include/linux/mtd/sh_flctl.h | 4 +-
50 include/linux/mtd/sharpsl.h | 2 +-
51 include/linux/mtd/spi-nor.h | 23 +-
52 include/uapi/mtd/mtd-abi.h | 2 +-
53 45 files changed, 2077 insertions(+), 748 deletions(-)
54 create mode 100644 drivers/mtd/spi-nor/mtk-quadspi.c
56 --- a/drivers/mtd/Kconfig
57 +++ b/drivers/mtd/Kconfig
61 tristate "ARM Firmware Suite partition parsing"
63 + depends on (ARM || ARM64)
65 The ARM Firmware Suite allows the user to divide flash devices into
66 multiple 'images'. Each such image has a header containing its name
69 config MTD_BCM63XX_PARTS
70 tristate "BCM63XX CFE partitioning support"
72 + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
75 This provides partions parsing for BCM63xx devices with CFE
76 --- a/drivers/mtd/cmdlinepart.c
77 +++ b/drivers/mtd/cmdlinepart.c
79 * the first one in the chain if a NULL mtd_id is passed in.
81 static int parse_cmdline_partitions(struct mtd_info *master,
82 - struct mtd_partition **pparts,
83 + const struct mtd_partition **pparts,
84 struct mtd_part_parser_data *data)
86 unsigned long long offset;
88 __setup("mtdparts=", mtdpart_setup);
90 static struct mtd_part_parser cmdline_parser = {
91 - .owner = THIS_MODULE,
92 .parse_fn = parse_cmdline_partitions,
93 .name = "cmdlinepart",
95 --- a/drivers/mtd/devices/m25p80.c
96 +++ b/drivers/mtd/devices/m25p80.c
101 -static int m25p80_erase(struct spi_nor *nor, loff_t offset)
103 - struct m25p *flash = nor->priv;
105 - dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
106 - flash->spi_nor.mtd.erasesize / 1024, (u32)offset);
108 - /* Set up command buffer. */
109 - flash->command[0] = nor->erase_opcode;
110 - m25p_addr2cmd(nor, offset, flash->command);
112 - spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
118 * board specific setup should have ensured the SPI clock used here
119 * matches what the READ command supports, at least until this driver
120 @@ -197,12 +181,11 @@
122 static int m25p_probe(struct spi_device *spi)
124 - struct mtd_part_parser_data ppdata;
125 struct flash_platform_data *data;
128 enum read_mode mode = SPI_NOR_NORMAL;
129 - char *flash_name = NULL;
133 data = dev_get_platdata(&spi->dev);
134 @@ -216,12 +199,11 @@
135 /* install the hooks */
136 nor->read = m25p80_read;
137 nor->write = m25p80_write;
138 - nor->erase = m25p80_erase;
139 nor->write_reg = m25p80_write_reg;
140 nor->read_reg = m25p80_read_reg;
142 nor->dev = &spi->dev;
143 - nor->flash_node = spi->dev.of_node;
144 + spi_nor_set_flash_node(nor, spi->dev.of_node);
147 spi_set_drvdata(spi, flash);
150 if (data && data->type)
151 flash_name = data->type;
152 + else if (!strcmp(spi->modalias, "spi-nor"))
153 + flash_name = NULL; /* auto-detect */
155 flash_name = spi->modalias;
161 - ppdata.of_node = spi->dev.of_node;
163 - return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
164 - data ? data->parts : NULL,
165 - data ? data->nr_parts : 0);
166 + return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
167 + data ? data->nr_parts : 0);
171 @@ -279,14 +260,21 @@
173 static const struct spi_device_id m25p_ids[] = {
175 + * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
176 + * hack around the fact that the SPI core does not provide uevent
177 + * matching for .of_match_table
182 * Entries not used in DTs that should be safe to drop after replacing
183 - * them with "nor-jedec" in platform data.
184 + * them with "spi-nor" in platform data.
186 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
189 - * Entries that were used in DTs without "nor-jedec" fallback and should
190 - * be kept for backward compatibility.
191 + * Entries that were used in DTs without "jedec,spi-nor" fallback and
192 + * should be kept for backward compatibility.
194 {"at25df321a"}, {"at25df641"}, {"at26df081a"},
196 --- a/drivers/mtd/maps/physmap_of.c
197 +++ b/drivers/mtd/maps/physmap_of.c
200 struct mtd_info **mtd_list = NULL;
201 resource_size_t res_size;
202 - struct mtd_part_parser_data ppdata;
204 const char *mtd_name = NULL;
210 - ppdata.of_node = dp;
211 - mtd_device_parse_register(info->cmtd, part_probe_types_def, &ppdata,
212 + info->cmtd->dev.parent = &dev->dev;
213 + mtd_set_of_node(info->cmtd, dp);
214 + mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL,
218 --- a/drivers/mtd/mtdchar.c
219 +++ b/drivers/mtd/mtdchar.c
220 @@ -465,38 +465,111 @@
224 - * Copies (and truncates, if necessary) data from the larger struct,
225 - * nand_ecclayout, to the smaller, deprecated layout struct,
226 - * nand_ecclayout_user. This is necessary only to support the deprecated
227 - * API ioctl ECCGETLAYOUT while allowing all new functionality to use
228 - * nand_ecclayout flexibly (i.e. the struct may change size in new
229 - * releases without requiring major rewrites).
230 + * Copies (and truncates, if necessary) OOB layout information to the
231 + * deprecated layout struct, nand_ecclayout_user. This is necessary only to
232 + * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
233 + * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
234 + * can describe any kind of OOB layout with almost zero overhead from a
235 + * memory usage point of view).
237 -static int shrink_ecclayout(const struct nand_ecclayout *from,
238 - struct nand_ecclayout_user *to)
239 +static int shrink_ecclayout(struct mtd_info *mtd,
240 + struct nand_ecclayout_user *to)
243 + struct mtd_oob_region oobregion;
244 + int i, section = 0, ret;
250 memset(to, 0, sizeof(*to));
252 - to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
253 - for (i = 0; i < to->eccbytes; i++)
254 - to->eccpos[i] = from->eccpos[i];
256 + for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
259 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
261 + if (ret != -ERANGE)
267 + eccpos = oobregion.offset;
268 + for (; i < MTD_MAX_ECCPOS_ENTRIES &&
269 + eccpos < oobregion.offset + oobregion.length; i++) {
270 + to->eccpos[i] = eccpos++;
275 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
276 - if (from->oobfree[i].length == 0 &&
277 - from->oobfree[i].offset == 0)
278 + ret = mtd_ooblayout_free(mtd, i, &oobregion);
280 + if (ret != -ERANGE)
284 - to->oobavail += from->oobfree[i].length;
285 - to->oobfree[i] = from->oobfree[i];
288 + to->oobfree[i].offset = oobregion.offset;
289 + to->oobfree[i].length = oobregion.length;
290 + to->oobavail += to->oobfree[i].length;
296 +static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
298 + struct mtd_oob_region oobregion;
299 + int i, section = 0, ret;
304 + memset(to, 0, sizeof(*to));
307 + for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
310 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
312 + if (ret != -ERANGE)
318 + if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
321 + eccpos = oobregion.offset;
322 + for (; eccpos < oobregion.offset + oobregion.length; i++) {
323 + to->eccpos[i] = eccpos++;
328 + for (i = 0; i < 8; i++) {
329 + ret = mtd_ooblayout_free(mtd, i, &oobregion);
331 + if (ret != -ERANGE)
337 + to->oobfree[i][0] = oobregion.offset;
338 + to->oobfree[i][1] = oobregion.length;
341 + to->useecc = MTD_NANDECC_AUTOPLACE;
346 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
347 struct blkpg_ioctl_arg *arg)
349 @@ -815,16 +888,12 @@
351 struct nand_oobinfo oi;
353 - if (!mtd->ecclayout)
354 + if (!mtd->ooblayout)
356 - if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
359 - oi.useecc = MTD_NANDECC_AUTOPLACE;
360 - memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
361 - memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
362 - sizeof(oi.oobfree));
363 - oi.eccbytes = mtd->ecclayout->eccbytes;
364 + ret = get_oobinfo(mtd, &oi);
368 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
370 @@ -913,14 +982,14 @@
372 struct nand_ecclayout_user *usrlay;
374 - if (!mtd->ecclayout)
375 + if (!mtd->ooblayout)
378 usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
382 - shrink_ecclayout(mtd->ecclayout, usrlay);
383 + shrink_ecclayout(mtd, usrlay);
385 if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
387 --- a/drivers/mtd/mtdconcat.c
388 +++ b/drivers/mtd/mtdconcat.c
393 - concat->mtd.ecclayout = subdev[0]->ecclayout;
394 + mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
396 concat->num_subdev = num_devs;
397 concat->mtd.name = name;
398 --- a/drivers/mtd/mtdcore.c
399 +++ b/drivers/mtd/mtdcore.c
401 #include <linux/err.h>
402 #include <linux/ioctl.h>
403 #include <linux/init.h>
404 +#include <linux/of.h>
405 #include <linux/proc_fs.h>
406 #include <linux/idr.h>
407 #include <linux/backing-dev.h>
409 mtd->dev.devt = MTD_DEVT(i);
410 dev_set_name(&mtd->dev, "mtd%d", i);
411 dev_set_drvdata(&mtd->dev, mtd);
412 + of_node_get(mtd_get_of_node(mtd));
413 error = device_register(&mtd->dev);
420 + of_node_put(mtd_get_of_node(mtd));
421 idr_remove(&mtd_idr, i);
423 mutex_unlock(&mtd_table_mutex);
425 device_unregister(&mtd->dev);
427 idr_remove(&mtd_idr, mtd->index);
428 + of_node_put(mtd_get_of_node(mtd));
430 module_put(THIS_MODULE);
435 static int mtd_add_device_partitions(struct mtd_info *mtd,
436 - struct mtd_partition *real_parts,
438 + struct mtd_partitions *parts)
440 + const struct mtd_partition *real_parts = parts->parts;
441 + int nbparts = parts->nr_parts;
444 if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
445 @@ -600,29 +605,29 @@
446 const struct mtd_partition *parts,
449 + struct mtd_partitions parsed;
451 - struct mtd_partition *real_parts = NULL;
453 mtd_set_dev_defaults(mtd);
455 - ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
456 - if (ret <= 0 && nr_parts && parts) {
457 - real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
464 - /* Didn't come up with either parsed OR fallback partitions */
466 - pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
467 + memset(&parsed, 0, sizeof(parsed));
469 + ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
470 + if ((ret < 0 || parsed.nr_parts == 0) && parts && nr_parts) {
471 + /* Fall back to driver-provided partitions */
472 + parsed = (struct mtd_partitions){
474 + .nr_parts = nr_parts,
476 + } else if (ret < 0) {
477 + /* Didn't come up with parsed OR fallback partitions */
478 + pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
480 /* Don't abort on errors; we can still use unpartitioned MTD */
482 + memset(&parsed, 0, sizeof(parsed));
485 - ret = mtd_add_device_partitions(mtd, real_parts, ret);
486 + ret = mtd_add_device_partitions(mtd, ret);
495 + /* Cleanup any parsed partitions */
496 + mtd_part_parser_cleanup(&parsed);
499 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
502 EXPORT_SYMBOL_GPL(get_mtd_device);
505 int __get_mtd_device(struct mtd_info *mtd)
508 @@ -1001,6 +1006,366 @@
510 EXPORT_SYMBOL_GPL(mtd_read_oob);
513 + * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
514 + * @mtd: MTD device structure
515 + * @section: ECC section. Depending on the layout you may have all the ECC
516 + * bytes stored in a single contiguous section, or one section
517 + * per ECC chunk (and sometime several sections for a single ECC
519 + * @oobecc: OOB region struct filled with the appropriate ECC position
522 + * This function returns ECC section information in the OOB area. If you want
523 + * to get all the ECC bytes information, then you should call
524 + * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
526 + * Returns zero on success, a negative error code otherwise.
528 +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
529 + struct mtd_oob_region *oobecc)
531 + memset(oobecc, 0, sizeof(*oobecc));
533 + if (!mtd || section < 0)
536 + if (!mtd->ooblayout || !mtd->ooblayout->ecc)
539 + return mtd->ooblayout->ecc(mtd, section, oobecc);
541 +EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
544 + * mtd_ooblayout_free - Get the OOB region definition of a specific free
546 + * @mtd: MTD device structure
547 + * @section: Free section you are interested in. Depending on the layout
548 + * you may have all the free bytes stored in a single contiguous
549 + * section, or one section per ECC chunk plus an extra section
550 + * for the remaining bytes (or other funky layout).
551 + * @oobfree: OOB region struct filled with the appropriate free position
554 + * This function returns the free bytes position in the OOB area. If you want
555 + * to get all the free bytes information, then you should call
556 + * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
558 + * Returns zero on success, a negative error code otherwise.
560 +int mtd_ooblayout_free(struct mtd_info *mtd, int section,
561 + struct mtd_oob_region *oobfree)
563 + memset(oobfree, 0, sizeof(*oobfree));
565 + if (!mtd || section < 0)
568 + if (!mtd->ooblayout || !mtd->ooblayout->free)
571 + return mtd->ooblayout->free(mtd, section, oobfree);
573 +EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
576 + * mtd_ooblayout_find_region - Find the region attached to a specific byte
577 + * @mtd: mtd info structure
578 + * @byte: the byte we are searching for
579 + * @sectionp: pointer where the section id will be stored
580 + * @oobregion: used to retrieve the ECC position
581 + * @iter: iterator function. Should be either mtd_ooblayout_free or
582 + * mtd_ooblayout_ecc depending on the region type you're searching for
584 + * This functions returns the section id and oobregion information of a
585 + * specific byte. For example, say you want to know where the 4th ECC byte is
586 + * stored, you'll use:
588 + * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
590 + * Returns zero on success, a negative error code otherwise.
592 +static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
593 + int *sectionp, struct mtd_oob_region *oobregion,
594 + int (*iter)(struct mtd_info *,
596 + struct mtd_oob_region *oobregion))
598 + int pos = 0, ret, section = 0;
600 + memset(oobregion, 0, sizeof(*oobregion));
603 + ret = iter(mtd, section, oobregion);
607 + if (pos + oobregion->length > byte)
610 + pos += oobregion->length;
615 + * Adjust region info to make it start at the beginning at the
616 + * 'start' ECC byte.
618 + oobregion->offset += byte - pos;
619 + oobregion->length -= byte - pos;
620 + *sectionp = section;
626 + * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
628 + * @mtd: mtd info structure
629 + * @eccbyte: the byte we are searching for
630 + * @sectionp: pointer where the section id will be stored
631 + * @oobregion: OOB region information
633 + * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
636 + * Returns zero on success, a negative error code otherwise.
638 +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
640 + struct mtd_oob_region *oobregion)
642 + return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
643 + mtd_ooblayout_ecc);
645 +EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
648 + * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
649 + * @mtd: mtd info structure
650 + * @buf: destination buffer to store OOB bytes
651 + * @oobbuf: OOB buffer
652 + * @start: first byte to retrieve
653 + * @nbytes: number of bytes to retrieve
654 + * @iter: section iterator
656 + * Extract bytes attached to a specific category (ECC or free)
657 + * from the OOB buffer and copy them into buf.
659 + * Returns zero on success, a negative error code otherwise.
661 +static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
662 + const u8 *oobbuf, int start, int nbytes,
663 + int (*iter)(struct mtd_info *,
665 + struct mtd_oob_region *oobregion))
667 + struct mtd_oob_region oobregion = { };
668 + int section = 0, ret;
670 + ret = mtd_ooblayout_find_region(mtd, start, &section,
676 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
677 + memcpy(buf, oobbuf + oobregion.offset, cnt);
684 + ret = iter(mtd, ++section, &oobregion);
691 + * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
692 + * @mtd: mtd info structure
693 + * @buf: source buffer to get OOB bytes from
694 + * @oobbuf: OOB buffer
695 + * @start: first OOB byte to set
696 + * @nbytes: number of OOB bytes to set
697 + * @iter: section iterator
699 + * Fill the OOB buffer with data provided in buf. The category (ECC or free)
700 + * is selected by passing the appropriate iterator.
702 + * Returns zero on success, a negative error code otherwise.
704 +static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
705 + u8 *oobbuf, int start, int nbytes,
706 + int (*iter)(struct mtd_info *,
708 + struct mtd_oob_region *oobregion))
710 + struct mtd_oob_region oobregion = { };
711 + int section = 0, ret;
713 + ret = mtd_ooblayout_find_region(mtd, start, &section,
719 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
720 + memcpy(oobbuf + oobregion.offset, buf, cnt);
727 + ret = iter(mtd, ++section, &oobregion);
734 + * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
735 + * @mtd: mtd info structure
736 + * @iter: category iterator
738 + * Count the number of bytes in a given category.
740 + * Returns a positive value on success, a negative error code otherwise.
742 +static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
743 + int (*iter)(struct mtd_info *,
745 + struct mtd_oob_region *oobregion))
747 + struct mtd_oob_region oobregion = { };
748 + int section = 0, ret, nbytes = 0;
751 + ret = iter(mtd, section++, &oobregion);
753 + if (ret == -ERANGE)
758 + nbytes += oobregion.length;
765 + * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
766 + * @mtd: mtd info structure
767 + * @eccbuf: destination buffer to store ECC bytes
768 + * @oobbuf: OOB buffer
769 + * @start: first ECC byte to retrieve
770 + * @nbytes: number of ECC bytes to retrieve
772 + * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
774 + * Returns zero on success, a negative error code otherwise.
776 +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
777 + const u8 *oobbuf, int start, int nbytes)
779 + return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
780 + mtd_ooblayout_ecc);
782 +EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
785 + * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
786 + * @mtd: mtd info structure
787 + * @eccbuf: source buffer to get ECC bytes from
788 + * @oobbuf: OOB buffer
789 + * @start: first ECC byte to set
790 + * @nbytes: number of ECC bytes to set
792 + * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
794 + * Returns zero on success, a negative error code otherwise.
796 +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
797 + u8 *oobbuf, int start, int nbytes)
799 + return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
800 + mtd_ooblayout_ecc);
802 +EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
805 + * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
806 + * @mtd: mtd info structure
807 + * @databuf: destination buffer to store data bytes
808 + * @oobbuf: OOB buffer
809 + * @start: first data byte to retrieve
810 + * @nbytes: number of data bytes to retrieve
812 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
814 + * Returns zero on success, a negative error code otherwise.
816 +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
817 + const u8 *oobbuf, int start, int nbytes)
819 + return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
820 + mtd_ooblayout_free);
822 +EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
825 + * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
826 + * @mtd: mtd info structure
827 + * @databuf: source buffer to get data bytes from
828 + * @oobbuf: OOB buffer
829 + * @start: first data byte to set
830 + * @nbytes: number of data bytes to set
832 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
834 + * Returns zero on success, a negative error code otherwise.
836 +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
837 + u8 *oobbuf, int start, int nbytes)
839 + return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
840 + mtd_ooblayout_free);
842 +EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
845 + * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
846 + * @mtd: mtd info structure
848 + * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
850 + * Returns zero on success, a negative error code otherwise.
852 +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
854 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
856 +EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
859 + * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
860 + * @mtd: mtd info structure
862 + * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
864 + * Returns zero on success, a negative error code otherwise.
866 +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
868 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
870 +EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
873 * Method to access the protection register area, present in some flash
874 * devices. The user data is one time programmable but the factory data is read
875 --- a/drivers/mtd/mtdcore.h
876 +++ b/drivers/mtd/mtdcore.h
878 int del_mtd_device(struct mtd_info *mtd);
879 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
880 int del_mtd_partitions(struct mtd_info *);
882 +struct mtd_partitions;
884 int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
885 - struct mtd_partition **pparts,
886 + struct mtd_partitions *pparts,
887 struct mtd_part_parser_data *data);
889 +void mtd_part_parser_cleanup(struct mtd_partitions *parts);
891 int __init init_mtdchar(void);
892 void __exit cleanup_mtdchar(void);
894 --- a/drivers/mtd/mtdpart.c
895 +++ b/drivers/mtd/mtdpart.c
899 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
900 - * the pointer to that structure with this macro.
901 + * the pointer to that structure.
903 -#define PART(x) ((struct mtd_part *)(x))
904 +static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
906 + return container_of(mtd, struct mtd_part, mtd);
912 static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
913 size_t *retlen, u_char *buf)
915 - struct mtd_part *part = PART(mtd);
916 + struct mtd_part *part = mtd_to_part(mtd);
917 struct mtd_ecc_stats stats;
921 static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
922 size_t *retlen, void **virt, resource_size_t *phys)
924 - struct mtd_part *part = PART(mtd);
925 + struct mtd_part *part = mtd_to_part(mtd);
927 return part->master->_point(part->master, from + part->offset, len,
931 static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
933 - struct mtd_part *part = PART(mtd);
934 + struct mtd_part *part = mtd_to_part(mtd);
936 return part->master->_unpoint(part->master, from + part->offset, len);
939 unsigned long offset,
942 - struct mtd_part *part = PART(mtd);
943 + struct mtd_part *part = mtd_to_part(mtd);
945 offset += part->offset;
946 return part->master->_get_unmapped_area(part->master, len, offset,
948 static int part_read_oob(struct mtd_info *mtd, loff_t from,
949 struct mtd_oob_ops *ops)
951 - struct mtd_part *part = PART(mtd);
952 + struct mtd_part *part = mtd_to_part(mtd);
955 if (from >= mtd->size)
960 - if (ops->mode == MTD_OPS_AUTO_OOB)
961 - len = mtd->oobavail;
963 - len = mtd->oobsize;
964 + len = mtd_oobavail(mtd, ops);
965 pages = mtd_div_by_ws(mtd->size, mtd);
966 pages -= mtd_div_by_ws(from, mtd);
967 if (ops->ooboffs + ops->ooblen > pages * len)
969 static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
970 size_t len, size_t *retlen, u_char *buf)
972 - struct mtd_part *part = PART(mtd);
973 + struct mtd_part *part = mtd_to_part(mtd);
974 return part->master->_read_user_prot_reg(part->master, from, len,
978 static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
979 size_t *retlen, struct otp_info *buf)
981 - struct mtd_part *part = PART(mtd);
982 + struct mtd_part *part = mtd_to_part(mtd);
983 return part->master->_get_user_prot_info(part->master, len, retlen,
987 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
988 size_t len, size_t *retlen, u_char *buf)
990 - struct mtd_part *part = PART(mtd);
991 + struct mtd_part *part = mtd_to_part(mtd);
992 return part->master->_read_fact_prot_reg(part->master, from, len,
996 static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
997 size_t *retlen, struct otp_info *buf)
999 - struct mtd_part *part = PART(mtd);
1000 + struct mtd_part *part = mtd_to_part(mtd);
1001 return part->master->_get_fact_prot_info(part->master, len, retlen,
1005 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
1006 size_t *retlen, const u_char *buf)
1008 - struct mtd_part *part = PART(mtd);
1009 + struct mtd_part *part = mtd_to_part(mtd);
1010 return part->master->_write(part->master, to + part->offset, len,
1014 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1015 size_t *retlen, const u_char *buf)
1017 - struct mtd_part *part = PART(mtd);
1018 + struct mtd_part *part = mtd_to_part(mtd);
1019 return part->master->_panic_write(part->master, to + part->offset, len,
1023 static int part_write_oob(struct mtd_info *mtd, loff_t to,
1024 struct mtd_oob_ops *ops)
1026 - struct mtd_part *part = PART(mtd);
1027 + struct mtd_part *part = mtd_to_part(mtd);
1029 if (to >= mtd->size)
1032 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1033 size_t len, size_t *retlen, u_char *buf)
1035 - struct mtd_part *part = PART(mtd);
1036 + struct mtd_part *part = mtd_to_part(mtd);
1037 return part->master->_write_user_prot_reg(part->master, from, len,
1040 @@ -221,21 +221,21 @@
1041 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1044 - struct mtd_part *part = PART(mtd);
1045 + struct mtd_part *part = mtd_to_part(mtd);
1046 return part->master->_lock_user_prot_reg(part->master, from, len);
1049 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
1050 unsigned long count, loff_t to, size_t *retlen)
1052 - struct mtd_part *part = PART(mtd);
1053 + struct mtd_part *part = mtd_to_part(mtd);
1054 return part->master->_writev(part->master, vecs, count,
1055 to + part->offset, retlen);
1058 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
1060 - struct mtd_part *part = PART(mtd);
1061 + struct mtd_part *part = mtd_to_part(mtd);
1066 void mtd_erase_callback(struct erase_info *instr)
1068 if (instr->mtd->_erase == part_erase) {
1069 - struct mtd_part *part = PART(instr->mtd);
1070 + struct mtd_part *part = mtd_to_part(instr->mtd);
1073 if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
1074 @@ -330,13 +330,13 @@
1076 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1078 - struct mtd_part *part = PART(mtd);
1079 + struct mtd_part *part = mtd_to_part(mtd);
1080 return part->master->_lock(part->master, ofs + part->offset, len);
1083 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1085 - struct mtd_part *part = PART(mtd);
1086 + struct mtd_part *part = mtd_to_part(mtd);
1088 ofs += part->offset;
1089 if (mtd->flags & MTD_ERASE_PARTIAL) {
1090 @@ -349,45 +349,45 @@
1092 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1094 - struct mtd_part *part = PART(mtd);
1095 + struct mtd_part *part = mtd_to_part(mtd);
1096 return part->master->_is_locked(part->master, ofs + part->offset, len);
1099 static void part_sync(struct mtd_info *mtd)
1101 - struct mtd_part *part = PART(mtd);
1102 + struct mtd_part *part = mtd_to_part(mtd);
1103 part->master->_sync(part->master);
1106 static int part_suspend(struct mtd_info *mtd)
1108 - struct mtd_part *part = PART(mtd);
1109 + struct mtd_part *part = mtd_to_part(mtd);
1110 return part->master->_suspend(part->master);
1113 static void part_resume(struct mtd_info *mtd)
1115 - struct mtd_part *part = PART(mtd);
1116 + struct mtd_part *part = mtd_to_part(mtd);
1117 part->master->_resume(part->master);
1120 static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1122 - struct mtd_part *part = PART(mtd);
1123 + struct mtd_part *part = mtd_to_part(mtd);
1124 ofs += part->offset;
1125 return part->master->_block_isreserved(part->master, ofs);
1128 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
1130 - struct mtd_part *part = PART(mtd);
1131 + struct mtd_part *part = mtd_to_part(mtd);
1132 ofs += part->offset;
1133 return part->master->_block_isbad(part->master, ofs);
1136 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
1138 - struct mtd_part *part = PART(mtd);
1139 + struct mtd_part *part = mtd_to_part(mtd);
1142 ofs += part->offset;
1143 @@ -397,6 +397,27 @@
1147 +static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
1148 + struct mtd_oob_region *oobregion)
1150 + struct mtd_part *part = mtd_to_part(mtd);
1152 + return mtd_ooblayout_ecc(part->master, section, oobregion);
1155 +static int part_ooblayout_free(struct mtd_info *mtd, int section,
1156 + struct mtd_oob_region *oobregion)
1158 + struct mtd_part *part = mtd_to_part(mtd);
1160 + return mtd_ooblayout_free(part->master, section, oobregion);
1163 +static const struct mtd_ooblayout_ops part_ooblayout_ops = {
1164 + .ecc = part_ooblayout_ecc,
1165 + .free = part_ooblayout_free,
1168 static inline void free_partition(struct mtd_part *p)
1172 slave->mtd.erasesize = slave->mtd.size;
1175 - slave->mtd.ecclayout = master->ecclayout;
1176 + mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
1177 slave->mtd.ecc_step_size = master->ecc_step_size;
1178 slave->mtd.ecc_strength = master->ecc_strength;
1179 slave->mtd.bitflip_threshold = master->bitflip_threshold;
1181 struct device_attribute *attr, char *buf)
1183 struct mtd_info *mtd = dev_get_drvdata(dev);
1184 - struct mtd_part *part = PART(mtd);
1185 + struct mtd_part *part = mtd_to_part(mtd);
1186 return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
1189 @@ -677,11 +698,10 @@
1193 + memset(&part, 0, sizeof(part));
1196 part.offset = offset;
1197 - part.mask_flags = 0;
1198 - part.ecclayout = NULL;
1200 new = allocate_partition(master, &part, -1, offset);
1203 static DEFINE_SPINLOCK(part_parser_lock);
1204 static LIST_HEAD(part_parsers);
1206 -static struct mtd_part_parser *get_partition_parser(const char *name)
1207 +static struct mtd_part_parser *mtd_part_parser_get(const char *name)
1209 struct mtd_part_parser *p, *ret = NULL;
1211 @@ -862,7 +882,20 @@
1215 -#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
1216 +static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
1218 + module_put(p->owner);
1222 + * Many partition parsers just expected the core to kfree() all their data in
1223 + * one chunk. Do that by default.
1225 +static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
1231 static struct mtd_part_parser *
1232 get_partition_parser_by_type(enum mtd_parser_type type,
1235 p = list_prepare_entry(start, &part_parsers, list);
1237 - put_partition_parser(start);
1238 + mtd_part_parser_put(start);
1240 list_for_each_entry_continue(p, &part_parsers, list) {
1241 if (p->type == type && try_module_get(p->owner)) {
1242 @@ -888,13 +921,19 @@
1246 -void register_mtd_parser(struct mtd_part_parser *p)
1248 +int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
1253 + p->cleanup = &mtd_part_parser_cleanup_default;
1255 spin_lock(&part_parser_lock);
1256 list_add(&p->list, &part_parsers);
1257 spin_unlock(&part_parser_lock);
1260 -EXPORT_SYMBOL_GPL(register_mtd_parser);
1261 +EXPORT_SYMBOL_GPL(__register_mtd_parser);
1263 void deregister_mtd_parser(struct mtd_part_parser *p)
1266 * parse_mtd_partitions - parse MTD partitions
1267 * @master: the master partition (describes whole MTD device)
1268 * @types: names of partition parsers to try or %NULL
1269 - * @pparts: array of partitions found is returned here
1270 + * @pparts: info about partitions found is returned here
1271 * @data: MTD partition parser-specific data
1273 * This function tries to find partition on MTD device @master. It uses MTD
1274 @@ -966,45 +1005,42 @@
1276 * This function may return:
1277 * o a negative error code in case of failure
1278 - * o zero if no partitions were found
1279 - * o a positive number of found partitions, in which case on exit @pparts will
1280 - * point to an array containing this number of &struct mtd_info objects.
1281 + * o zero otherwise, and @pparts will describe the partitions, number of
1282 + * partitions, and the parser which parsed them. Caller must release
1283 + * resources with mtd_part_parser_cleanup() when finished with the returned
1286 int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
1287 - struct mtd_partition **pparts,
1288 + struct mtd_partitions *pparts,
1289 struct mtd_part_parser_data *data)
1291 struct mtd_part_parser *parser;
1293 const char *const *types_of = NULL;
1295 - if (data && data->of_node) {
1296 - types_of = of_get_probes(data->of_node);
1297 - if (types_of != NULL)
1302 types = default_mtd_part_types;
1304 for ( ; *types; types++) {
1305 pr_debug("%s: parsing partitions %s\n", master->name, *types);
1306 - parser = get_partition_parser(*types);
1307 + parser = mtd_part_parser_get(*types);
1308 if (!parser && !request_module("%s", *types))
1309 - parser = get_partition_parser(*types);
1310 + parser = mtd_part_parser_get(*types);
1311 pr_debug("%s: got parser %s\n", master->name,
1312 parser ? parser->name : NULL);
1315 - ret = (*parser->parse_fn)(master, pparts, data);
1316 + ret = (*parser->parse_fn)(master, &pparts->parts, data);
1317 pr_debug("%s: parser %s: %i\n",
1318 master->name, parser->name, ret);
1319 - put_partition_parser(parser);
1321 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
1322 ret, parser->name, master->name);
1324 + pparts->nr_parts = ret;
1325 + pparts->parser = parser;
1328 + mtd_part_parser_put(parser);
1330 * Stash the first error we see; only report it if no parser
1332 @@ -1034,7 +1070,7 @@
1333 ret = (*parser->parse_fn)(master, pparts, data);
1336 - put_partition_parser(parser);
1337 + mtd_part_parser_put(parser);
1339 "%d %s partitions found on MTD device %s\n",
1340 ret, parser->name, master->name);
1341 @@ -1048,6 +1084,22 @@
1343 EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
1345 +void mtd_part_parser_cleanup(struct mtd_partitions *parts)
1347 + const struct mtd_part_parser *parser;
1352 + parser = parts->parser;
1354 + if (parser->cleanup)
1355 + parser->cleanup(parts->parts, parts->nr_parts);
1357 + mtd_part_parser_put(parser);
1361 int mtd_is_partition(const struct mtd_info *mtd)
1363 struct mtd_part *part;
1364 @@ -1070,7 +1122,7 @@
1365 if (!mtd_is_partition(mtd))
1366 return (struct mtd_info *)mtd;
1368 - return PART(mtd)->master;
1369 + return mtd_to_part(mtd)->master;
1371 EXPORT_SYMBOL_GPL(mtdpart_get_master);
1373 @@ -1079,7 +1131,7 @@
1374 if (!mtd_is_partition(mtd))
1377 - return PART(mtd)->offset;
1378 + return mtd_to_part(mtd)->offset;
1380 EXPORT_SYMBOL_GPL(mtdpart_get_offset);
1382 @@ -1089,6 +1141,6 @@
1383 if (!mtd_is_partition(mtd))
1386 - return PART(mtd)->master->size;
1387 + return mtd_to_part(mtd)->master->size;
1389 EXPORT_SYMBOL_GPL(mtd_get_device_size);
1390 --- a/drivers/mtd/mtdswap.c
1391 +++ b/drivers/mtd/mtdswap.c
1393 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
1394 return MTDSWAP_SCANNED_BAD;
1396 - ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
1397 + ops.ooblen = 2 * d->mtd->oobavail;
1398 ops.oobbuf = d->oob_buf;
1403 data = (struct mtdswap_oobdata *)d->oob_buf;
1404 data2 = (struct mtdswap_oobdata *)
1405 - (d->oob_buf + d->mtd->ecclayout->oobavail);
1406 + (d->oob_buf + d->mtd->oobavail);
1408 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
1409 eb->erase_count = le32_to_cpu(data->count);
1412 ops.mode = MTD_OPS_AUTO_OOB;
1413 ops.len = mtd->writesize;
1414 - ops.ooblen = mtd->ecclayout->oobavail;
1415 + ops.ooblen = mtd->oobavail;
1417 ops.datbuf = d->page_buf;
1418 ops.oobbuf = d->oob_buf;
1420 for (i = 0; i < mtd_pages; i++) {
1421 patt = mtdswap_test_patt(test + i);
1422 memset(d->page_buf, patt, mtd->writesize);
1423 - memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
1424 + memset(d->oob_buf, patt, mtd->oobavail);
1425 ret = mtd_write_oob(mtd, pos, &ops);
1432 - for (j = 0; j < mtd->ecclayout->oobavail; j++)
1433 + for (j = 0; j < mtd->oobavail; j++)
1434 if (p2[j] != (unsigned char)patt)
1437 @@ -1387,7 +1387,7 @@
1441 - d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
1442 + d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
1446 @@ -1417,7 +1417,6 @@
1448 unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
1449 uint64_t swap_size, use_size, size_limit;
1450 - struct nand_ecclayout *oinfo;
1453 parts = &partitions[0];
1454 @@ -1447,17 +1446,10 @@
1458 - oinfo = mtd->ecclayout;
1460 - printk(KERN_ERR "%s: mtd%d does not have OOB\n",
1461 - MTDSWAP_PREFIX, mtd->index);
1465 - if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
1466 + if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
1467 printk(KERN_ERR "%s: Not enough free bytes in OOB, "
1468 "%d available, %zu needed.\n",
1469 - MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
1470 + MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
1474 --- a/drivers/mtd/nand/Kconfig
1475 +++ b/drivers/mtd/nand/Kconfig
1477 config MTD_NAND_DENALI_DT
1478 tristate "Support Denali NAND controller as a DT device"
1479 select MTD_NAND_DENALI
1480 - depends on HAS_DMA && HAVE_CLK
1481 + depends on HAS_DMA && HAVE_CLK && OF
1483 Enable the driver for NAND flash on platforms using a Denali NAND
1484 controller as a DT device.
1486 config MTD_NAND_GPIO
1487 tristate "GPIO assisted NAND Flash driver"
1488 depends on GPIOLIB || COMPILE_TEST
1489 + depends on HAS_IOMEM
1491 This enables a NAND flash driver where control signals are
1492 connected to GPIO pins, and commands and data are communicated
1494 config MTD_NAND_CS553X
1495 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
1497 + depends on !UML && HAS_IOMEM
1499 The CS553x companion chips for the AMD Geode processor
1500 include NAND flash controllers with built-in hardware ECC
1502 config MTD_NAND_VF610_NFC
1503 tristate "Support for Freescale NFC for VF610/MPC5125"
1504 depends on (SOC_VF610 || COMPILE_TEST)
1505 + depends on HAS_IOMEM
1507 Enables support for NAND Flash Controller on some Freescale
1508 processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
1511 config MTD_NAND_SH_FLCTL
1512 tristate "Support for NAND on Renesas SuperH FLCTL"
1513 - depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
1514 + depends on SUPERH || COMPILE_TEST
1515 depends on HAS_IOMEM
1518 @@ -519,6 +522,13 @@
1520 Enables support for NAND Flash on JZ4740 SoC based boards.
1522 +config MTD_NAND_JZ4780
1523 + tristate "Support for NAND on JZ4780 SoC"
1524 + depends on MACH_JZ4780 && JZ4780_NEMC
1526 + Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
1527 + based boards, using the BCH controller for hardware error correction.
1529 config MTD_NAND_FSMC
1530 tristate "Support for NAND on ST Micros FSMC"
1531 depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
1532 @@ -546,4 +556,11 @@
1534 Enables support for NAND controller on Hisilicon SoC Hip04.
1536 +config MTD_NAND_QCOM
1537 + tristate "Support for NAND on QCOM SoCs"
1538 + depends on ARCH_QCOM
1540 + Enables support for NAND flash chips on SoCs containing the EBI2 NAND
1541 + controller. This controller is found on IPQ806x SoC.
1544 --- a/drivers/mtd/nand/Makefile
1545 +++ b/drivers/mtd/nand/Makefile
1547 obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
1548 obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
1549 obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
1550 +obj-$(CONFIG_MTD_NAND_JZ4780) += jz4780_nand.o jz4780_bch.o
1551 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
1552 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
1553 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
1554 obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
1555 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
1556 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
1557 +obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
1559 nand-objs := nand_base.o nand_bbt.o nand_timings.o
1560 --- a/drivers/mtd/nand/nand_base.c
1561 +++ b/drivers/mtd/nand/nand_base.c
1563 #include <linux/mtd/partitions.h>
1564 #include <linux/of_mtd.h>
1566 -/* Define default oob placement schemes for large and small page devices */
1567 -static struct nand_ecclayout nand_oob_8 = {
1569 - .eccpos = {0, 1, 2},
1577 -static struct nand_ecclayout nand_oob_16 = {
1579 - .eccpos = {0, 1, 2, 3, 6, 7},
1585 -static struct nand_ecclayout nand_oob_64 = {
1588 - 40, 41, 42, 43, 44, 45, 46, 47,
1589 - 48, 49, 50, 51, 52, 53, 54, 55,
1590 - 56, 57, 58, 59, 60, 61, 62, 63},
1596 -static struct nand_ecclayout nand_oob_128 = {
1599 - 80, 81, 82, 83, 84, 85, 86, 87,
1600 - 88, 89, 90, 91, 92, 93, 94, 95,
1601 - 96, 97, 98, 99, 100, 101, 102, 103,
1602 - 104, 105, 106, 107, 108, 109, 110, 111,
1603 - 112, 113, 114, 115, 116, 117, 118, 119,
1604 - 120, 121, 122, 123, 124, 125, 126, 127},
1610 static int nand_get_device(struct mtd_info *mtd, int new_state);
1612 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
1613 @@ -103,10 +59,96 @@
1615 DEFINE_LED_TRIGGER(nand_led_trigger);
1617 +/* Define default oob placement schemes for large and small page devices */
1618 +static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
1619 + struct mtd_oob_region *oobregion)
1621 + struct nand_chip *chip = mtd_to_nand(mtd);
1622 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1628 + oobregion->offset = 0;
1629 + oobregion->length = 4;
1631 + oobregion->offset = 6;
1632 + oobregion->length = ecc->total - 4;
1638 +static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
1639 + struct mtd_oob_region *oobregion)
1644 + if (mtd->oobsize == 16) {
1648 + oobregion->length = 8;
1649 + oobregion->offset = 8;
1651 + oobregion->length = 2;
1653 + oobregion->offset = 3;
1655 + oobregion->offset = 6;
1661 +const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
1662 + .ecc = nand_ooblayout_ecc_sp,
1663 + .free = nand_ooblayout_free_sp,
1665 +EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
1667 +static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
1668 + struct mtd_oob_region *oobregion)
1670 + struct nand_chip *chip = mtd_to_nand(mtd);
1671 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1676 + oobregion->length = ecc->total;
1677 + oobregion->offset = mtd->oobsize - oobregion->length;
1682 +static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
1683 + struct mtd_oob_region *oobregion)
1685 + struct nand_chip *chip = mtd_to_nand(mtd);
1686 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1691 + oobregion->length = mtd->oobsize - ecc->total - 2;
1692 + oobregion->offset = 2;
1697 +const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
1698 + .ecc = nand_ooblayout_ecc_lp,
1699 + .free = nand_ooblayout_free_lp,
1701 +EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
1703 static int check_offs_len(struct mtd_info *mtd,
1704 loff_t ofs, uint64_t len)
1706 - struct nand_chip *chip = mtd->priv;
1707 + struct nand_chip *chip = mtd_to_nand(mtd);
1710 /* Start address must align on block boundary */
1713 static void nand_release_device(struct mtd_info *mtd)
1715 - struct nand_chip *chip = mtd->priv;
1716 + struct nand_chip *chip = mtd_to_nand(mtd);
1718 /* Release the controller and the chip */
1719 spin_lock(&chip->controller->lock);
1722 static uint8_t nand_read_byte(struct mtd_info *mtd)
1724 - struct nand_chip *chip = mtd->priv;
1725 + struct nand_chip *chip = mtd_to_nand(mtd);
1726 return readb(chip->IO_ADDR_R);
1731 static uint8_t nand_read_byte16(struct mtd_info *mtd)
1733 - struct nand_chip *chip = mtd->priv;
1734 + struct nand_chip *chip = mtd_to_nand(mtd);
1735 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
1740 static u16 nand_read_word(struct mtd_info *mtd)
1742 - struct nand_chip *chip = mtd->priv;
1743 + struct nand_chip *chip = mtd_to_nand(mtd);
1744 return readw(chip->IO_ADDR_R);
1749 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
1751 - struct nand_chip *chip = mtd->priv;
1752 + struct nand_chip *chip = mtd_to_nand(mtd);
1758 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
1760 - struct nand_chip *chip = mtd->priv;
1761 + struct nand_chip *chip = mtd_to_nand(mtd);
1763 chip->write_buf(mtd, &byte, 1);
1767 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
1769 - struct nand_chip *chip = mtd->priv;
1770 + struct nand_chip *chip = mtd_to_nand(mtd);
1771 uint16_t word = byte;
1776 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
1778 - struct nand_chip *chip = mtd->priv;
1779 + struct nand_chip *chip = mtd_to_nand(mtd);
1781 iowrite8_rep(chip->IO_ADDR_W, buf, len);
1785 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1787 - struct nand_chip *chip = mtd->priv;
1788 + struct nand_chip *chip = mtd_to_nand(mtd);
1790 ioread8_rep(chip->IO_ADDR_R, buf, len);
1794 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
1796 - struct nand_chip *chip = mtd->priv;
1797 + struct nand_chip *chip = mtd_to_nand(mtd);
1798 u16 *p = (u16 *) buf;
1800 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
1803 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
1805 - struct nand_chip *chip = mtd->priv;
1806 + struct nand_chip *chip = mtd_to_nand(mtd);
1807 u16 *p = (u16 *) buf;
1809 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
1810 @@ -313,14 +355,13 @@
1811 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
1812 * @mtd: MTD device structure
1813 * @ofs: offset from device start
1814 - * @getchip: 0, if the chip is already selected
1816 * Check, if the block is bad.
1818 -static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
1819 +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
1821 - int page, chipnr, res = 0, i = 0;
1822 - struct nand_chip *chip = mtd->priv;
1823 + int page, res = 0, i = 0;
1824 + struct nand_chip *chip = mtd_to_nand(mtd);
1827 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
1828 @@ -328,15 +369,6 @@
1830 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
1833 - chipnr = (int)(ofs >> chip->chip_shift);
1835 - nand_get_device(mtd, FL_READING);
1837 - /* Select the NAND device */
1838 - chip->select_chip(mtd, chipnr);
1842 if (chip->options & NAND_BUSWIDTH_16) {
1843 chip->cmdfunc(mtd, NAND_CMD_READOOB,
1844 @@ -361,11 +393,6 @@
1846 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
1849 - chip->select_chip(mtd, -1);
1850 - nand_release_device(mtd);
1858 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1860 - struct nand_chip *chip = mtd->priv;
1861 + struct nand_chip *chip = mtd_to_nand(mtd);
1862 struct mtd_oob_ops ops;
1863 uint8_t buf[2] = { 0, 0 };
1864 int ret = 0, res, i = 0;
1867 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
1869 - struct nand_chip *chip = mtd->priv;
1870 + struct nand_chip *chip = mtd_to_nand(mtd);
1873 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
1876 static int nand_check_wp(struct mtd_info *mtd)
1878 - struct nand_chip *chip = mtd->priv;
1879 + struct nand_chip *chip = mtd_to_nand(mtd);
1881 /* Broken xD cards report WP despite being writable */
1882 if (chip->options & NAND_BROKEN_XD)
1885 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1887 - struct nand_chip *chip = mtd->priv;
1888 + struct nand_chip *chip = mtd_to_nand(mtd);
1892 @@ -503,19 +530,17 @@
1893 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
1894 * @mtd: MTD device structure
1895 * @ofs: offset from device start
1896 - * @getchip: 0, if the chip is already selected
1897 * @allowbbt: 1, if its allowed to access the bbt area
1899 * Check, if the block is bad. Either by reading the bad block table or
1900 * calling of the scan function.
1902 -static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
1904 +static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
1906 - struct nand_chip *chip = mtd->priv;
1907 + struct nand_chip *chip = mtd_to_nand(mtd);
1910 - return chip->block_bad(mtd, ofs, getchip);
1911 + return chip->block_bad(mtd, ofs);
1913 /* Return info from the table */
1914 return nand_isbad_bbt(mtd, ofs, allowbbt);
1917 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
1919 - struct nand_chip *chip = mtd->priv;
1920 + struct nand_chip *chip = mtd_to_nand(mtd);
1923 /* Wait for the device to get ready */
1926 void nand_wait_ready(struct mtd_info *mtd)
1928 - struct nand_chip *chip = mtd->priv;
1929 + struct nand_chip *chip = mtd_to_nand(mtd);
1930 unsigned long timeo = 400;
1932 if (in_interrupt() || oops_in_progress)
1935 } while (time_before(jiffies, timeo));
1937 - pr_warn_ratelimited(
1938 - "timeout while waiting for chip to become ready\n");
1939 + if (!chip->dev_ready(mtd))
1940 + pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
1942 led_trigger_event(nand_led_trigger, LED_OFF);
1946 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
1948 - register struct nand_chip *chip = mtd->priv;
1949 + register struct nand_chip *chip = mtd_to_nand(mtd);
1951 timeo = jiffies + msecs_to_jiffies(timeo);
1954 static void nand_command(struct mtd_info *mtd, unsigned int command,
1955 int column, int page_addr)
1957 - register struct nand_chip *chip = mtd->priv;
1958 + register struct nand_chip *chip = mtd_to_nand(mtd);
1959 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
1961 /* Write out the command to the device */
1963 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
1964 int column, int page_addr)
1966 - register struct nand_chip *chip = mtd->priv;
1967 + register struct nand_chip *chip = mtd_to_nand(mtd);
1969 /* Emulate NAND_CMD_READOOB */
1970 if (command == NAND_CMD_READOOB) {
1973 nand_get_device(struct mtd_info *mtd, int new_state)
1975 - struct nand_chip *chip = mtd->priv;
1976 + struct nand_chip *chip = mtd_to_nand(mtd);
1977 spinlock_t *lock = &chip->controller->lock;
1978 wait_queue_head_t *wq = &chip->controller->wq;
1979 DECLARE_WAITQUEUE(wait, current);
1984 - struct nand_chip *chip = mtd->priv;
1985 + struct nand_chip *chip = mtd_to_nand(mtd);
1987 /* Submit address of first page to unlock */
1988 page = ofs >> chip->page_shift;
1989 @@ -987,7 +1012,7 @@
1993 - struct nand_chip *chip = mtd->priv;
1994 + struct nand_chip *chip = mtd_to_nand(mtd);
1996 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1997 __func__, (unsigned long long)ofs, len);
1998 @@ -1050,7 +1075,7 @@
2001 int chipnr, status, page;
2002 - struct nand_chip *chip = mtd->priv;
2003 + struct nand_chip *chip = mtd_to_nand(mtd);
2005 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2006 __func__, (unsigned long long)ofs, len);
2007 @@ -1309,13 +1334,12 @@
2008 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2009 uint8_t *buf, int oob_required, int page)
2011 - int i, eccsize = chip->ecc.size;
2012 + int i, eccsize = chip->ecc.size, ret;
2013 int eccbytes = chip->ecc.bytes;
2014 int eccsteps = chip->ecc.steps;
2016 uint8_t *ecc_calc = chip->buffers->ecccalc;
2017 uint8_t *ecc_code = chip->buffers->ecccode;
2018 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2019 unsigned int max_bitflips = 0;
2021 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
2022 @@ -1323,8 +1347,10 @@
2023 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2024 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2026 - for (i = 0; i < chip->ecc.total; i++)
2027 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2028 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2033 eccsteps = chip->ecc.steps;
2035 @@ -1356,14 +1382,14 @@
2036 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
2039 - int start_step, end_step, num_steps;
2040 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2041 + int start_step, end_step, num_steps, ret;
2043 int data_col_addr, i, gaps = 0;
2044 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2045 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2047 + int index, section = 0;
2048 unsigned int max_bitflips = 0;
2049 + struct mtd_oob_region oobregion = { };
2051 /* Column address within the page aligned to ECC size (256bytes) */
2052 start_step = data_offs / chip->ecc.size;
2053 @@ -1391,12 +1417,13 @@
2054 * The performance is faster if we position offsets according to
2055 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
2057 - for (i = 0; i < eccfrag_len - 1; i++) {
2058 - if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
2063 + ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
2067 + if (oobregion.length < eccfrag_len)
2071 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
2072 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2073 @@ -1405,20 +1432,23 @@
2074 * Send the command to read the particular ECC bytes take care
2075 * about buswidth alignment in read_buf.
2077 - aligned_pos = eccpos[index] & ~(busw - 1);
2078 + aligned_pos = oobregion.offset & ~(busw - 1);
2079 aligned_len = eccfrag_len;
2080 - if (eccpos[index] & (busw - 1))
2081 + if (oobregion.offset & (busw - 1))
2083 - if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
2084 + if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2088 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
2089 - mtd->writesize + aligned_pos, -1);
2090 + mtd->writesize + aligned_pos, -1);
2091 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
2094 - for (i = 0; i < eccfrag_len; i++)
2095 - chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
2096 + ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
2097 + chip->oob_poi, index, eccfrag_len);
2101 p = bufpoi + data_col_addr;
2102 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2103 @@ -1426,6 +1456,16 @@
2105 stat = chip->ecc.correct(mtd, p,
2106 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
2107 + if (stat == -EBADMSG &&
2108 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2109 + /* check for empty pages with bitflips */
2110 + stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2111 + &chip->buffers->ecccode[i],
2114 + chip->ecc.strength);
2118 mtd->ecc_stats.failed++;
2120 @@ -1449,13 +1489,12 @@
2121 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2122 uint8_t *buf, int oob_required, int page)
2124 - int i, eccsize = chip->ecc.size;
2125 + int i, eccsize = chip->ecc.size, ret;
2126 int eccbytes = chip->ecc.bytes;
2127 int eccsteps = chip->ecc.steps;
2129 uint8_t *ecc_calc = chip->buffers->ecccalc;
2130 uint8_t *ecc_code = chip->buffers->ecccode;
2131 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2132 unsigned int max_bitflips = 0;
2134 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2135 @@ -1465,8 +1504,10 @@
2137 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2139 - for (i = 0; i < chip->ecc.total; i++)
2140 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2141 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2146 eccsteps = chip->ecc.steps;
2148 @@ -1475,6 +1516,15 @@
2151 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
2152 + if (stat == -EBADMSG &&
2153 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2154 + /* check for empty pages with bitflips */
2155 + stat = nand_check_erased_ecc_chunk(p, eccsize,
2156 + &ecc_code[i], eccbytes,
2158 + chip->ecc.strength);
2162 mtd->ecc_stats.failed++;
2164 @@ -1502,12 +1552,11 @@
2165 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
2166 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
2168 - int i, eccsize = chip->ecc.size;
2169 + int i, eccsize = chip->ecc.size, ret;
2170 int eccbytes = chip->ecc.bytes;
2171 int eccsteps = chip->ecc.steps;
2173 uint8_t *ecc_code = chip->buffers->ecccode;
2174 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2175 uint8_t *ecc_calc = chip->buffers->ecccalc;
2176 unsigned int max_bitflips = 0;
2178 @@ -1516,8 +1565,10 @@
2179 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2180 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
2182 - for (i = 0; i < chip->ecc.total; i++)
2183 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2184 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2189 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2191 @@ -1527,6 +1578,15 @@
2192 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2194 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
2195 + if (stat == -EBADMSG &&
2196 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2197 + /* check for empty pages with bitflips */
2198 + stat = nand_check_erased_ecc_chunk(p, eccsize,
2199 + &ecc_code[i], eccbytes,
2201 + chip->ecc.strength);
2205 mtd->ecc_stats.failed++;
2207 @@ -1554,6 +1614,7 @@
2208 int i, eccsize = chip->ecc.size;
2209 int eccbytes = chip->ecc.bytes;
2210 int eccsteps = chip->ecc.steps;
2211 + int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
2213 uint8_t *oob = chip->oob_poi;
2214 unsigned int max_bitflips = 0;
2215 @@ -1573,19 +1634,29 @@
2216 chip->read_buf(mtd, oob, eccbytes);
2217 stat = chip->ecc.correct(mtd, p, oob, NULL);
2220 - mtd->ecc_stats.failed++;
2222 - mtd->ecc_stats.corrected += stat;
2223 - max_bitflips = max_t(unsigned int, max_bitflips, stat);
2228 if (chip->ecc.postpad) {
2229 chip->read_buf(mtd, oob, chip->ecc.postpad);
2230 oob += chip->ecc.postpad;
2233 + if (stat == -EBADMSG &&
2234 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2235 + /* check for empty pages with bitflips */
2236 + stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2237 + oob - eccpadbytes,
2240 + chip->ecc.strength);
2244 + mtd->ecc_stats.failed++;
2246 + mtd->ecc_stats.corrected += stat;
2247 + max_bitflips = max_t(unsigned int, max_bitflips, stat);
2251 /* Calculate remaining oob bytes */
2252 @@ -1598,14 +1669,17 @@
2255 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
2256 - * @chip: nand chip structure
2257 + * @mtd: mtd info structure
2258 * @oob: oob destination address
2259 * @ops: oob ops structure
2260 * @len: size of oob to transfer
2262 -static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
2263 +static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
2264 struct mtd_oob_ops *ops, size_t len)
2266 + struct nand_chip *chip = mtd_to_nand(mtd);
2269 switch (ops->mode) {
2271 case MTD_OPS_PLACE_OOB:
2272 @@ -1613,31 +1687,12 @@
2273 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
2276 - case MTD_OPS_AUTO_OOB: {
2277 - struct nand_oobfree *free = chip->ecc.layout->oobfree;
2278 - uint32_t boffs = 0, roffs = ops->ooboffs;
2281 - for (; free->length && len; free++, len -= bytes) {
2282 - /* Read request not from offset 0? */
2283 - if (unlikely(roffs)) {
2284 - if (roffs >= free->length) {
2285 - roffs -= free->length;
2288 - boffs = free->offset + roffs;
2289 - bytes = min_t(size_t, len,
2290 - (free->length - roffs));
2293 - bytes = min_t(size_t, len, free->length);
2294 - boffs = free->offset;
2296 - memcpy(oob, chip->oob_poi + boffs, bytes);
2301 + case MTD_OPS_AUTO_OOB:
2302 + ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
2303 + ops->ooboffs, len);
2310 @@ -1655,7 +1710,7 @@
2312 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
2314 - struct nand_chip *chip = mtd->priv;
2315 + struct nand_chip *chip = mtd_to_nand(mtd);
2317 pr_debug("setting READ RETRY mode %d\n", retry_mode);
2319 @@ -1680,12 +1735,11 @@
2320 struct mtd_oob_ops *ops)
2322 int chipnr, page, realpage, col, bytes, aligned, oob_required;
2323 - struct nand_chip *chip = mtd->priv;
2324 + struct nand_chip *chip = mtd_to_nand(mtd);
2326 uint32_t readlen = ops->len;
2327 uint32_t oobreadlen = ops->ooblen;
2328 - uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
2329 - mtd->oobavail : mtd->oobsize;
2330 + uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2332 uint8_t *bufpoi, *oob, *buf;
2334 @@ -1772,7 +1826,7 @@
2335 int toread = min(oobreadlen, max_oobsize);
2338 - oob = nand_transfer_oob(chip,
2339 + oob = nand_transfer_oob(mtd,
2341 oobreadlen -= toread;
2343 @@ -2024,7 +2078,7 @@
2344 struct mtd_oob_ops *ops)
2346 int page, realpage, chipnr;
2347 - struct nand_chip *chip = mtd->priv;
2348 + struct nand_chip *chip = mtd_to_nand(mtd);
2349 struct mtd_ecc_stats stats;
2350 int readlen = ops->ooblen;
2352 @@ -2036,10 +2090,7 @@
2354 stats = mtd->ecc_stats;
2356 - if (ops->mode == MTD_OPS_AUTO_OOB)
2357 - len = chip->ecc.layout->oobavail;
2359 - len = mtd->oobsize;
2360 + len = mtd_oobavail(mtd, ops);
2362 if (unlikely(ops->ooboffs >= len)) {
2363 pr_debug("%s: attempt to start read outside oob\n",
2364 @@ -2073,7 +2124,7 @@
2367 len = min(len, readlen);
2368 - buf = nand_transfer_oob(chip, buf, ops, len);
2369 + buf = nand_transfer_oob(mtd, buf, ops, len);
2371 if (chip->options & NAND_NEED_READRDY) {
2372 /* Apply delay or wait for ready/busy pin */
2373 @@ -2232,19 +2283,20 @@
2374 const uint8_t *buf, int oob_required,
2377 - int i, eccsize = chip->ecc.size;
2378 + int i, eccsize = chip->ecc.size, ret;
2379 int eccbytes = chip->ecc.bytes;
2380 int eccsteps = chip->ecc.steps;
2381 uint8_t *ecc_calc = chip->buffers->ecccalc;
2382 const uint8_t *p = buf;
2383 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2385 /* Software ECC calculation */
2386 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2387 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2389 - for (i = 0; i < chip->ecc.total; i++)
2390 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2391 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2396 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2398 @@ -2261,12 +2313,11 @@
2399 const uint8_t *buf, int oob_required,
2402 - int i, eccsize = chip->ecc.size;
2403 + int i, eccsize = chip->ecc.size, ret;
2404 int eccbytes = chip->ecc.bytes;
2405 int eccsteps = chip->ecc.steps;
2406 uint8_t *ecc_calc = chip->buffers->ecccalc;
2407 const uint8_t *p = buf;
2408 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2410 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2411 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2412 @@ -2274,8 +2325,10 @@
2413 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2416 - for (i = 0; i < chip->ecc.total; i++)
2417 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2418 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2423 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2425 @@ -2303,11 +2356,10 @@
2426 int ecc_size = chip->ecc.size;
2427 int ecc_bytes = chip->ecc.bytes;
2428 int ecc_steps = chip->ecc.steps;
2429 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2430 uint32_t start_step = offset / ecc_size;
2431 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2432 int oob_bytes = mtd->oobsize / ecc_steps;
2436 for (step = 0; step < ecc_steps; step++) {
2437 /* configure controller for WRITE access */
2438 @@ -2335,8 +2387,10 @@
2439 /* copy calculated ECC for whole page to chip->buffer->oob */
2440 /* this include masked-value(0xFF) for unwritten subpages */
2441 ecc_calc = chip->buffers->ecccalc;
2442 - for (i = 0; i < chip->ecc.total; i++)
2443 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2444 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2449 /* write OOB buffer to NAND device */
2450 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2451 @@ -2472,7 +2526,8 @@
2452 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2453 struct mtd_oob_ops *ops)
2455 - struct nand_chip *chip = mtd->priv;
2456 + struct nand_chip *chip = mtd_to_nand(mtd);
2460 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2461 @@ -2487,31 +2542,12 @@
2462 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2465 - case MTD_OPS_AUTO_OOB: {
2466 - struct nand_oobfree *free = chip->ecc.layout->oobfree;
2467 - uint32_t boffs = 0, woffs = ops->ooboffs;
2470 - for (; free->length && len; free++, len -= bytes) {
2471 - /* Write request not from offset 0? */
2472 - if (unlikely(woffs)) {
2473 - if (woffs >= free->length) {
2474 - woffs -= free->length;
2477 - boffs = free->offset + woffs;
2478 - bytes = min_t(size_t, len,
2479 - (free->length - woffs));
2482 - bytes = min_t(size_t, len, free->length);
2483 - boffs = free->offset;
2485 - memcpy(chip->oob_poi + boffs, oob, bytes);
2490 + case MTD_OPS_AUTO_OOB:
2491 + ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2492 + ops->ooboffs, len);
2499 @@ -2532,12 +2568,11 @@
2500 struct mtd_oob_ops *ops)
2502 int chipnr, realpage, page, blockmask, column;
2503 - struct nand_chip *chip = mtd->priv;
2504 + struct nand_chip *chip = mtd_to_nand(mtd);
2505 uint32_t writelen = ops->len;
2507 uint32_t oobwritelen = ops->ooblen;
2508 - uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
2509 - mtd->oobavail : mtd->oobsize;
2510 + uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2512 uint8_t *oob = ops->oobbuf;
2513 uint8_t *buf = ops->datbuf;
2514 @@ -2662,7 +2697,7 @@
2515 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2516 size_t *retlen, const uint8_t *buf)
2518 - struct nand_chip *chip = mtd->priv;
2519 + struct nand_chip *chip = mtd_to_nand(mtd);
2520 struct mtd_oob_ops ops;
2523 @@ -2722,15 +2757,12 @@
2524 struct mtd_oob_ops *ops)
2526 int chipnr, page, status, len;
2527 - struct nand_chip *chip = mtd->priv;
2528 + struct nand_chip *chip = mtd_to_nand(mtd);
2530 pr_debug("%s: to = 0x%08x, len = %i\n",
2531 __func__, (unsigned int)to, (int)ops->ooblen);
2533 - if (ops->mode == MTD_OPS_AUTO_OOB)
2534 - len = chip->ecc.layout->oobavail;
2536 - len = mtd->oobsize;
2537 + len = mtd_oobavail(mtd, ops);
2539 /* Do not allow write past end of page */
2540 if ((ops->ooboffs + ops->ooblen) > len) {
2541 @@ -2847,7 +2879,7 @@
2543 static int single_erase(struct mtd_info *mtd, int page)
2545 - struct nand_chip *chip = mtd->priv;
2546 + struct nand_chip *chip = mtd_to_nand(mtd);
2547 /* Send commands to erase a block */
2548 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2549 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2550 @@ -2879,7 +2911,7 @@
2553 int page, status, pages_per_block, ret, chipnr;
2554 - struct nand_chip *chip = mtd->priv;
2555 + struct nand_chip *chip = mtd_to_nand(mtd);
2558 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2559 @@ -2918,7 +2950,7 @@
2561 /* Check if we have a bad block, we do not erase bad blocks! */
2562 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2563 - chip->page_shift, 0, allowbbt)) {
2564 + chip->page_shift, allowbbt)) {
2565 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
2567 instr->state = MTD_ERASE_FAILED;
2568 @@ -3005,7 +3037,20 @@
2570 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2572 - return nand_block_checkbad(mtd, offs, 1, 0);
2573 + struct nand_chip *chip = mtd_to_nand(mtd);
2574 + int chipnr = (int)(offs >> chip->chip_shift);
2577 + /* Select the NAND device */
2578 + nand_get_device(mtd, FL_READING);
2579 + chip->select_chip(mtd, chipnr);
2581 + ret = nand_block_checkbad(mtd, offs, 0);
2583 + chip->select_chip(mtd, -1);
2584 + nand_release_device(mtd);
2590 @@ -3094,7 +3139,7 @@
2592 static void nand_resume(struct mtd_info *mtd)
2594 - struct nand_chip *chip = mtd->priv;
2595 + struct nand_chip *chip = mtd_to_nand(mtd);
2597 if (chip->state == FL_PM_SUSPENDED)
2598 nand_release_device(mtd);
2599 @@ -3266,7 +3311,7 @@
2601 static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
2603 - struct nand_chip *chip = mtd->priv;
2604 + struct nand_chip *chip = mtd_to_nand(mtd);
2605 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
2607 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
2608 @@ -3937,10 +3982,13 @@
2612 -static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
2613 - struct device_node *dn)
2614 +static int nand_dt_init(struct nand_chip *chip)
2616 - int ecc_mode, ecc_strength, ecc_step;
2617 + struct device_node *dn = nand_get_flash_node(chip);
2618 + int ecc_mode, ecc_algo, ecc_strength, ecc_step;
2623 if (of_get_nand_bus_width(dn) == 16)
2624 chip->options |= NAND_BUSWIDTH_16;
2625 @@ -3949,6 +3997,7 @@
2626 chip->bbt_options |= NAND_BBT_USE_FLASH;
2628 ecc_mode = of_get_nand_ecc_mode(dn);
2629 + ecc_algo = of_get_nand_ecc_algo(dn);
2630 ecc_strength = of_get_nand_ecc_strength(dn);
2631 ecc_step = of_get_nand_ecc_step_size(dn);
2633 @@ -3961,6 +4010,9 @@
2635 chip->ecc.mode = ecc_mode;
2637 + if (ecc_algo >= 0)
2638 + chip->ecc.algo = ecc_algo;
2640 if (ecc_strength >= 0)
2641 chip->ecc.strength = ecc_strength;
2643 @@ -3984,15 +4036,16 @@
2644 struct nand_flash_dev *table)
2646 int i, nand_maf_id, nand_dev_id;
2647 - struct nand_chip *chip = mtd->priv;
2648 + struct nand_chip *chip = mtd_to_nand(mtd);
2649 struct nand_flash_dev *type;
2652 - if (chip->flash_node) {
2653 - ret = nand_dt_init(mtd, chip, chip->flash_node);
2657 + ret = nand_dt_init(chip);
2661 + if (!mtd->name && mtd->dev.parent)
2662 + mtd->name = dev_name(mtd->dev.parent);
2664 if (!mtd->name && mtd->dev.parent)
2665 mtd->name = dev_name(mtd->dev.parent);
2666 @@ -4055,7 +4108,7 @@
2668 static bool nand_ecc_strength_good(struct mtd_info *mtd)
2670 - struct nand_chip *chip = mtd->priv;
2671 + struct nand_chip *chip = mtd_to_nand(mtd);
2672 struct nand_ecc_ctrl *ecc = &chip->ecc;
2675 @@ -4083,10 +4136,10 @@
2677 int nand_scan_tail(struct mtd_info *mtd)
2680 - struct nand_chip *chip = mtd->priv;
2681 + struct nand_chip *chip = mtd_to_nand(mtd);
2682 struct nand_ecc_ctrl *ecc = &chip->ecc;
2683 struct nand_buffers *nbuf;
2686 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
2687 BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
2688 @@ -4113,19 +4166,15 @@
2690 * If no default placement scheme is given, select an appropriate one.
2692 - if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
2693 + if (!mtd->ooblayout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
2694 switch (mtd->oobsize) {
2696 - ecc->layout = &nand_oob_8;
2699 - ecc->layout = &nand_oob_16;
2700 + mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
2703 - ecc->layout = &nand_oob_64;
2706 - ecc->layout = &nand_oob_128;
2707 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
2710 pr_warn("No oob scheme defined for oobsize %d\n",
2711 @@ -4168,7 +4217,7 @@
2712 ecc->write_oob = nand_write_oob_std;
2713 if (!ecc->read_subpage)
2714 ecc->read_subpage = nand_read_subpage;
2715 - if (!ecc->write_subpage)
2716 + if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
2717 ecc->write_subpage = nand_write_subpage_hwecc;
2719 case NAND_ECC_HW_SYNDROME:
2720 @@ -4246,10 +4295,8 @@
2723 /* See nand_bch_init() for details. */
2724 - ecc->bytes = DIV_ROUND_UP(
2725 - ecc->strength * fls(8 * ecc->size), 8);
2726 - ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
2729 + ecc->priv = nand_bch_init(mtd);
2731 pr_warn("BCH ECC initialization failed!\n");
2733 @@ -4280,20 +4327,9 @@
2734 if (!ecc->write_oob_raw)
2735 ecc->write_oob_raw = ecc->write_oob;
2738 - * The number of bytes available for a client to place data into
2739 - * the out of band area.
2741 - ecc->layout->oobavail = 0;
2742 - for (i = 0; ecc->layout->oobfree[i].length
2743 - && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
2744 - ecc->layout->oobavail += ecc->layout->oobfree[i].length;
2745 - mtd->oobavail = ecc->layout->oobavail;
2747 - /* ECC sanity check: warn if it's too weak */
2748 - if (!nand_ecc_strength_good(mtd))
2749 - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
2751 + /* propagate ecc info to mtd_info */
2752 + mtd->ecc_strength = ecc->strength;
2753 + mtd->ecc_step_size = ecc->size;
2756 * Set the number of read / write steps for one page depending on ECC
2757 @@ -4306,6 +4342,21 @@
2759 ecc->total = ecc->steps * ecc->bytes;
2762 + * The number of bytes available for a client to place data into
2763 + * the out of band area.
2765 + ret = mtd_ooblayout_count_freebytes(mtd);
2769 + mtd->oobavail = ret;
2771 + /* ECC sanity check: warn if it's too weak */
2772 + if (!nand_ecc_strength_good(mtd))
2773 + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
2776 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
2777 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
2778 switch (ecc->steps) {
2779 @@ -4362,10 +4413,6 @@
2780 mtd->_block_markbad = nand_block_markbad;
2781 mtd->writebufsize = mtd->writesize;
2783 - /* propagate ecc info to mtd_info */
2784 - mtd->ecclayout = ecc->layout;
2785 - mtd->ecc_strength = ecc->strength;
2786 - mtd->ecc_step_size = ecc->size;
2788 * Initialize bitflip_threshold to its default prior scan_bbt() call.
2789 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
2790 @@ -4421,7 +4468,7 @@
2792 void nand_release(struct mtd_info *mtd)
2794 - struct nand_chip *chip = mtd->priv;
2795 + struct nand_chip *chip = mtd_to_nand(mtd);
2797 if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
2798 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
2799 --- a/drivers/mtd/nand/nand_bbt.c
2800 +++ b/drivers/mtd/nand/nand_bbt.c
2802 struct nand_bbt_descr *td, int offs)
2804 int res, ret = 0, i, j, act = 0;
2805 - struct nand_chip *this = mtd->priv;
2806 + struct nand_chip *this = mtd_to_nand(mtd);
2807 size_t retlen, len, totlen;
2809 int bits = td->options & NAND_BBT_NRBITS_MSK;
2812 static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
2814 - struct nand_chip *this = mtd->priv;
2815 + struct nand_chip *this = mtd_to_nand(mtd);
2818 if (td->options & NAND_BBT_PERCHIP) {
2820 static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
2821 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
2823 - struct nand_chip *this = mtd->priv;
2824 + struct nand_chip *this = mtd_to_nand(mtd);
2826 /* Read the primary version, if available */
2827 if (td->options & NAND_BBT_VERSION) {
2829 static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
2830 struct nand_bbt_descr *bd, int chip)
2832 - struct nand_chip *this = mtd->priv;
2833 + struct nand_chip *this = mtd_to_nand(mtd);
2834 int i, numblocks, numpages;
2839 static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
2841 - struct nand_chip *this = mtd->priv;
2842 + struct nand_chip *this = mtd_to_nand(mtd);
2844 int startblock, block, dir;
2845 int scanlen = mtd->writesize + mtd->oobsize;
2847 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
2850 - struct nand_chip *this = mtd->priv;
2851 + struct nand_chip *this = mtd_to_nand(mtd);
2852 struct erase_info einfo;
2853 int i, res, chip = 0;
2854 int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
2857 static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2859 - struct nand_chip *this = mtd->priv;
2860 + struct nand_chip *this = mtd_to_nand(mtd);
2862 return create_bbt(mtd, this->buffers->databuf, bd, -1);
2865 static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
2867 int i, chips, writeops, create, chipsel, res, res2;
2868 - struct nand_chip *this = mtd->priv;
2869 + struct nand_chip *this = mtd_to_nand(mtd);
2870 struct nand_bbt_descr *td = this->bbt_td;
2871 struct nand_bbt_descr *md = this->bbt_md;
2872 struct nand_bbt_descr *rd, *rd2;
2875 static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
2877 - struct nand_chip *this = mtd->priv;
2878 + struct nand_chip *this = mtd_to_nand(mtd);
2879 int i, j, chips, block, nrblocks, update;
2882 @@ -1022,7 +1022,7 @@
2884 static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2886 - struct nand_chip *this = mtd->priv;
2887 + struct nand_chip *this = mtd_to_nand(mtd);
2891 @@ -1074,7 +1074,7 @@
2893 static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2895 - struct nand_chip *this = mtd->priv;
2896 + struct nand_chip *this = mtd_to_nand(mtd);
2899 struct nand_bbt_descr *td = this->bbt_td;
2900 @@ -1147,7 +1147,7 @@
2902 static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
2904 - struct nand_chip *this = mtd->priv;
2905 + struct nand_chip *this = mtd_to_nand(mtd);
2909 @@ -1281,7 +1281,7 @@
2911 int nand_default_bbt(struct mtd_info *mtd)
2913 - struct nand_chip *this = mtd->priv;
2914 + struct nand_chip *this = mtd_to_nand(mtd);
2917 /* Is a flash based bad block table requested? */
2918 @@ -1317,7 +1317,7 @@
2920 int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
2922 - struct nand_chip *this = mtd->priv;
2923 + struct nand_chip *this = mtd_to_nand(mtd);
2926 block = (int)(offs >> this->bbt_erase_shift);
2927 @@ -1332,7 +1332,7 @@
2929 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
2931 - struct nand_chip *this = mtd->priv;
2932 + struct nand_chip *this = mtd_to_nand(mtd);
2935 block = (int)(offs >> this->bbt_erase_shift);
2936 @@ -1359,7 +1359,7 @@
2938 int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
2940 - struct nand_chip *this = mtd->priv;
2941 + struct nand_chip *this = mtd_to_nand(mtd);
2944 block = (int)(offs >> this->bbt_erase_shift);
2945 @@ -1373,5 +1373,3 @@
2950 -EXPORT_SYMBOL(nand_scan_bbt);
2951 --- a/drivers/mtd/nand/nand_bch.c
2952 +++ b/drivers/mtd/nand/nand_bch.c
2955 * struct nand_bch_control - private NAND BCH control structure
2956 * @bch: BCH control structure
2957 - * @ecclayout: private ecc layout for this BCH configuration
2958 * @errloc: error location array
2959 * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
2961 struct nand_bch_control {
2962 struct bch_control *bch;
2963 - struct nand_ecclayout ecclayout;
2964 unsigned int *errloc;
2965 unsigned char *eccmask;
2968 int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
2969 unsigned char *code)
2971 - const struct nand_chip *chip = mtd->priv;
2972 + const struct nand_chip *chip = mtd_to_nand(mtd);
2973 struct nand_bch_control *nbc = chip->ecc.priv;
2977 int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
2978 unsigned char *read_ecc, unsigned char *calc_ecc)
2980 - const struct nand_chip *chip = mtd->priv;
2981 + const struct nand_chip *chip = mtd_to_nand(mtd);
2982 struct nand_bch_control *nbc = chip->ecc.priv;
2983 unsigned int *errloc = nbc->errloc;
2987 } else if (count < 0) {
2988 printk(KERN_ERR "ecc unrecoverable error\n");
2996 * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
2997 * @mtd: MTD block structure
2998 - * @eccsize: ecc block size in bytes
2999 - * @eccbytes: ecc length in bytes
3000 - * @ecclayout: output default layout
3003 * a pointer to a new NAND BCH control structure, or NULL upon failure
3004 @@ -123,14 +118,20 @@
3005 * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
3006 * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
3008 -struct nand_bch_control *
3009 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
3010 - struct nand_ecclayout **ecclayout)
3011 +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
3013 + struct nand_chip *nand = mtd_to_nand(mtd);
3014 unsigned int m, t, eccsteps, i;
3015 - struct nand_ecclayout *layout;
3016 struct nand_bch_control *nbc = NULL;
3017 unsigned char *erased_page;
3018 + unsigned int eccsize = nand->ecc.size;
3019 + unsigned int eccbytes = nand->ecc.bytes;
3020 + unsigned int eccstrength = nand->ecc.strength;
3022 + if (!eccbytes && eccstrength) {
3023 + eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
3024 + nand->ecc.bytes = eccbytes;
3027 if (!eccsize || !eccbytes) {
3028 printk(KERN_WARNING "ecc parameters not supplied\n");
3030 eccsteps = mtd->writesize/eccsize;
3032 /* if no ecc placement scheme was provided, build one */
3033 - if (!*ecclayout) {
3034 + if (!mtd->ooblayout) {
3036 /* handle large page devices only */
3037 if (mtd->oobsize < 64) {
3038 @@ -167,24 +168,7 @@
3042 - layout = &nbc->ecclayout;
3043 - layout->eccbytes = eccsteps*eccbytes;
3045 - /* reserve 2 bytes for bad block marker */
3046 - if (layout->eccbytes+2 > mtd->oobsize) {
3047 - printk(KERN_WARNING "no suitable oob scheme available "
3048 - "for oobsize %d eccbytes %u\n", mtd->oobsize,
3052 - /* put ecc bytes at oob tail */
3053 - for (i = 0; i < layout->eccbytes; i++)
3054 - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
3056 - layout->oobfree[0].offset = 2;
3057 - layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
3059 - *ecclayout = layout;
3060 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
3065 printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
3068 - if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
3070 + if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
3071 printk(KERN_WARNING "invalid ecc layout\n");
3075 for (i = 0; i < eccbytes; i++)
3076 nbc->eccmask[i] ^= 0xff;
3079 + nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
3084 --- a/drivers/mtd/nand/nand_ecc.c
3085 +++ b/drivers/mtd/nand/nand_ecc.c
3087 unsigned char *code)
3089 __nand_calculate_ecc(buf,
3090 - ((struct nand_chip *)mtd->priv)->ecc.size, code);
3091 + mtd_to_nand(mtd)->ecc.size, code);
3096 unsigned char *read_ecc, unsigned char *calc_ecc)
3098 return __nand_correct_data(buf, read_ecc, calc_ecc,
3099 - ((struct nand_chip *)mtd->priv)->ecc.size);
3100 + mtd_to_nand(mtd)->ecc.size);
3102 EXPORT_SYMBOL(nand_correct_data);
3104 --- a/drivers/mtd/nand/nand_ids.c
3105 +++ b/drivers/mtd/nand/nand_ids.c
3107 SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
3108 {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
3109 { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
3110 - SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
3112 + SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
3113 + NAND_ECC_INFO(40, SZ_1K), 4 },
3115 LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
3116 LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
3117 --- a/drivers/mtd/nand/nandsim.c
3118 +++ b/drivers/mtd/nand/nandsim.c
3121 static int init_nandsim(struct mtd_info *mtd)
3123 - struct nand_chip *chip = mtd->priv;
3124 - struct nandsim *ns = chip->priv;
3125 + struct nand_chip *chip = mtd_to_nand(mtd);
3126 + struct nandsim *ns = nand_get_controller_data(chip);
3129 uint64_t next_offset;
3130 @@ -1908,7 +1908,8 @@
3132 static u_char ns_nand_read_byte(struct mtd_info *mtd)
3134 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3135 + struct nand_chip *chip = mtd_to_nand(mtd);
3136 + struct nandsim *ns = nand_get_controller_data(chip);
3139 /* Sanity and correctness checks */
3140 @@ -1969,7 +1970,8 @@
3142 static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
3144 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3145 + struct nand_chip *chip = mtd_to_nand(mtd);
3146 + struct nandsim *ns = nand_get_controller_data(chip);
3148 /* Sanity and correctness checks */
3149 if (!ns->lines.ce) {
3150 @@ -2123,7 +2125,8 @@
3152 static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
3154 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3155 + struct nand_chip *chip = mtd_to_nand(mtd);
3156 + struct nandsim *ns = nand_get_controller_data(chip);
3158 ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
3159 ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
3160 @@ -2141,7 +2144,7 @@
3162 static uint16_t ns_nand_read_word(struct mtd_info *mtd)
3164 - struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3165 + struct nand_chip *chip = mtd_to_nand(mtd);
3167 NS_DBG("read_word\n");
3169 @@ -2150,7 +2153,8 @@
3171 static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
3173 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3174 + struct nand_chip *chip = mtd_to_nand(mtd);
3175 + struct nandsim *ns = nand_get_controller_data(chip);
3177 /* Check that chip is expecting data input */
3178 if (!(ns->state & STATE_DATAIN_MASK)) {
3179 @@ -2177,7 +2181,8 @@
3181 static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
3183 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3184 + struct nand_chip *chip = mtd_to_nand(mtd);
3185 + struct nandsim *ns = nand_get_controller_data(chip);
3187 /* Sanity and correctness checks */
3188 if (!ns->lines.ce) {
3189 @@ -2198,7 +2203,7 @@
3192 for (i = 0; i < len; i++)
3193 - buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
3194 + buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
3198 @@ -2236,16 +2241,15 @@
3201 /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
3202 - nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
3203 - + sizeof(struct nandsim), GFP_KERNEL);
3205 + chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
3208 NS_ERR("unable to allocate core structures.\n");
3211 - chip = (struct nand_chip *)(nsmtd + 1);
3212 - nsmtd->priv = (void *)chip;
3213 + nsmtd = nand_to_mtd(chip);
3214 nand = (struct nandsim *)(chip + 1);
3215 - chip->priv = (void *)nand;
3216 + nand_set_controller_data(chip, (void *)nand);
3219 * Register simulator's callbacks.
3220 @@ -2257,6 +2261,7 @@
3221 chip->read_buf = ns_nand_read_buf;
3222 chip->read_word = ns_nand_read_word;
3223 chip->ecc.mode = NAND_ECC_SOFT;
3224 + chip->ecc.algo = NAND_ECC_HAMMING;
3225 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
3226 /* and 'badblocks' parameters to work */
3227 chip->options |= NAND_SKIP_BBTSCAN;
3228 @@ -2335,6 +2340,7 @@
3231 chip->ecc.mode = NAND_ECC_SOFT_BCH;
3232 + chip->ecc.algo = NAND_ECC_BCH;
3233 chip->ecc.size = 512;
3234 chip->ecc.strength = bch;
3235 chip->ecc.bytes = eccbytes;
3236 @@ -2392,7 +2398,7 @@
3237 for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
3238 kfree(nand->partitions[i].name);
3245 @@ -2405,7 +2411,8 @@
3247 static void __exit ns_cleanup_module(void)
3249 - struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
3250 + struct nand_chip *chip = mtd_to_nand(nsmtd);
3251 + struct nandsim *ns = nand_get_controller_data(chip);
3254 nandsim_debugfs_remove(ns);
3255 @@ -2413,7 +2420,7 @@
3256 nand_release(nsmtd); /* Unregister driver */
3257 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
3258 kfree(ns->partitions[i].name);
3259 - kfree(nsmtd); /* Free other structures */
3260 + kfree(mtd_to_nand(nsmtd)); /* Free other structures */
3264 --- a/drivers/mtd/ofpart.c
3265 +++ b/drivers/mtd/ofpart.c
3269 static int parse_ofpart_partitions(struct mtd_info *master,
3270 - struct mtd_partition **pparts,
3271 + const struct mtd_partition **pparts,
3272 struct mtd_part_parser_data *data)
3274 + struct mtd_partition *parts;
3275 struct device_node *mtd_node;
3276 struct device_node *ofpart_node;
3277 const char *partname;
3279 bool dedicated = true;
3285 - mtd_node = data->of_node;
3286 + /* Pull of_node from the master device node */
3287 + mtd_node = mtd_get_of_node(master);
3295 - *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
3297 + parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
3302 @@ -107,19 +106,19 @@
3306 - (*pparts)[i].offset = of_read_number(reg, a_cells);
3307 - (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
3308 + parts[i].offset = of_read_number(reg, a_cells);
3309 + parts[i].size = of_read_number(reg + a_cells, s_cells);
3311 partname = of_get_property(pp, "label", &len);
3313 partname = of_get_property(pp, "name", &len);
3314 - (*pparts)[i].name = partname;
3315 + parts[i].name = partname;
3317 if (of_get_property(pp, "read-only", &len))
3318 - (*pparts)[i].mask_flags |= MTD_WRITEABLE;
3319 + parts[i].mask_flags |= MTD_WRITEABLE;
3321 if (of_get_property(pp, "lock", &len))
3322 - (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
3323 + parts[i].mask_flags |= MTD_POWERUP_LOCK;
3335 @@ -135,21 +135,20 @@
3345 static struct mtd_part_parser ofpart_parser = {
3346 - .owner = THIS_MODULE,
3347 .parse_fn = parse_ofpart_partitions,
3351 static int parse_ofoldpart_partitions(struct mtd_info *master,
3352 - struct mtd_partition **pparts,
3353 + const struct mtd_partition **pparts,
3354 struct mtd_part_parser_data *data)
3356 + struct mtd_partition *parts;
3357 struct device_node *dp;
3358 int i, plen, nr_parts;
3360 @@ -157,10 +156,8 @@
3367 - dp = data->of_node;
3368 + /* Pull of_node from the master device node */
3369 + dp = mtd_get_of_node(master);
3373 @@ -173,37 +170,37 @@
3375 nr_parts = plen / sizeof(part[0]);
3377 - *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
3379 + parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
3383 names = of_get_property(dp, "partition-names", &plen);
3385 for (i = 0; i < nr_parts; i++) {
3386 - (*pparts)[i].offset = be32_to_cpu(part->offset);
3387 - (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
3388 + parts[i].offset = be32_to_cpu(part->offset);
3389 + parts[i].size = be32_to_cpu(part->len) & ~1;
3390 /* bit 0 set signifies read only partition */
3391 if (be32_to_cpu(part->len) & 1)
3392 - (*pparts)[i].mask_flags = MTD_WRITEABLE;
3393 + parts[i].mask_flags = MTD_WRITEABLE;
3395 if (names && (plen > 0)) {
3396 int len = strlen(names) + 1;
3398 - (*pparts)[i].name = names;
3399 + parts[i].name = names;
3403 - (*pparts)[i].name = "unnamed";
3404 + parts[i].name = "unnamed";
3414 static struct mtd_part_parser ofoldpart_parser = {
3415 - .owner = THIS_MODULE,
3416 .parse_fn = parse_ofoldpart_partitions,
3417 .name = "ofoldpart",
3419 --- a/drivers/mtd/spi-nor/Kconfig
3420 +++ b/drivers/mtd/spi-nor/Kconfig
3425 +config MTD_MT81xx_NOR
3426 + tristate "Mediatek MT81xx SPI NOR flash controller"
3427 + depends on HAS_IOMEM
3429 + This enables access to SPI NOR flash, using MT81xx SPI NOR flash
3430 + controller. This controller does not support generic SPI BUS, it only
3431 + supports SPI NOR Flash.
3433 config MTD_SPI_NOR_USE_4K_SECTORS
3434 bool "Use small 4096 B erase sectors"
3438 config SPI_FSL_QUADSPI
3439 tristate "Freescale Quad SPI controller"
3440 - depends on ARCH_MXC || COMPILE_TEST
3441 + depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
3442 depends on HAS_IOMEM
3444 This enables support for the Quad SPI controller in master mode.
3445 --- a/drivers/mtd/spi-nor/Makefile
3446 +++ b/drivers/mtd/spi-nor/Makefile
3448 obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
3449 obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
3450 +obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
3451 obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
3453 +++ b/drivers/mtd/spi-nor/mtk-quadspi.c
3456 + * Copyright (c) 2015 MediaTek Inc.
3457 + * Author: Bayi Cheng <bayi.cheng@mediatek.com>
3459 + * This program is free software; you can redistribute it and/or modify
3460 + * it under the terms of the GNU General Public License version 2 as
3461 + * published by the Free Software Foundation.
3463 + * This program is distributed in the hope that it will be useful,
3464 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3465 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3466 + * GNU General Public License for more details.
3469 +#include <linux/clk.h>
3470 +#include <linux/delay.h>
3471 +#include <linux/device.h>
3472 +#include <linux/init.h>
3473 +#include <linux/io.h>
3474 +#include <linux/iopoll.h>
3475 +#include <linux/ioport.h>
3476 +#include <linux/math64.h>
3477 +#include <linux/module.h>
3478 +#include <linux/mtd/mtd.h>
3479 +#include <linux/mutex.h>
3480 +#include <linux/of.h>
3481 +#include <linux/of_device.h>
3482 +#include <linux/pinctrl/consumer.h>
3483 +#include <linux/platform_device.h>
3484 +#include <linux/slab.h>
3485 +#include <linux/mtd/mtd.h>
3486 +#include <linux/mtd/partitions.h>
3487 +#include <linux/mtd/spi-nor.h>
3489 +#define MTK_NOR_CMD_REG 0x00
3490 +#define MTK_NOR_CNT_REG 0x04
3491 +#define MTK_NOR_RDSR_REG 0x08
3492 +#define MTK_NOR_RDATA_REG 0x0c
3493 +#define MTK_NOR_RADR0_REG 0x10
3494 +#define MTK_NOR_RADR1_REG 0x14
3495 +#define MTK_NOR_RADR2_REG 0x18
3496 +#define MTK_NOR_WDATA_REG 0x1c
3497 +#define MTK_NOR_PRGDATA0_REG 0x20
3498 +#define MTK_NOR_PRGDATA1_REG 0x24
3499 +#define MTK_NOR_PRGDATA2_REG 0x28
3500 +#define MTK_NOR_PRGDATA3_REG 0x2c
3501 +#define MTK_NOR_PRGDATA4_REG 0x30
3502 +#define MTK_NOR_PRGDATA5_REG 0x34
3503 +#define MTK_NOR_SHREG0_REG 0x38
3504 +#define MTK_NOR_SHREG1_REG 0x3c
3505 +#define MTK_NOR_SHREG2_REG 0x40
3506 +#define MTK_NOR_SHREG3_REG 0x44
3507 +#define MTK_NOR_SHREG4_REG 0x48
3508 +#define MTK_NOR_SHREG5_REG 0x4c
3509 +#define MTK_NOR_SHREG6_REG 0x50
3510 +#define MTK_NOR_SHREG7_REG 0x54
3511 +#define MTK_NOR_SHREG8_REG 0x58
3512 +#define MTK_NOR_SHREG9_REG 0x5c
3513 +#define MTK_NOR_CFG1_REG 0x60
3514 +#define MTK_NOR_CFG2_REG 0x64
3515 +#define MTK_NOR_CFG3_REG 0x68
3516 +#define MTK_NOR_STATUS0_REG 0x70
3517 +#define MTK_NOR_STATUS1_REG 0x74
3518 +#define MTK_NOR_STATUS2_REG 0x78
3519 +#define MTK_NOR_STATUS3_REG 0x7c
3520 +#define MTK_NOR_FLHCFG_REG 0x84
3521 +#define MTK_NOR_TIME_REG 0x94
3522 +#define MTK_NOR_PP_DATA_REG 0x98
3523 +#define MTK_NOR_PREBUF_STUS_REG 0x9c
3524 +#define MTK_NOR_DELSEL0_REG 0xa0
3525 +#define MTK_NOR_DELSEL1_REG 0xa4
3526 +#define MTK_NOR_INTRSTUS_REG 0xa8
3527 +#define MTK_NOR_INTREN_REG 0xac
3528 +#define MTK_NOR_CHKSUM_CTL_REG 0xb8
3529 +#define MTK_NOR_CHKSUM_REG 0xbc
3530 +#define MTK_NOR_CMD2_REG 0xc0
3531 +#define MTK_NOR_WRPROT_REG 0xc4
3532 +#define MTK_NOR_RADR3_REG 0xc8
3533 +#define MTK_NOR_DUAL_REG 0xcc
3534 +#define MTK_NOR_DELSEL2_REG 0xd0
3535 +#define MTK_NOR_DELSEL3_REG 0xd4
3536 +#define MTK_NOR_DELSEL4_REG 0xd8
3538 +/* commands for mtk nor controller */
3539 +#define MTK_NOR_READ_CMD 0x0
3540 +#define MTK_NOR_RDSR_CMD 0x2
3541 +#define MTK_NOR_PRG_CMD 0x4
3542 +#define MTK_NOR_WR_CMD 0x10
3543 +#define MTK_NOR_PIO_WR_CMD 0x90
3544 +#define MTK_NOR_WRSR_CMD 0x20
3545 +#define MTK_NOR_PIO_READ_CMD 0x81
3546 +#define MTK_NOR_WR_BUF_ENABLE 0x1
3547 +#define MTK_NOR_WR_BUF_DISABLE 0x0
3548 +#define MTK_NOR_ENABLE_SF_CMD 0x30
3549 +#define MTK_NOR_DUAD_ADDR_EN 0x8
3550 +#define MTK_NOR_QUAD_READ_EN 0x4
3551 +#define MTK_NOR_DUAL_ADDR_EN 0x2
3552 +#define MTK_NOR_DUAL_READ_EN 0x1
3553 +#define MTK_NOR_DUAL_DISABLE 0x0
3554 +#define MTK_NOR_FAST_READ 0x1
3556 +#define SFLASH_WRBUF_SIZE 128
3558 +/* Can shift up to 48 bits (6 bytes) of TX/RX */
3559 +#define MTK_NOR_MAX_RX_TX_SHIFT 6
3560 +/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
3561 +#define MTK_NOR_MAX_SHIFT 7
3563 +/* Helpers for accessing the program data / shift data registers */
3564 +#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
3565 +#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
3567 +struct mt8173_nor {
3568 + struct spi_nor nor;
3569 + struct device *dev;
3570 + void __iomem *base; /* nor flash base address */
3571 + struct clk *spi_clk;
3572 + struct clk *nor_clk;
3575 +static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
3577 + struct spi_nor *nor = &mt8173_nor->nor;
3579 + switch (nor->flash_read) {
3580 + case SPI_NOR_FAST:
3581 + writeb(nor->read_opcode, mt8173_nor->base +
3582 + MTK_NOR_PRGDATA3_REG);
3583 + writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
3584 + MTK_NOR_CFG1_REG);
3586 + case SPI_NOR_DUAL:
3587 + writeb(nor->read_opcode, mt8173_nor->base +
3588 + MTK_NOR_PRGDATA3_REG);
3589 + writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
3590 + MTK_NOR_DUAL_REG);
3592 + case SPI_NOR_QUAD:
3593 + writeb(nor->read_opcode, mt8173_nor->base +
3594 + MTK_NOR_PRGDATA4_REG);
3595 + writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
3596 + MTK_NOR_DUAL_REG);
3599 + writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
3600 + MTK_NOR_DUAL_REG);
3605 +static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
3608 + u8 val = cmdval & 0x1f;
3610 + writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
3611 + return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
3612 + !(reg & val), 100, 10000);
3615 +static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
3616 + u8 *tx, int txlen, u8 *rx, int rxlen)
3618 + int len = 1 + txlen + rxlen;
3621 + if (len > MTK_NOR_MAX_SHIFT)
3624 + writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
3626 + /* start at PRGDATA5, go down to PRGDATA0 */
3627 + idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
3630 + writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3633 + /* program TX data */
3634 + for (i = 0; i < txlen; i++, idx--)
3635 + writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3637 + /* clear out rest of TX registers */
3638 + while (idx >= 0) {
3639 + writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3643 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
3647 + /* restart at first RX byte */
3650 + /* read out RX data */
3651 + for (i = 0; i < rxlen; i++, idx--)
3652 + rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
3657 +/* Do a WRSR (Write Status Register) command */
3658 +static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
3660 + writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
3661 + writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
3662 + return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
3665 +static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
3669 + /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer
3670 + * 0: pre-fetch buffer use for read
3671 + * 1: pre-fetch buffer use for page program
3673 + writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
3674 + return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
3675 + 0x01 == (reg & 0x01), 100, 10000);
3678 +static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
3682 + writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
3683 + return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
3684 + MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
3688 +static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
3692 + for (i = 0; i < 3; i++) {
3693 + writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
3696 + /* Last register is non-contiguous */
3697 + writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
3700 +static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
3701 + size_t *retlen, u_char *buffer)
3704 + int addr = (int)from;
3705 + u8 *buf = (u8 *)buffer;
3706 + struct mt8173_nor *mt8173_nor = nor->priv;
3708 + /* set mode for fast read mode ,dual mode or quad mode */
3709 + mt8173_nor_set_read_mode(mt8173_nor);
3710 + mt8173_nor_set_addr(mt8173_nor, addr);
3712 + for (i = 0; i < length; i++, (*retlen)++) {
3713 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
3716 + buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
3721 +static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
3722 + int addr, int length, u8 *data)
3726 + mt8173_nor_set_addr(mt8173_nor, addr);
3728 + for (i = 0; i < length; i++) {
3729 + writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
3730 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
3737 +static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
3740 + int i, bufidx, data;
3742 + mt8173_nor_set_addr(mt8173_nor, addr);
3745 + for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
3746 + data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
3747 + buf[bufidx + 1]<<8 | buf[bufidx];
3749 + writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
3751 + return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
3754 +static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
3755 + size_t *retlen, const u_char *buf)
3758 + struct mt8173_nor *mt8173_nor = nor->priv;
3760 + ret = mt8173_nor_write_buffer_enable(mt8173_nor);
3762 + dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
3764 + while (len >= SFLASH_WRBUF_SIZE) {
3765 + ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
3767 + dev_err(mt8173_nor->dev, "write buffer failed!\n");
3768 + len -= SFLASH_WRBUF_SIZE;
3769 + to += SFLASH_WRBUF_SIZE;
3770 + buf += SFLASH_WRBUF_SIZE;
3771 + (*retlen) += SFLASH_WRBUF_SIZE;
3773 + ret = mt8173_nor_write_buffer_disable(mt8173_nor);
3775 + dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
3778 + ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
3781 + dev_err(mt8173_nor->dev, "write single byte failed!\n");
3786 +static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
3789 + struct mt8173_nor *mt8173_nor = nor->priv;
3792 + case SPINOR_OP_RDSR:
3793 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
3797 + *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
3799 + dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
3802 + ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
3808 +static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
3812 + struct mt8173_nor *mt8173_nor = nor->priv;
3815 + case SPINOR_OP_WRSR:
3816 + /* We only handle 1 byte */
3817 + ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
3820 + ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
3822 + dev_warn(mt8173_nor->dev, "write reg failure!\n");
3828 +static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
3829 + struct device_node *flash_node)
3832 + struct spi_nor *nor;
3834 + /* initialize controller to accept commands */
3835 + writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
3837 + nor = &mt8173_nor->nor;
3838 + nor->dev = mt8173_nor->dev;
3839 + nor->priv = mt8173_nor;
3840 + spi_nor_set_flash_node(nor, flash_node);
3842 + /* fill the hooks to spi nor */
3843 + nor->read = mt8173_nor_read;
3844 + nor->read_reg = mt8173_nor_read_reg;
3845 + nor->write = mt8173_nor_write;
3846 + nor->write_reg = mt8173_nor_write_reg;
3847 + nor->mtd.name = "mtk_nor";
3848 + /* initialized with NULL */
3849 + ret = spi_nor_scan(nor, NULL, SPI_NOR_DUAL);
3853 + return mtd_device_register(&nor->mtd, NULL, 0);
3856 +static int mtk_nor_drv_probe(struct platform_device *pdev)
3858 + struct device_node *flash_np;
3859 + struct resource *res;
3861 + struct mt8173_nor *mt8173_nor;
3863 + if (!pdev->dev.of_node) {
3864 + dev_err(&pdev->dev, "No DT found\n");
3868 + mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
3871 + platform_set_drvdata(pdev, mt8173_nor);
3873 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3874 + mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
3875 + if (IS_ERR(mt8173_nor->base))
3876 + return PTR_ERR(mt8173_nor->base);
3878 + mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
3879 + if (IS_ERR(mt8173_nor->spi_clk))
3880 + return PTR_ERR(mt8173_nor->spi_clk);
3882 + mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
3883 + if (IS_ERR(mt8173_nor->nor_clk))
3884 + return PTR_ERR(mt8173_nor->nor_clk);
3886 + mt8173_nor->dev = &pdev->dev;
3887 + ret = clk_prepare_enable(mt8173_nor->spi_clk);
3891 + ret = clk_prepare_enable(mt8173_nor->nor_clk);
3893 + clk_disable_unprepare(mt8173_nor->spi_clk);
3896 + /* only support one attached flash */
3897 + flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
3899 + dev_err(&pdev->dev, "no SPI flash device to configure\n");
3903 + ret = mtk_nor_init(mt8173_nor, flash_np);
3907 + clk_disable_unprepare(mt8173_nor->spi_clk);
3908 + clk_disable_unprepare(mt8173_nor->nor_clk);
3913 +static int mtk_nor_drv_remove(struct platform_device *pdev)
3915 + struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
3917 + clk_disable_unprepare(mt8173_nor->spi_clk);
3918 + clk_disable_unprepare(mt8173_nor->nor_clk);
3922 +static const struct of_device_id mtk_nor_of_ids[] = {
3923 + { .compatible = "mediatek,mt8173-nor"},
3924 + { /* sentinel */ }
3926 +MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
3928 +static struct platform_driver mtk_nor_driver = {
3929 + .probe = mtk_nor_drv_probe,
3930 + .remove = mtk_nor_drv_remove,
3932 + .name = "mtk-nor",
3933 + .of_match_table = mtk_nor_of_ids,
3937 +module_platform_driver(mtk_nor_driver);
3938 +MODULE_LICENSE("GPL v2");
3939 +MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
3940 --- a/drivers/mtd/spi-nor/spi-nor.c
3941 +++ b/drivers/mtd/spi-nor/spi-nor.c
3943 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
3945 #define SPI_NOR_MAX_ID_LEN 6
3946 +#define SPI_NOR_MAX_ADDR_WIDTH 4
3954 -#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
3955 -#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
3956 -#define SST_WRITE 0x04 /* use SST byte programming */
3957 -#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
3958 -#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
3959 -#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
3960 -#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
3961 -#define USE_FSR 0x80 /* use flag status register */
3962 +#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
3963 +#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
3964 +#define SST_WRITE BIT(2) /* use SST byte programming */
3965 +#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
3966 +#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
3967 +#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
3968 +#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
3969 +#define USE_FSR BIT(7) /* use flag status register */
3970 +#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
3971 +#define SPI_NOR_HAS_TB BIT(9) /*
3972 + * Flash SR has Top/Bottom (TB) protect
3973 + * bit. Must be used with
3974 + * SPI_NOR_HAS_LOCK.
3978 #define JEDEC_MFR(info) ((info)->id[0])
3979 @@ -313,6 +320,29 @@
3983 + * Initiate the erasure of a single sector
3985 +static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
3987 + u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
3991 + return nor->erase(nor, addr);
3994 + * Default implementation, if driver doesn't have a specialized HW
3997 + for (i = nor->addr_width - 1; i >= 0; i--) {
3998 + buf[i] = addr & 0xff;
4002 + return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
4006 * Erase an address range on the nor chip. The address range may extend
4007 * one or more erase sectors. Return an error is there is a problem erasing.
4009 @@ -371,10 +401,9 @@
4013 - if (nor->erase(nor, addr)) {
4015 + ret = spi_nor_erase_sector(nor, addr);
4020 addr += mtd->erasesize;
4021 len -= mtd->erasesize;
4022 @@ -387,17 +416,13 @@
4027 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
4029 - instr->state = MTD_ERASE_DONE;
4030 + instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
4031 mtd_erase_callback(instr);
4036 - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
4037 - instr->state = MTD_ERASE_FAILED;
4041 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
4042 @@ -415,32 +440,58 @@
4044 pow = ((sr & mask) ^ mask) >> shift;
4045 *len = mtd->size >> pow;
4046 - *ofs = mtd->size - *len;
4047 + if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
4050 + *ofs = mtd->size - *len;
4055 - * Return 1 if the entire region is locked, 0 otherwise
4056 + * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
4057 + * @locked is false); 0 otherwise
4059 -static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4061 +static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4062 + u8 sr, bool locked)
4070 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
4072 - return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
4074 + /* Requested range is a sub-range of locked range */
4075 + return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
4077 + /* Requested range does not overlap with locked range */
4078 + return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
4081 +static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4084 + return stm_check_lock_status_sr(nor, ofs, len, sr, true);
4087 +static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4090 + return stm_check_lock_status_sr(nor, ofs, len, sr, false);
4094 * Lock a region of the flash. Compatible with ST Micro and similar flash.
4095 - * Supports only the block protection bits BP{0,1,2} in the status register
4096 + * Supports the block protection bits BP{0,1,2} in the status register
4097 * (SR). Does not support these features found in newer SR bitfields:
4098 - * - TB: top/bottom protect - only handle TB=0 (top protect)
4099 * - SEC: sector/block protect - only handle SEC=0 (block protect)
4100 * - CMP: complement protect - only support CMP=0 (range is not complemented)
4102 + * Support for the following is provided conditionally for some flash:
4103 + * - TB: top/bottom protect
4105 * Sample table portion for 8MB flash (Winbond w25q64fw):
4107 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
4108 @@ -453,26 +504,55 @@
4109 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
4110 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
4111 * X | X | 1 | 1 | 1 | 8 MB | ALL
4112 + * ------|-------|-------|-------|-------|---------------|-------------------
4113 + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
4114 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
4115 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
4116 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
4117 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
4118 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
4120 * Returns negative on errors, 0 on success.
4122 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
4124 struct mtd_info *mtd = &nor->mtd;
4125 - u8 status_old, status_new;
4126 + int status_old, status_new;
4127 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
4128 u8 shift = ffs(mask) - 1, pow, val;
4130 + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
4134 status_old = read_sr(nor);
4135 + if (status_old < 0)
4136 + return status_old;
4138 - /* SPI NOR always locks to the end */
4139 - if (ofs + len != mtd->size) {
4140 - /* Does combined region extend to end? */
4141 - if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
4144 - len = mtd->size - ofs;
4146 + /* If nothing in our range is unlocked, we don't need to do anything */
4147 + if (stm_is_locked_sr(nor, ofs, len, status_old))
4150 + /* If anything below us is unlocked, we can't use 'bottom' protection */
4151 + if (!stm_is_locked_sr(nor, 0, ofs, status_old))
4152 + can_be_bottom = false;
4154 + /* If anything above us is unlocked, we can't use 'top' protection */
4155 + if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
4157 + can_be_top = false;
4159 + if (!can_be_bottom && !can_be_top)
4162 + /* Prefer top, if both are valid */
4163 + use_top = can_be_top;
4165 + /* lock_len: length of region that should end up locked */
4167 + lock_len = mtd->size - ofs;
4169 + lock_len = ofs + len;
4172 * Need smallest pow such that:
4175 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
4177 - pow = ilog2(mtd->size) - ilog2(len);
4178 + pow = ilog2(mtd->size) - ilog2(lock_len);
4179 val = mask - (pow << shift);
4182 @@ -491,14 +571,27 @@
4186 - status_new = (status_old & ~mask) | val;
4187 + status_new = (status_old & ~mask & ~SR_TB) | val;
4189 + /* Disallow further writes if WP pin is asserted */
4190 + status_new |= SR_SRWD;
4193 + status_new |= SR_TB;
4195 + /* Don't bother if they're the same */
4196 + if (status_new == status_old)
4199 /* Only modify protection if it will not unlock other areas */
4200 - if ((status_new & mask) <= (status_old & mask))
4201 + if ((status_new & mask) < (status_old & mask))
4205 - return write_sr(nor, status_new);
4206 + ret = write_sr(nor, status_new);
4209 + return spi_nor_wait_till_ready(nor);
4213 @@ -509,17 +602,43 @@
4214 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
4216 struct mtd_info *mtd = &nor->mtd;
4217 - uint8_t status_old, status_new;
4218 + int status_old, status_new;
4219 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
4220 u8 shift = ffs(mask) - 1, pow, val;
4222 + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
4226 status_old = read_sr(nor);
4227 + if (status_old < 0)
4228 + return status_old;
4230 + /* If nothing in our range is locked, we don't need to do anything */
4231 + if (stm_is_unlocked_sr(nor, ofs, len, status_old))
4234 + /* If anything below us is locked, we can't use 'top' protection */
4235 + if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
4236 + can_be_top = false;
4238 + /* If anything above us is locked, we can't use 'bottom' protection */
4239 + if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
4241 + can_be_bottom = false;
4243 - /* Cannot unlock; would unlock larger region than requested */
4244 - if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
4246 + if (!can_be_bottom && !can_be_top)
4249 + /* Prefer top, if both are valid */
4250 + use_top = can_be_top;
4252 + /* lock_len: length of region that should remain locked */
4254 + lock_len = mtd->size - (ofs + len);
4259 * Need largest pow such that:
4263 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
4265 - pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
4266 - if (ofs + len == mtd->size) {
4267 + pow = ilog2(mtd->size) - order_base_2(lock_len);
4268 + if (lock_len == 0) {
4269 val = 0; /* fully unlocked */
4271 val = mask - (pow << shift);
4272 @@ -539,14 +658,28 @@
4276 - status_new = (status_old & ~mask) | val;
4277 + status_new = (status_old & ~mask & ~SR_TB) | val;
4279 + /* Don't protect status register if we're fully unlocked */
4280 + if (lock_len == mtd->size)
4281 + status_new &= ~SR_SRWD;
4284 + status_new |= SR_TB;
4286 + /* Don't bother if they're the same */
4287 + if (status_new == status_old)
4290 /* Only modify protection if it will not lock other areas */
4291 - if ((status_new & mask) >= (status_old & mask))
4292 + if ((status_new & mask) > (status_old & mask))
4296 - return write_sr(nor, status_new);
4297 + ret = write_sr(nor, status_new);
4300 + return spi_nor_wait_till_ready(nor);
4305 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
4306 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
4307 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
4308 - { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
4309 + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
4310 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
4311 - { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
4312 + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
4313 { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
4314 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
4315 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
4317 { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
4318 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4319 { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4320 - { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
4321 - { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
4322 + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
4323 + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
4324 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
4325 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
4326 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
4328 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4329 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4330 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
4331 + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4332 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
4333 { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
4334 { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
4335 @@ -829,11 +963,23 @@
4336 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
4337 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
4338 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
4339 - { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4341 + "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
4342 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4343 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4345 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
4346 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
4347 - { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4348 - { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4350 + "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
4351 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4352 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4355 + "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
4356 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4357 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4359 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
4360 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
4361 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
4362 @@ -856,7 +1002,7 @@
4364 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
4366 - dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
4367 + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
4368 return ERR_PTR(tmp);
4371 @@ -867,7 +1013,7 @@
4372 return &spi_nor_ids[tmp];
4375 - dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n",
4376 + dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
4377 id[0], id[1], id[2]);
4378 return ERR_PTR(-ENODEV);
4380 @@ -1013,6 +1159,8 @@
4388 write_sr(nor, val | SR_QUAD_EN_MX);
4389 @@ -1094,7 +1242,7 @@
4390 static int spi_nor_check(struct spi_nor *nor)
4392 if (!nor->dev || !nor->read || !nor->write ||
4393 - !nor->read_reg || !nor->write_reg || !nor->erase) {
4394 + !nor->read_reg || !nor->write_reg) {
4395 pr_err("spi-nor: please fill all the necessary fields!\n");
4398 @@ -1107,7 +1255,7 @@
4399 const struct flash_info *info = NULL;
4400 struct device *dev = nor->dev;
4401 struct mtd_info *mtd = &nor->mtd;
4402 - struct device_node *np = nor->flash_node;
4403 + struct device_node *np = spi_nor_get_flash_node(nor);
4407 @@ -1157,9 +1305,11 @@
4408 if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
4409 JEDEC_MFR(info) == SNOR_MFR_INTEL ||
4410 JEDEC_MFR(info) == SNOR_MFR_MACRONIX ||
4411 - JEDEC_MFR(info) == SNOR_MFR_SST) {
4412 + JEDEC_MFR(info) == SNOR_MFR_SST ||
4413 + info->flags & SPI_NOR_HAS_LOCK) {
4416 + spi_nor_wait_till_ready(nor);
4420 @@ -1173,7 +1323,8 @@
4421 mtd->_read = spi_nor_read;
4423 /* NOR protection support for STmicro/Micron chips and similar */
4424 - if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
4425 + if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
4426 + info->flags & SPI_NOR_HAS_LOCK) {
4427 nor->flash_lock = stm_lock;
4428 nor->flash_unlock = stm_unlock;
4429 nor->flash_is_locked = stm_is_locked;
4430 @@ -1193,6 +1344,8 @@
4432 if (info->flags & USE_FSR)
4433 nor->flags |= SNOR_F_USE_FSR;
4434 + if (info->flags & SPI_NOR_HAS_TB)
4435 + nor->flags |= SNOR_F_HAS_SR_TB;
4437 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
4438 /* prefer "small sector" erase if possible */
4439 @@ -1295,6 +1448,12 @@
4440 nor->addr_width = 3;
4443 + if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4444 + dev_err(dev, "address width is too large: %u\n",
4449 nor->read_dummy = spi_nor_read_dummy_cycles(nor);
4451 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
4452 --- a/drivers/mtd/tests/mtd_nandecctest.c
4453 +++ b/drivers/mtd/tests/mtd_nandecctest.c
4455 __nand_calculate_ecc(error_data, size, calc_ecc);
4456 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
4458 - return (ret == -1) ? 0 : -EINVAL;
4459 + return (ret == -EBADMSG) ? 0 : -EINVAL;
4462 static const struct nand_ecc_test nand_ecc_test[] = {
4463 --- a/drivers/mtd/tests/oobtest.c
4464 +++ b/drivers/mtd/tests/oobtest.c
4465 @@ -215,19 +215,19 @@
4466 pr_info("ignoring error as within bitflip_limit\n");
4469 - if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
4470 + if (use_offset != 0 || use_len < mtd->oobavail) {
4473 ops.mode = MTD_OPS_AUTO_OOB;
4476 - ops.ooblen = mtd->ecclayout->oobavail;
4477 + ops.ooblen = mtd->oobavail;
4481 ops.oobbuf = readbuf;
4482 err = mtd_read_oob(mtd, addr, &ops);
4483 - if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
4484 + if (err || ops.oobretlen != mtd->oobavail) {
4485 pr_err("error: readoob failed at %#llx\n",
4489 /* verify post-(use_offset + use_len) area for 0xff */
4490 k = use_offset + use_len;
4491 bitflips += memffshow(addr, k, readbuf + k,
4492 - mtd->ecclayout->oobavail - k);
4493 + mtd->oobavail - k);
4495 if (bitflips > bitflip_limit) {
4496 pr_err("error: verify failed at %#llx\n",
4498 struct mtd_oob_ops ops;
4500 loff_t addr = (loff_t)ebnum * mtd->erasesize;
4501 - size_t len = mtd->ecclayout->oobavail * pgcnt;
4502 - size_t oobavail = mtd->ecclayout->oobavail;
4503 + size_t len = mtd->oobavail * pgcnt;
4504 + size_t oobavail = mtd->oobavail;
4512 - use_len = mtd->ecclayout->oobavail;
4513 - use_len_max = mtd->ecclayout->oobavail;
4514 + use_len = mtd->oobavail;
4515 + use_len_max = mtd->oobavail;
4518 /* First test: write all OOB, read it back and verify */
4521 /* Write all eraseblocks */
4523 - use_len = mtd->ecclayout->oobavail;
4524 - use_len_max = mtd->ecclayout->oobavail;
4525 + use_len = mtd->oobavail;
4526 + use_len_max = mtd->oobavail;
4528 prandom_seed_state(&rnd_state, 5);
4532 /* Check all eraseblocks */
4534 - use_len = mtd->ecclayout->oobavail;
4535 - use_len_max = mtd->ecclayout->oobavail;
4536 + use_len = mtd->oobavail;
4537 + use_len_max = mtd->oobavail;
4539 prandom_seed_state(&rnd_state, 5);
4540 err = verify_all_eraseblocks();
4545 - use_len = mtd->ecclayout->oobavail;
4546 - use_len_max = mtd->ecclayout->oobavail;
4547 + use_len = mtd->oobavail;
4548 + use_len_max = mtd->oobavail;
4551 /* Fourth test: try to write off end of device */
4556 - ops.ooboffs = mtd->ecclayout->oobavail;
4557 + ops.ooboffs = mtd->oobavail;
4559 ops.oobbuf = writebuf;
4560 pr_info("attempting to start write past end of OOB\n");
4565 - ops.ooboffs = mtd->ecclayout->oobavail;
4566 + ops.ooboffs = mtd->oobavail;
4568 ops.oobbuf = readbuf;
4569 pr_info("attempting to start read past end of OOB\n");
4571 ops.mode = MTD_OPS_AUTO_OOB;
4574 - ops.ooblen = mtd->ecclayout->oobavail + 1;
4575 + ops.ooblen = mtd->oobavail + 1;
4580 ops.mode = MTD_OPS_AUTO_OOB;
4583 - ops.ooblen = mtd->ecclayout->oobavail + 1;
4584 + ops.ooblen = mtd->oobavail + 1;
4589 ops.mode = MTD_OPS_AUTO_OOB;
4592 - ops.ooblen = mtd->ecclayout->oobavail;
4593 + ops.ooblen = mtd->oobavail;
4598 ops.mode = MTD_OPS_AUTO_OOB;
4601 - ops.ooblen = mtd->ecclayout->oobavail;
4602 + ops.ooblen = mtd->oobavail;
4607 for (i = 0; i < ebcnt - 1; ++i) {
4610 - size_t sz = mtd->ecclayout->oobavail;
4611 + size_t sz = mtd->oobavail;
4612 if (bbt[i] || bbt[i + 1])
4614 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
4615 @@ -673,13 +673,12 @@
4616 for (i = 0; i < ebcnt - 1; ++i) {
4617 if (bbt[i] || bbt[i + 1])
4619 - prandom_bytes_state(&rnd_state, writebuf,
4620 - mtd->ecclayout->oobavail * 2);
4621 + prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
4622 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
4623 ops.mode = MTD_OPS_AUTO_OOB;
4626 - ops.ooblen = mtd->ecclayout->oobavail * 2;
4627 + ops.ooblen = mtd->oobavail * 2;
4634 if (memcmpshow(addr, readbuf, writebuf,
4635 - mtd->ecclayout->oobavail * 2)) {
4636 + mtd->oobavail * 2)) {
4637 pr_err("error: verify failed at %#llx\n",
4640 --- a/drivers/mtd/tests/pagetest.c
4641 +++ b/drivers/mtd/tests/pagetest.c
4642 @@ -127,13 +127,12 @@
4643 unsigned char *pp1, *pp2, *pp3, *pp4;
4645 pr_info("crosstest\n");
4646 - pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
4647 + pp1 = kzalloc(pgsize * 4, GFP_KERNEL);
4653 - memset(pp1, 0, pgsize * 4);
4656 for (i = 0; i < ebcnt && bbt[i]; ++i)
4657 --- a/drivers/mtd/ubi/cdev.c
4658 +++ b/drivers/mtd/ubi/cdev.c
4660 struct ubi_device *ubi = desc->vol->ubi;
4661 struct inode *inode = file_inode(file);
4663 - mutex_lock(&inode->i_mutex);
4664 + inode_lock(inode);
4665 err = ubi_sync(ubi->ubi_num);
4666 - mutex_unlock(&inode->i_mutex);
4667 + inode_unlock(inode);
4671 --- a/drivers/mtd/ubi/misc.c
4672 +++ b/drivers/mtd/ubi/misc.c
4673 @@ -153,3 +153,52 @@
4678 +/* Normal UBI messages */
4679 +void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...)
4681 + struct va_format vaf;
4684 + va_start(args, fmt);
4689 + pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf);
4694 +/* UBI warning messages */
4695 +void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...)
4697 + struct va_format vaf;
4700 + va_start(args, fmt);
4705 + pr_warn(UBI_NAME_STR "%d warning: %ps: %pV\n",
4706 + ubi->ubi_num, __builtin_return_address(0), &vaf);
4711 +/* UBI error messages */
4712 +void ubi_err(const struct ubi_device *ubi, const char *fmt, ...)
4714 + struct va_format vaf;
4717 + va_start(args, fmt);
4722 + pr_err(UBI_NAME_STR "%d error: %ps: %pV\n",
4723 + ubi->ubi_num, __builtin_return_address(0), &vaf);
4726 --- a/drivers/mtd/ubi/ubi.h
4727 +++ b/drivers/mtd/ubi/ubi.h
4729 /* UBI name used for character devices, sysfs, etc */
4730 #define UBI_NAME_STR "ubi"
4734 /* Normal UBI messages */
4735 -#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
4736 - ubi->ubi_num, ##__VA_ARGS__)
4738 +void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...);
4740 /* UBI warning messages */
4741 -#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
4742 - ubi->ubi_num, __func__, ##__VA_ARGS__)
4744 +void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...);
4746 /* UBI error messages */
4747 -#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
4748 - ubi->ubi_num, __func__, ##__VA_ARGS__)
4750 +void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
4752 /* Background thread name pattern */
4753 #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
4754 --- a/drivers/mtd/ubi/wl.c
4755 +++ b/drivers/mtd/ubi/wl.c
4757 return __erase_worker(ubi, &wl_wrk);
4760 +static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
4762 * wear_leveling_worker - wear-leveling worker function.
4763 * @ubi: UBI device description object
4766 struct ubi_wl_entry *e1, *e2;
4767 struct ubi_vid_hdr *vid_hdr;
4768 + int dst_leb_clean = 0;
4774 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
4775 if (err && err != UBI_IO_BITFLIPS) {
4776 + dst_leb_clean = 1;
4777 if (err == UBI_IO_FF) {
4779 * We are trying to move PEB without a VID header. UBI
4780 @@ -798,10 +801,12 @@
4784 + dst_leb_clean = 1;
4787 if (err == MOVE_RETRY) {
4789 + dst_leb_clean = 1;
4792 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
4794 ubi->erroneous_peb_count);
4797 + dst_leb_clean = 1;
4801 @@ -897,15 +903,24 @@
4802 wl_tree_add(e1, &ubi->scrub);
4804 wl_tree_add(e1, &ubi->used);
4805 + if (dst_leb_clean) {
4806 + wl_tree_add(e2, &ubi->free);
4807 + ubi->free_count++;
4810 ubi_assert(!ubi->move_to_put);
4811 ubi->move_from = ubi->move_to = NULL;
4812 ubi->wl_scheduled = 0;
4813 spin_unlock(&ubi->wl_lock);
4815 ubi_free_vid_hdr(ubi, vid_hdr);
4816 - err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
4819 + if (dst_leb_clean) {
4820 + ensure_wear_leveling(ubi, 1);
4822 + err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
4827 mutex_unlock(&ubi->move_mutex);
4829 --- a/include/linux/mtd/bbm.h
4830 +++ b/include/linux/mtd/bbm.h
4834 /* OneNAND BBT interface */
4835 -extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
4836 extern int onenand_default_bbt(struct mtd_info *mtd);
4838 #endif /* __LINUX_MTD_BBM_H */
4839 --- a/include/linux/mtd/fsmc.h
4840 +++ b/include/linux/mtd/fsmc.h
4841 @@ -103,24 +103,6 @@
4843 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
4846 - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
4847 - * and it has to be read consecutively and immediately after the 512
4848 - * byte data block for hardware to generate the error bit offsets
4849 - * Managing the ecc bytes in the following way is easier. This way is
4850 - * similar to oobfree structure maintained already in u-boot nand driver
4852 -#define MAX_ECCPLACE_ENTRIES 32
4854 -struct fsmc_nand_eccplace {
4859 -struct fsmc_eccplace {
4860 - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
4863 struct fsmc_nand_timings {
4866 --- a/include/linux/mtd/inftl.h
4867 +++ b/include/linux/mtd/inftl.h
4869 unsigned int nb_blocks; /* number of physical blocks */
4870 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
4871 struct erase_info instr;
4872 - struct nand_ecclayout oobinfo;
4875 int INFTL_mount(struct INFTLrecord *s);
4876 --- a/include/linux/mtd/map.h
4877 +++ b/include/linux/mtd/map.h
4881 #ifndef map_bankwidth
4883 #warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
4885 static inline int map_bankwidth(void *map)
4888 @@ -238,8 +240,11 @@
4889 If there is no cache to care about this can be set to NULL. */
4890 void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
4892 - /* set_vpp() must handle being reentered -- enable, enable, disable
4893 - must leave it enabled. */
4894 + /* This will be called with 1 as parameter when the first map user
4895 + * needs VPP, and called with 0 when the last user exits. The map
4896 + * core maintains a reference counter, and assumes that VPP is a
4897 + * global resource applying to all mapped flash chips on the system.
4899 void (*set_vpp)(struct map_info *, int);
4901 unsigned long pfow_base;
4902 --- a/include/linux/mtd/mtd.h
4903 +++ b/include/linux/mtd/mtd.h
4904 @@ -100,17 +100,35 @@
4906 #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
4907 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
4909 + * struct mtd_oob_region - oob region definition
4910 + * @offset: region offset
4911 + * @length: region length
4913 + * This structure describes a region of the OOB area, and is used
4914 + * to retrieve ECC or free bytes sections.
4915 + * Each section is defined by an offset within the OOB area and a
4918 +struct mtd_oob_region {
4924 - * Internal ECC layout control structure. For historical reasons, there is a
4925 - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
4926 - * for export to user-space via the ECCGETLAYOUT ioctl.
4927 - * nand_ecclayout should be expandable in the future simply by the above macros.
4928 + * struct mtd_ooblayout_ops - NAND OOB layout operations
4929 + * @ecc: function returning an ECC region in the OOB area.
4930 + * Should return -ERANGE if %section exceeds the total number of
4932 + * @free: function returning a free region in the OOB area.
4933 + * Should return -ERANGE if %section exceeds the total number of
4936 -struct nand_ecclayout {
4938 - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
4940 - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
4941 +struct mtd_ooblayout_ops {
4942 + int (*ecc)(struct mtd_info *mtd, int section,
4943 + struct mtd_oob_region *oobecc);
4944 + int (*free)(struct mtd_info *mtd, int section,
4945 + struct mtd_oob_region *oobfree);
4948 struct module; /* only needed for owner field in mtd_info */
4953 - /* ECC layout structure pointer - read only! */
4954 - struct nand_ecclayout *ecclayout;
4955 + /* OOB layout description */
4956 + const struct mtd_ooblayout_ops *ooblayout;
4958 /* the ecc step size. */
4959 unsigned int ecc_step_size;
4960 @@ -258,6 +276,46 @@
4964 +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
4965 + struct mtd_oob_region *oobecc);
4966 +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
4968 + struct mtd_oob_region *oobregion);
4969 +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
4970 + const u8 *oobbuf, int start, int nbytes);
4971 +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
4972 + u8 *oobbuf, int start, int nbytes);
4973 +int mtd_ooblayout_free(struct mtd_info *mtd, int section,
4974 + struct mtd_oob_region *oobfree);
4975 +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
4976 + const u8 *oobbuf, int start, int nbytes);
4977 +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
4978 + u8 *oobbuf, int start, int nbytes);
4979 +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
4980 +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
4982 +static inline void mtd_set_ooblayout(struct mtd_info *mtd,
4983 + const struct mtd_ooblayout_ops *ooblayout)
4985 + mtd->ooblayout = ooblayout;
4988 +static inline void mtd_set_of_node(struct mtd_info *mtd,
4989 + struct device_node *np)
4991 + mtd->dev.of_node = np;
4994 +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
4996 + return mtd->dev.of_node;
4999 +static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
5001 + return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
5004 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
5005 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
5006 void **virt, resource_size_t *phys);
5007 --- a/include/linux/mtd/nand.h
5008 +++ b/include/linux/mtd/nand.h
5009 @@ -119,6 +119,12 @@
5013 +enum nand_ecc_algo {
5020 * Constants for Hardware ECC
5022 @@ -129,6 +135,14 @@
5023 /* Enable Hardware ECC before syndrome is read back from flash */
5024 #define NAND_ECC_READSYN 2
5027 + * Enable generic NAND 'page erased' check. This check is only done when
5028 + * ecc.correct() returns -EBADMSG.
5029 + * Set this flag if your implementation does not fix bitflips in erased
5030 + * pages and you want to rely on the default implementation.
5032 +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
5034 /* Bit mask for flags passed to do_nand_read_ecc */
5035 #define NAND_GET_DEVICE 0x80
5037 @@ -160,6 +174,12 @@
5038 /* Device supports subpage reads */
5039 #define NAND_SUBPAGE_READ 0x00001000
5042 + * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
5045 +#define NAND_NEED_SCRAMBLING 0x00002000
5047 /* Options valid for Samsung large page devices */
5048 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
5050 @@ -276,15 +296,15 @@
5053 __le16 src_sync_timing_mode;
5054 - __le16 src_ssync_features;
5055 + u8 src_ssync_features;
5056 __le16 clk_pin_capacitance_typ;
5057 __le16 io_pin_capacitance_typ;
5058 __le16 input_pin_capacitance_typ;
5059 u8 input_pin_capacitance_max;
5060 u8 driver_strength_support;
5068 __le16 vendor_revision;
5070 __le16 input_pin_capacitance_typ;
5071 __le16 clk_pin_capacitance_typ;
5072 u8 driver_strength_support;
5077 /* ECC and endurance block */
5080 * struct nand_ecc_ctrl - Control structure for ECC
5082 + * @algo: ECC algorithm
5083 * @steps: number of ECC steps per page
5084 * @size: data bytes per ECC step
5085 * @bytes: ECC bytes per step
5086 @@ -451,12 +472,18 @@
5087 * @total: total number of ECC bytes per page
5088 * @prepad: padding information for syndrome based ECC generators
5089 * @postpad: padding information for syndrome based ECC generators
5090 - * @layout: ECC layout control struct pointer
5091 + * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
5092 * @priv: pointer to private ECC control data
5093 * @hwctl: function to control hardware ECC generator. Must only
5094 * be provided if an hardware ECC is available
5095 * @calculate: function for ECC calculation or readback from ECC hardware
5096 - * @correct: function for ECC correction, matching to ECC generator (sw/hw)
5097 + * @correct: function for ECC correction, matching to ECC generator (sw/hw).
5098 + * Should return a positive number representing the number of
5099 + * corrected bitflips, -EBADMSG if the number of bitflips exceed
5100 + * ECC strength, or any other error code if the error is not
5101 + * directly related to correction.
5102 + * If -EBADMSG is returned the input buffers should be left
5104 * @read_page_raw: function to read a raw page without ECC. This function
5105 * should hide the specific layout used by the ECC
5106 * controller and always return contiguous in-band and
5109 struct nand_ecc_ctrl {
5110 nand_ecc_modes_t mode;
5111 + enum nand_ecc_algo algo;
5119 - struct nand_ecclayout *layout;
5120 + unsigned int options;
5122 void (*hwctl)(struct mtd_info *mtd, int mode);
5123 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
5124 @@ -540,11 +568,11 @@
5127 * struct nand_chip - NAND Private Flash Chip Data
5128 + * @mtd: MTD device registered to the MTD framework
5129 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
5131 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
5133 - * @flash_node: [BOARDSPECIFIC] device node describing this instance
5134 * @read_byte: [REPLACEABLE] read one byte from the chip
5135 * @read_word: [REPLACEABLE] read one word from the chip
5136 * @write_byte: [REPLACEABLE] write a single byte to the chip on the
5137 @@ -640,18 +668,17 @@
5141 + struct mtd_info mtd;
5142 void __iomem *IO_ADDR_R;
5143 void __iomem *IO_ADDR_W;
5145 - struct device_node *flash_node;
5147 uint8_t (*read_byte)(struct mtd_info *mtd);
5148 u16 (*read_word)(struct mtd_info *mtd);
5149 void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
5150 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
5151 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
5152 void (*select_chip)(struct mtd_info *mtd, int chip);
5153 - int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
5154 + int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
5155 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
5156 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
5157 int (*dev_ready)(struct mtd_info *mtd);
5158 @@ -719,6 +746,40 @@
5162 +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
5163 +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
5165 +static inline void nand_set_flash_node(struct nand_chip *chip,
5166 + struct device_node *np)
5168 + mtd_set_of_node(&chip->mtd, np);
5171 +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
5173 + return mtd_get_of_node(&chip->mtd);
5176 +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
5178 + return container_of(mtd, struct nand_chip, mtd);
5181 +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
5183 + return &chip->mtd;
5186 +static inline void *nand_get_controller_data(struct nand_chip *chip)
5188 + return chip->priv;
5191 +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
5193 + chip->priv = priv;
5197 * NAND Flash Manufacturer ID Codes
5200 * @chip_delay: R/B delay value in us
5201 * @options: Option flags, e.g. 16bit buswidth
5202 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
5203 - * @ecclayout: ECC layout info structure
5204 * @part_probe_types: NULL-terminated array of probe types
5206 struct platform_nand_chip {
5210 struct mtd_partition *partitions;
5211 - struct nand_ecclayout *ecclayout;
5213 unsigned int options;
5214 unsigned int bbt_options;
5215 @@ -908,15 +967,6 @@
5216 struct platform_nand_ctrl ctrl;
5219 -/* Some helpers to access the data structures */
5221 -struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
5223 - struct nand_chip *chip = mtd->priv;
5225 - return chip->priv;
5228 /* return the supported features. */
5229 static inline int onfi_feature(struct nand_chip *chip)
5231 --- a/include/linux/mtd/nand_bch.h
5232 +++ b/include/linux/mtd/nand_bch.h
5235 * Initialize BCH encoder/decoder
5237 -struct nand_bch_control *
5238 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
5239 - unsigned int eccbytes, struct nand_ecclayout **ecclayout);
5240 +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
5242 * Release BCH encoder/decoder resources
5245 nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
5246 unsigned char *read_ecc, unsigned char *calc_ecc)
5252 -static inline struct nand_bch_control *
5253 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
5254 - unsigned int eccbytes, struct nand_ecclayout **ecclayout)
5255 +static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
5259 --- a/include/linux/mtd/nftl.h
5260 +++ b/include/linux/mtd/nftl.h
5262 unsigned int nb_blocks; /* number of physical blocks */
5263 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
5264 struct erase_info instr;
5265 - struct nand_ecclayout oobinfo;
5268 int NFTL_mount(struct NFTLrecord *s);
5269 --- a/include/linux/mtd/onenand.h
5270 +++ b/include/linux/mtd/onenand.h
5272 * @page_buf: [INTERN] page main data buffer
5273 * @oob_buf: [INTERN] page oob data buffer
5274 * @subpagesize: [INTERN] holds the subpagesize
5275 - * @ecclayout: [REPLACEABLE] the default ecc placement scheme
5276 * @bbm: [REPLACEABLE] pointer to Bad Block Management
5277 * @priv: [OPTIONAL] pointer to private chip date
5283 - struct nand_ecclayout *ecclayout;
5287 --- a/include/linux/mtd/partitions.h
5288 +++ b/include/linux/mtd/partitions.h
5290 uint64_t size; /* partition size */
5291 uint64_t offset; /* offset within the master MTD space */
5292 uint32_t mask_flags; /* master MTD flags to mask out for this partition */
5293 - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
5296 #define MTDPART_OFS_RETAIN (-3)
5299 * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
5300 * @origin: for RedBoot, start address of MTD device
5301 - * @of_node: for OF parsers, device node containing partitioning information
5303 struct mtd_part_parser_data {
5304 unsigned long origin;
5305 - struct device_node *of_node;
5310 struct list_head list;
5311 struct module *owner;
5313 - int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
5314 + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
5315 struct mtd_part_parser_data *);
5316 + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
5317 enum mtd_parser_type type;
5320 -extern void register_mtd_parser(struct mtd_part_parser *parser);
5321 +/* Container for passing around a set of parsed partitions */
5322 +struct mtd_partitions {
5323 + const struct mtd_partition *parts;
5325 + const struct mtd_part_parser *parser;
5328 +extern int __register_mtd_parser(struct mtd_part_parser *parser,
5329 + struct module *owner);
5330 +#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
5332 extern void deregister_mtd_parser(struct mtd_part_parser *parser);
5335 + * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
5336 + * do anything special in module init/exit. Each driver may only use this macro
5337 + * once, and calling it replaces module_init() and module_exit().
5339 +#define module_mtd_part_parser(__mtd_part_parser) \
5340 + module_driver(__mtd_part_parser, register_mtd_parser, \
5341 + deregister_mtd_parser)
5343 int mtd_is_partition(const struct mtd_info *mtd);
5344 int mtd_add_partition(struct mtd_info *master, const char *name,
5345 long long offset, long long length);
5346 --- a/include/linux/mtd/sh_flctl.h
5347 +++ b/include/linux/mtd/sh_flctl.h
5348 @@ -143,11 +143,11 @@
5352 - struct mtd_info mtd;
5353 struct nand_chip chip;
5354 struct platform_device *pdev;
5355 struct dev_pm_qos_request pm_qos;
5357 + resource_size_t fifo;
5359 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
5363 static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
5365 - return container_of(mtdinfo, struct sh_flctl, mtd);
5366 + return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip);
5369 #endif /* __SH_FLCTL_H__ */
5370 --- a/include/linux/mtd/sharpsl.h
5371 +++ b/include/linux/mtd/sharpsl.h
5374 struct sharpsl_nand_platform_data {
5375 struct nand_bbt_descr *badblock_pattern;
5376 - struct nand_ecclayout *ecc_layout;
5377 + const struct mtd_ooblayout_ops *ecc_layout;
5378 struct mtd_partition *partitions;
5379 unsigned int nr_partitions;
5381 --- a/include/uapi/mtd/mtd-abi.h
5382 +++ b/include/uapi/mtd/mtd-abi.h
5384 * complete set of ECC information. The ioctl truncates the larger internal
5385 * structure to retain binary compatibility with the static declaration of the
5386 * ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
5387 - * the user struct, not the MAX size of the internal struct nand_ecclayout.
5388 + * the user struct, not the MAX size of the internal OOB layout representation.
5390 struct nand_ecclayout_user {
5392 --- a/fs/jffs2/wbuf.c
5393 +++ b/fs/jffs2/wbuf.c
5394 @@ -1183,22 +1183,20 @@
5396 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
5398 - struct nand_ecclayout *oinfo = c->mtd->ecclayout;
5400 if (!c->mtd->oobsize)
5403 /* Cleanmarker is out-of-band, so inline size zero */
5404 c->cleanmarker_size = 0;
5406 - if (!oinfo || oinfo->oobavail == 0) {
5407 + if (c->oobavail == 0) {
5408 pr_err("inconsistent device description\n");
5412 jffs2_dbg(1, "using OOB on NAND\n");
5414 - c->oobavail = oinfo->oobavail;
5415 + c->oobavail = c->mtd->oobavail;
5417 /* Initialise write buffer */
5418 init_rwsem(&c->wbuf_sem);
5419 --- a/include/linux/mtd/spi-nor.h
5420 +++ b/include/linux/mtd/spi-nor.h
5422 #define SR_BP0 BIT(2) /* Block protect 0 */
5423 #define SR_BP1 BIT(3) /* Block protect 1 */
5424 #define SR_BP2 BIT(4) /* Block protect 2 */
5425 +#define SR_TB BIT(5) /* Top/Bottom protect */
5426 #define SR_SRWD BIT(7) /* SR write protect */
5428 #define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
5431 enum spi_nor_option_flags {
5432 SNOR_F_USE_FSR = BIT(0),
5433 + SNOR_F_HAS_SR_TB = BIT(1),
5438 * @mtd: point to a mtd_info structure
5439 * @lock: the lock for the read/write/erase/lock/unlock operations
5440 * @dev: point to a spi device, or a spi nor controller device.
5441 - * @flash_node: point to a device node describing this flash instance.
5442 * @page_size: the page size of the SPI NOR
5443 * @addr_width: number of address bytes
5444 * @erase_opcode: the opcode for erasing a sector
5446 * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
5447 * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
5448 * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
5449 - * at the offset @offs
5450 + * at the offset @offs; if not provided by the driver,
5451 + * spi-nor will send the erase opcode via write_reg()
5452 * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
5453 * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
5454 * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
5456 struct mtd_info mtd;
5459 - struct device_node *flash_node;
5463 @@ -184,6 +185,17 @@
5467 +static inline void spi_nor_set_flash_node(struct spi_nor *nor,
5468 + struct device_node *np)
5470 + mtd_set_of_node(&nor->mtd, np);
5473 +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
5475 + return mtd_get_of_node(&nor->mtd);
5479 * spi_nor_scan() - scan the SPI NOR
5480 * @nor: the spi_nor structure