oxnas: backport upstream NAND driver
[openwrt/openwrt.git] / target / linux / oxnas / patches-4.4 / 0072-mtd-backport-v4.7-0day-patches-from-Boris.patch
1 From a369af5149e6eb442b22ce89b564dd7a76e03638 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Tue, 26 Apr 2016 19:05:01 +0200
4 Subject: [PATCH 072/102] mtd: backport v4.7-0day patches from Boris
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
8 drivers/mtd/Kconfig | 4 +-
9 drivers/mtd/cmdlinepart.c | 3 +-
10 drivers/mtd/devices/m25p80.c | 44 +--
11 drivers/mtd/maps/physmap_of.c | 6 +-
12 drivers/mtd/mtdchar.c | 123 ++++++--
13 drivers/mtd/mtdconcat.c | 2 +-
14 drivers/mtd/mtdcore.c | 428 ++++++++++++++++++++++++--
15 drivers/mtd/mtdcore.h | 7 +-
16 drivers/mtd/mtdpart.c | 161 ++++++----
17 drivers/mtd/mtdswap.c | 24 +-
18 drivers/mtd/nand/Kconfig | 21 +-
19 drivers/mtd/nand/Makefile | 2 +
20 drivers/mtd/nand/nand_base.c | 571 +++++++++++++++++++----------------
21 drivers/mtd/nand/nand_bbt.c | 34 +--
22 drivers/mtd/nand/nand_bch.c | 52 ++--
23 drivers/mtd/nand/nand_ecc.c | 6 +-
24 drivers/mtd/nand/nand_ids.c | 4 +-
25 drivers/mtd/nand/nandsim.c | 43 +--
26 drivers/mtd/ofpart.c | 53 ++--
27 drivers/mtd/spi-nor/Kconfig | 10 +-
28 drivers/mtd/spi-nor/Makefile | 1 +
29 drivers/mtd/spi-nor/mtk-quadspi.c | 485 +++++++++++++++++++++++++++++
30 drivers/mtd/spi-nor/spi-nor.c | 321 +++++++++++++-------
31 drivers/mtd/tests/mtd_nandecctest.c | 2 +-
32 drivers/mtd/tests/oobtest.c | 49 ++-
33 drivers/mtd/tests/pagetest.c | 3 +-
34 include/linux/mtd/bbm.h | 1 -
35 include/linux/mtd/fsmc.h | 18 --
36 include/linux/mtd/inftl.h | 1 -
37 include/linux/mtd/map.h | 9 +-
38 include/linux/mtd/mtd.h | 80 ++++-
39 include/linux/mtd/nand.h | 94 ++++--
40 include/linux/mtd/nand_bch.h | 10 +-
41 include/linux/mtd/nftl.h | 1 -
42 include/linux/mtd/onenand.h | 2 -
43 include/linux/mtd/partitions.h | 27 +-
44 include/linux/mtd/sh_flctl.h | 4 +-
45 include/linux/mtd/sharpsl.h | 2 +-
46 include/linux/mtd/spi-nor.h | 23 +-
47 include/uapi/mtd/mtd-abi.h | 2 +-
48 45 files changed, 2077 insertions(+), 748 deletions(-)
49 create mode 100644 drivers/mtd/spi-nor/mtk-quadspi.c
50
51 --- a/drivers/mtd/Kconfig
52 +++ b/drivers/mtd/Kconfig
53 @@ -131,7 +131,7 @@ config MTD_CMDLINE_PARTS
54
55 config MTD_AFS_PARTS
56 tristate "ARM Firmware Suite partition parsing"
57 - depends on ARM
58 + depends on (ARM || ARM64)
59 ---help---
60 The ARM Firmware Suite allows the user to divide flash devices into
61 multiple 'images'. Each such image has a header containing its name
62 @@ -161,7 +161,7 @@ config MTD_AR7_PARTS
63
64 config MTD_BCM63XX_PARTS
65 tristate "BCM63XX CFE partitioning support"
66 - depends on BCM63XX
67 + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
68 select CRC32
69 help
70 This provides partions parsing for BCM63xx devices with CFE
71 --- a/drivers/mtd/cmdlinepart.c
72 +++ b/drivers/mtd/cmdlinepart.c
73 @@ -304,7 +304,7 @@ static int mtdpart_setup_real(char *s)
74 * the first one in the chain if a NULL mtd_id is passed in.
75 */
76 static int parse_cmdline_partitions(struct mtd_info *master,
77 - struct mtd_partition **pparts,
78 + const struct mtd_partition **pparts,
79 struct mtd_part_parser_data *data)
80 {
81 unsigned long long offset;
82 @@ -382,7 +382,6 @@ static int __init mtdpart_setup(char *s)
83 __setup("mtdparts=", mtdpart_setup);
84
85 static struct mtd_part_parser cmdline_parser = {
86 - .owner = THIS_MODULE,
87 .parse_fn = parse_cmdline_partitions,
88 .name = "cmdlinepart",
89 };
90 --- a/drivers/mtd/devices/m25p80.c
91 +++ b/drivers/mtd/devices/m25p80.c
92 @@ -174,22 +174,6 @@ static int m25p80_read(struct spi_nor *n
93 return 0;
94 }
95
96 -static int m25p80_erase(struct spi_nor *nor, loff_t offset)
97 -{
98 - struct m25p *flash = nor->priv;
99 -
100 - dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
101 - flash->spi_nor.mtd.erasesize / 1024, (u32)offset);
102 -
103 - /* Set up command buffer. */
104 - flash->command[0] = nor->erase_opcode;
105 - m25p_addr2cmd(nor, offset, flash->command);
106 -
107 - spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
108 -
109 - return 0;
110 -}
111 -
112 /*
113 * board specific setup should have ensured the SPI clock used here
114 * matches what the READ command supports, at least until this driver
115 @@ -197,12 +181,11 @@ static int m25p80_erase(struct spi_nor *
116 */
117 static int m25p_probe(struct spi_device *spi)
118 {
119 - struct mtd_part_parser_data ppdata;
120 struct flash_platform_data *data;
121 struct m25p *flash;
122 struct spi_nor *nor;
123 enum read_mode mode = SPI_NOR_NORMAL;
124 - char *flash_name = NULL;
125 + char *flash_name;
126 int ret;
127
128 data = dev_get_platdata(&spi->dev);
129 @@ -216,12 +199,11 @@ static int m25p_probe(struct spi_device
130 /* install the hooks */
131 nor->read = m25p80_read;
132 nor->write = m25p80_write;
133 - nor->erase = m25p80_erase;
134 nor->write_reg = m25p80_write_reg;
135 nor->read_reg = m25p80_read_reg;
136
137 nor->dev = &spi->dev;
138 - nor->flash_node = spi->dev.of_node;
139 + spi_nor_set_flash_node(nor, spi->dev.of_node);
140 nor->priv = flash;
141
142 spi_set_drvdata(spi, flash);
143 @@ -242,6 +224,8 @@ static int m25p_probe(struct spi_device
144 */
145 if (data && data->type)
146 flash_name = data->type;
147 + else if (!strcmp(spi->modalias, "spi-nor"))
148 + flash_name = NULL; /* auto-detect */
149 else
150 flash_name = spi->modalias;
151
152 @@ -249,11 +233,8 @@ static int m25p_probe(struct spi_device
153 if (ret)
154 return ret;
155
156 - ppdata.of_node = spi->dev.of_node;
157 -
158 - return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
159 - data ? data->parts : NULL,
160 - data ? data->nr_parts : 0);
161 + return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
162 + data ? data->nr_parts : 0);
163 }
164
165
166 @@ -279,14 +260,21 @@ static int m25p_remove(struct spi_device
167 */
168 static const struct spi_device_id m25p_ids[] = {
169 /*
170 + * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
171 + * hack around the fact that the SPI core does not provide uevent
172 + * matching for .of_match_table
173 + */
174 + {"spi-nor"},
175 +
176 + /*
177 * Entries not used in DTs that should be safe to drop after replacing
178 - * them with "nor-jedec" in platform data.
179 + * them with "spi-nor" in platform data.
180 */
181 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
182
183 /*
184 - * Entries that were used in DTs without "nor-jedec" fallback and should
185 - * be kept for backward compatibility.
186 + * Entries that were used in DTs without "jedec,spi-nor" fallback and
187 + * should be kept for backward compatibility.
188 */
189 {"at25df321a"}, {"at25df641"}, {"at26df081a"},
190 {"mr25h256"},
191 --- a/drivers/mtd/maps/physmap_of.c
192 +++ b/drivers/mtd/maps/physmap_of.c
193 @@ -128,7 +128,6 @@ static int of_flash_probe(struct platfor
194 int reg_tuple_size;
195 struct mtd_info **mtd_list = NULL;
196 resource_size_t res_size;
197 - struct mtd_part_parser_data ppdata;
198 bool map_indirect;
199 const char *mtd_name = NULL;
200
201 @@ -272,8 +271,9 @@ static int of_flash_probe(struct platfor
202 if (err)
203 goto err_out;
204
205 - ppdata.of_node = dp;
206 - mtd_device_parse_register(info->cmtd, part_probe_types_def, &ppdata,
207 + info->cmtd->dev.parent = &dev->dev;
208 + mtd_set_of_node(info->cmtd, dp);
209 + mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL,
210 NULL, 0);
211
212 kfree(mtd_list);
213 --- a/drivers/mtd/mtdchar.c
214 +++ b/drivers/mtd/mtdchar.c
215 @@ -465,38 +465,111 @@ static int mtdchar_readoob(struct file *
216 }
217
218 /*
219 - * Copies (and truncates, if necessary) data from the larger struct,
220 - * nand_ecclayout, to the smaller, deprecated layout struct,
221 - * nand_ecclayout_user. This is necessary only to support the deprecated
222 - * API ioctl ECCGETLAYOUT while allowing all new functionality to use
223 - * nand_ecclayout flexibly (i.e. the struct may change size in new
224 - * releases without requiring major rewrites).
225 + * Copies (and truncates, if necessary) OOB layout information to the
226 + * deprecated layout struct, nand_ecclayout_user. This is necessary only to
227 + * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
228 + * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
229 + * can describe any kind of OOB layout with almost zero overhead from a
230 + * memory usage point of view).
231 */
232 -static int shrink_ecclayout(const struct nand_ecclayout *from,
233 - struct nand_ecclayout_user *to)
234 +static int shrink_ecclayout(struct mtd_info *mtd,
235 + struct nand_ecclayout_user *to)
236 {
237 - int i;
238 + struct mtd_oob_region oobregion;
239 + int i, section = 0, ret;
240
241 - if (!from || !to)
242 + if (!mtd || !to)
243 return -EINVAL;
244
245 memset(to, 0, sizeof(*to));
246
247 - to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
248 - for (i = 0; i < to->eccbytes; i++)
249 - to->eccpos[i] = from->eccpos[i];
250 + to->eccbytes = 0;
251 + for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
252 + u32 eccpos;
253 +
254 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
255 + if (ret < 0) {
256 + if (ret != -ERANGE)
257 + return ret;
258 +
259 + break;
260 + }
261 +
262 + eccpos = oobregion.offset;
263 + for (; i < MTD_MAX_ECCPOS_ENTRIES &&
264 + eccpos < oobregion.offset + oobregion.length; i++) {
265 + to->eccpos[i] = eccpos++;
266 + to->eccbytes++;
267 + }
268 + }
269
270 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
271 - if (from->oobfree[i].length == 0 &&
272 - from->oobfree[i].offset == 0)
273 + ret = mtd_ooblayout_free(mtd, i, &oobregion);
274 + if (ret < 0) {
275 + if (ret != -ERANGE)
276 + return ret;
277 +
278 break;
279 - to->oobavail += from->oobfree[i].length;
280 - to->oobfree[i] = from->oobfree[i];
281 + }
282 +
283 + to->oobfree[i].offset = oobregion.offset;
284 + to->oobfree[i].length = oobregion.length;
285 + to->oobavail += to->oobfree[i].length;
286 }
287
288 return 0;
289 }
290
291 +static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
292 +{
293 + struct mtd_oob_region oobregion;
294 + int i, section = 0, ret;
295 +
296 + if (!mtd || !to)
297 + return -EINVAL;
298 +
299 + memset(to, 0, sizeof(*to));
300 +
301 + to->eccbytes = 0;
302 + for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
303 + u32 eccpos;
304 +
305 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
306 + if (ret < 0) {
307 + if (ret != -ERANGE)
308 + return ret;
309 +
310 + break;
311 + }
312 +
313 + if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
314 + return -EINVAL;
315 +
316 + eccpos = oobregion.offset;
317 + for (; eccpos < oobregion.offset + oobregion.length; i++) {
318 + to->eccpos[i] = eccpos++;
319 + to->eccbytes++;
320 + }
321 + }
322 +
323 + for (i = 0; i < 8; i++) {
324 + ret = mtd_ooblayout_free(mtd, i, &oobregion);
325 + if (ret < 0) {
326 + if (ret != -ERANGE)
327 + return ret;
328 +
329 + break;
330 + }
331 +
332 + to->oobfree[i][0] = oobregion.offset;
333 + to->oobfree[i][1] = oobregion.length;
334 + }
335 +
336 + to->useecc = MTD_NANDECC_AUTOPLACE;
337 +
338 + return 0;
339 +}
340 +
341 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
342 struct blkpg_ioctl_arg *arg)
343 {
344 @@ -815,16 +888,12 @@ static int mtdchar_ioctl(struct file *fi
345 {
346 struct nand_oobinfo oi;
347
348 - if (!mtd->ecclayout)
349 + if (!mtd->ooblayout)
350 return -EOPNOTSUPP;
351 - if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
352 - return -EINVAL;
353
354 - oi.useecc = MTD_NANDECC_AUTOPLACE;
355 - memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
356 - memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
357 - sizeof(oi.oobfree));
358 - oi.eccbytes = mtd->ecclayout->eccbytes;
359 + ret = get_oobinfo(mtd, &oi);
360 + if (ret)
361 + return ret;
362
363 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
364 return -EFAULT;
365 @@ -913,14 +982,14 @@ static int mtdchar_ioctl(struct file *fi
366 {
367 struct nand_ecclayout_user *usrlay;
368
369 - if (!mtd->ecclayout)
370 + if (!mtd->ooblayout)
371 return -EOPNOTSUPP;
372
373 usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
374 if (!usrlay)
375 return -ENOMEM;
376
377 - shrink_ecclayout(mtd->ecclayout, usrlay);
378 + shrink_ecclayout(mtd, usrlay);
379
380 if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
381 ret = -EFAULT;
382 --- a/drivers/mtd/mtdconcat.c
383 +++ b/drivers/mtd/mtdconcat.c
384 @@ -777,7 +777,7 @@ struct mtd_info *mtd_concat_create(struc
385
386 }
387
388 - concat->mtd.ecclayout = subdev[0]->ecclayout;
389 + mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
390
391 concat->num_subdev = num_devs;
392 concat->mtd.name = name;
393 --- a/drivers/mtd/mtdcore.c
394 +++ b/drivers/mtd/mtdcore.c
395 @@ -32,6 +32,7 @@
396 #include <linux/err.h>
397 #include <linux/ioctl.h>
398 #include <linux/init.h>
399 +#include <linux/of.h>
400 #include <linux/proc_fs.h>
401 #include <linux/idr.h>
402 #include <linux/backing-dev.h>
403 @@ -446,6 +447,7 @@ int add_mtd_device(struct mtd_info *mtd)
404 mtd->dev.devt = MTD_DEVT(i);
405 dev_set_name(&mtd->dev, "mtd%d", i);
406 dev_set_drvdata(&mtd->dev, mtd);
407 + of_node_get(mtd_get_of_node(mtd));
408 error = device_register(&mtd->dev);
409 if (error)
410 goto fail_added;
411 @@ -477,6 +479,7 @@ int add_mtd_device(struct mtd_info *mtd)
412 return 0;
413
414 fail_added:
415 + of_node_put(mtd_get_of_node(mtd));
416 idr_remove(&mtd_idr, i);
417 fail_locked:
418 mutex_unlock(&mtd_table_mutex);
419 @@ -518,6 +521,7 @@ int del_mtd_device(struct mtd_info *mtd)
420 device_unregister(&mtd->dev);
421
422 idr_remove(&mtd_idr, mtd->index);
423 + of_node_put(mtd_get_of_node(mtd));
424
425 module_put(THIS_MODULE);
426 ret = 0;
427 @@ -529,9 +533,10 @@ out_error:
428 }
429
430 static int mtd_add_device_partitions(struct mtd_info *mtd,
431 - struct mtd_partition *real_parts,
432 - int nbparts)
433 + struct mtd_partitions *parts)
434 {
435 + const struct mtd_partition *real_parts = parts->parts;
436 + int nbparts = parts->nr_parts;
437 int ret;
438
439 if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
440 @@ -600,29 +605,29 @@ int mtd_device_parse_register(struct mtd
441 const struct mtd_partition *parts,
442 int nr_parts)
443 {
444 + struct mtd_partitions parsed;
445 int ret;
446 - struct mtd_partition *real_parts = NULL;
447
448 mtd_set_dev_defaults(mtd);
449
450 - ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
451 - if (ret <= 0 && nr_parts && parts) {
452 - real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
453 - GFP_KERNEL);
454 - if (!real_parts)
455 - ret = -ENOMEM;
456 - else
457 - ret = nr_parts;
458 - }
459 - /* Didn't come up with either parsed OR fallback partitions */
460 - if (ret < 0) {
461 - pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
462 + memset(&parsed, 0, sizeof(parsed));
463 +
464 + ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
465 + if ((ret < 0 || parsed.nr_parts == 0) && parts && nr_parts) {
466 + /* Fall back to driver-provided partitions */
467 + parsed = (struct mtd_partitions){
468 + .parts = parts,
469 + .nr_parts = nr_parts,
470 + };
471 + } else if (ret < 0) {
472 + /* Didn't come up with parsed OR fallback partitions */
473 + pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
474 ret);
475 /* Don't abort on errors; we can still use unpartitioned MTD */
476 - ret = 0;
477 + memset(&parsed, 0, sizeof(parsed));
478 }
479
480 - ret = mtd_add_device_partitions(mtd, real_parts, ret);
481 + ret = mtd_add_device_partitions(mtd, &parsed);
482 if (ret)
483 goto out;
484
485 @@ -642,7 +647,8 @@ int mtd_device_parse_register(struct mtd
486 }
487
488 out:
489 - kfree(real_parts);
490 + /* Cleanup any parsed partitions */
491 + mtd_part_parser_cleanup(&parsed);
492 return ret;
493 }
494 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
495 @@ -767,7 +773,6 @@ out:
496 }
497 EXPORT_SYMBOL_GPL(get_mtd_device);
498
499 -
500 int __get_mtd_device(struct mtd_info *mtd)
501 {
502 int err;
503 @@ -1001,6 +1006,366 @@ int mtd_read_oob(struct mtd_info *mtd, l
504 }
505 EXPORT_SYMBOL_GPL(mtd_read_oob);
506
507 +/**
508 + * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
509 + * @mtd: MTD device structure
510 + * @section: ECC section. Depending on the layout you may have all the ECC
511 + * bytes stored in a single contiguous section, or one section
512 + * per ECC chunk (and sometime several sections for a single ECC
513 + *	     per ECC chunk (and sometimes several sections for a single
514 + *	     ECC chunk)
515 + * information
516 + *
517 + * This function returns ECC section information in the OOB area. If you want
518 + * to get all the ECC bytes information, then you should call
519 + * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
520 + *
521 + * Returns zero on success, a negative error code otherwise.
522 + */
523 +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
524 + struct mtd_oob_region *oobecc)
525 +{
526 + memset(oobecc, 0, sizeof(*oobecc));
527 +
528 + if (!mtd || section < 0)
529 + return -EINVAL;
530 +
531 + if (!mtd->ooblayout || !mtd->ooblayout->ecc)
532 + return -ENOTSUPP;
533 +
534 + return mtd->ooblayout->ecc(mtd, section, oobecc);
535 +}
536 +EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
537 +
538 +/**
539 + * mtd_ooblayout_free - Get the OOB region definition of a specific free
540 + * section
541 + * @mtd: MTD device structure
542 + * @section: Free section you are interested in. Depending on the layout
543 + * you may have all the free bytes stored in a single contiguous
544 + * section, or one section per ECC chunk plus an extra section
545 + * for the remaining bytes (or other funky layout).
546 + * @oobfree: OOB region struct filled with the appropriate free position
547 + * information
548 + *
549 + * This function returns free bytes position in the OOB area. If you want
550 + * to get all the free bytes information, then you should call
551 + * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
552 + *
553 + * Returns zero on success, a negative error code otherwise.
554 + */
555 +int mtd_ooblayout_free(struct mtd_info *mtd, int section,
556 + struct mtd_oob_region *oobfree)
557 +{
558 + memset(oobfree, 0, sizeof(*oobfree));
559 +
560 + if (!mtd || section < 0)
561 + return -EINVAL;
562 +
563 + if (!mtd->ooblayout || !mtd->ooblayout->free)
564 + return -ENOTSUPP;
565 +
566 + return mtd->ooblayout->free(mtd, section, oobfree);
567 +}
568 +EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
569 +
570 +/**
571 + * mtd_ooblayout_find_region - Find the region attached to a specific byte
572 + * @mtd: mtd info structure
573 + * @byte: the byte we are searching for
574 + * @sectionp: pointer where the section id will be stored
575 + * @oobregion: used to retrieve the ECC position
576 + * @iter: iterator function. Should be either mtd_ooblayout_free or
577 + * mtd_ooblayout_ecc depending on the region type you're searching for
578 + *
579 + * This function returns the section id and oobregion information of a
580 + * specific byte. For example, say you want to know where the 4th ECC byte is
581 + * stored, you'll use:
582 + *
583 + * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
584 + *
585 + * Returns zero on success, a negative error code otherwise.
586 + */
587 +static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
588 + int *sectionp, struct mtd_oob_region *oobregion,
589 + int (*iter)(struct mtd_info *,
590 + int section,
591 + struct mtd_oob_region *oobregion))
592 +{
593 + int pos = 0, ret, section = 0;
594 +
595 + memset(oobregion, 0, sizeof(*oobregion));
596 +
597 + while (1) {
598 + ret = iter(mtd, section, oobregion);
599 + if (ret)
600 + return ret;
601 +
602 + if (pos + oobregion->length > byte)
603 + break;
604 +
605 + pos += oobregion->length;
606 + section++;
607 + }
608 +
609 + /*
610 +	 * Adjust region info to make it start at the beginning of the
611 + * 'start' ECC byte.
612 + */
613 + oobregion->offset += byte - pos;
614 + oobregion->length -= byte - pos;
615 + *sectionp = section;
616 +
617 + return 0;
618 +}
619 +
620 +/**
621 + * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
622 + * ECC byte
623 + * @mtd: mtd info structure
624 + * @eccbyte: the byte we are searching for
625 + * @sectionp: pointer where the section id will be stored
626 + * @oobregion: OOB region information
627 + *
628 + * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
629 + * byte.
630 + *
631 + * Returns zero on success, a negative error code otherwise.
632 + */
633 +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
634 + int *section,
635 + struct mtd_oob_region *oobregion)
636 +{
637 + return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
638 + mtd_ooblayout_ecc);
639 +}
640 +EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
641 +
642 +/**
643 + * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
644 + * @mtd: mtd info structure
645 + * @buf: destination buffer to store OOB bytes
646 + * @oobbuf: OOB buffer
647 + * @start: first byte to retrieve
648 + * @nbytes: number of bytes to retrieve
649 + * @iter: section iterator
650 + *
651 + * Extract bytes attached to a specific category (ECC or free)
652 + * from the OOB buffer and copy them into buf.
653 + *
654 + * Returns zero on success, a negative error code otherwise.
655 + */
656 +static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
657 + const u8 *oobbuf, int start, int nbytes,
658 + int (*iter)(struct mtd_info *,
659 + int section,
660 + struct mtd_oob_region *oobregion))
661 +{
662 + struct mtd_oob_region oobregion = { };
663 + int section = 0, ret;
664 +
665 + ret = mtd_ooblayout_find_region(mtd, start, &section,
666 + &oobregion, iter);
667 +
668 + while (!ret) {
669 + int cnt;
670 +
671 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
672 + memcpy(buf, oobbuf + oobregion.offset, cnt);
673 + buf += cnt;
674 + nbytes -= cnt;
675 +
676 + if (!nbytes)
677 + break;
678 +
679 + ret = iter(mtd, ++section, &oobregion);
680 + }
681 +
682 + return ret;
683 +}
684 +
685 +/**
686 + * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
687 + * @mtd: mtd info structure
688 + * @buf: source buffer to get OOB bytes from
689 + * @oobbuf: OOB buffer
690 + * @start: first OOB byte to set
691 + * @nbytes: number of OOB bytes to set
692 + * @iter: section iterator
693 + *
694 + * Fill the OOB buffer with data provided in buf. The category (ECC or free)
695 + * is selected by passing the appropriate iterator.
696 + *
697 + * Returns zero on success, a negative error code otherwise.
698 + */
699 +static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
700 + u8 *oobbuf, int start, int nbytes,
701 + int (*iter)(struct mtd_info *,
702 + int section,
703 + struct mtd_oob_region *oobregion))
704 +{
705 + struct mtd_oob_region oobregion = { };
706 + int section = 0, ret;
707 +
708 + ret = mtd_ooblayout_find_region(mtd, start, &section,
709 + &oobregion, iter);
710 +
711 + while (!ret) {
712 + int cnt;
713 +
714 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
715 + memcpy(oobbuf + oobregion.offset, buf, cnt);
716 + buf += cnt;
717 + nbytes -= cnt;
718 +
719 + if (!nbytes)
720 + break;
721 +
722 + ret = iter(mtd, ++section, &oobregion);
723 + }
724 +
725 + return ret;
726 +}
727 +
728 +/**
729 + * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
730 + * @mtd: mtd info structure
731 + * @iter: category iterator
732 + *
733 + * Count the number of bytes in a given category.
734 + *
735 + * Returns a positive value on success, a negative error code otherwise.
736 + */
737 +static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
738 + int (*iter)(struct mtd_info *,
739 + int section,
740 + struct mtd_oob_region *oobregion))
741 +{
742 + struct mtd_oob_region oobregion = { };
743 + int section = 0, ret, nbytes = 0;
744 +
745 + while (1) {
746 + ret = iter(mtd, section++, &oobregion);
747 + if (ret) {
748 + if (ret == -ERANGE)
749 + ret = nbytes;
750 + break;
751 + }
752 +
753 + nbytes += oobregion.length;
754 + }
755 +
756 + return ret;
757 +}
758 +
759 +/**
760 + * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
761 + * @mtd: mtd info structure
762 + * @eccbuf: destination buffer to store ECC bytes
763 + * @oobbuf: OOB buffer
764 + * @start: first ECC byte to retrieve
765 + * @nbytes: number of ECC bytes to retrieve
766 + *
767 + * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
768 + *
769 + * Returns zero on success, a negative error code otherwise.
770 + */
771 +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
772 + const u8 *oobbuf, int start, int nbytes)
773 +{
774 + return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
775 + mtd_ooblayout_ecc);
776 +}
777 +EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
778 +
779 +/**
780 + * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
781 + * @mtd: mtd info structure
782 + * @eccbuf: source buffer to get ECC bytes from
783 + * @oobbuf: OOB buffer
784 + * @start: first ECC byte to set
785 + * @nbytes: number of ECC bytes to set
786 + *
787 + * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
788 + *
789 + * Returns zero on success, a negative error code otherwise.
790 + */
791 +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
792 + u8 *oobbuf, int start, int nbytes)
793 +{
794 + return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
795 + mtd_ooblayout_ecc);
796 +}
797 +EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
798 +
799 +/**
800 + * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
801 + * @mtd: mtd info structure
802 + * @databuf: destination buffer to store ECC bytes
803 + * @oobbuf: OOB buffer
804 + * @start: first ECC byte to retrieve
805 + * @nbytes: number of ECC bytes to retrieve
806 + *
807 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
808 + *
809 + * Returns zero on success, a negative error code otherwise.
810 + */
811 +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
812 + const u8 *oobbuf, int start, int nbytes)
813 +{
814 + return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
815 + mtd_ooblayout_free);
816 +}
817 +EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
818 +
819 +/**
820 + * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
821 + * @mtd: mtd info structure
822 + * @databuf: source buffer to get data bytes from
823 + * @oobbuf: OOB buffer
824 + * @start: first ECC byte to set
825 + * @nbytes: number of ECC bytes to set
826 + *
827 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
828 + *
829 + * Returns zero on success, a negative error code otherwise.
830 + */
831 +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
832 + u8 *oobbuf, int start, int nbytes)
833 +{
834 + return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
835 + mtd_ooblayout_free);
836 +}
837 +EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
838 +
839 +/**
840 + * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
841 + * @mtd: mtd info structure
842 + *
843 + * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
844 + *
845 + * Returns the number of free bytes on success, a negative error code otherwise.
846 + */
847 +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
848 +{
849 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
850 +}
851 +EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
852 +
853 +/**
854 + * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
855 + * @mtd: mtd info structure
856 + *
857 + * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
858 + *
859 + * Returns the number of ECC bytes on success, a negative error code otherwise.
860 + */
861 +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
862 +{
863 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
864 +}
865 +EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
866 +
867 /*
868 * Method to access the protection register area, present in some flash
869 * devices. The user data is one time programmable but the factory data is read
870 --- a/drivers/mtd/mtdcore.h
871 +++ b/drivers/mtd/mtdcore.h
872 @@ -10,10 +10,15 @@ int add_mtd_device(struct mtd_info *mtd)
873 int del_mtd_device(struct mtd_info *mtd);
874 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
875 int del_mtd_partitions(struct mtd_info *);
876 +
877 +struct mtd_partitions;
878 +
879 int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
880 - struct mtd_partition **pparts,
881 + struct mtd_partitions *pparts,
882 struct mtd_part_parser_data *data);
883
884 +void mtd_part_parser_cleanup(struct mtd_partitions *parts);
885 +
886 int __init init_mtdchar(void);
887 void __exit cleanup_mtdchar(void);
888
889 --- a/drivers/mtd/mtdpart.c
890 +++ b/drivers/mtd/mtdpart.c
891 @@ -55,9 +55,12 @@ static void mtd_partition_split(struct m
892
893 /*
894 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
895 - * the pointer to that structure with this macro.
896 + * the pointer to that structure.
897 */
898 -#define PART(x) ((struct mtd_part *)(x))
899 +static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
900 +{
901 + return container_of(mtd, struct mtd_part, mtd);
902 +}
903
904
905 /*
906 @@ -68,7 +71,7 @@ static void mtd_partition_split(struct m
907 static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
908 size_t *retlen, u_char *buf)
909 {
910 - struct mtd_part *part = PART(mtd);
911 + struct mtd_part *part = mtd_to_part(mtd);
912 struct mtd_ecc_stats stats;
913 int res;
914
915 @@ -87,7 +90,7 @@ static int part_read(struct mtd_info *mt
916 static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
917 size_t *retlen, void **virt, resource_size_t *phys)
918 {
919 - struct mtd_part *part = PART(mtd);
920 + struct mtd_part *part = mtd_to_part(mtd);
921
922 return part->master->_point(part->master, from + part->offset, len,
923 retlen, virt, phys);
924 @@ -95,7 +98,7 @@ static int part_point(struct mtd_info *m
925
926 static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
927 {
928 - struct mtd_part *part = PART(mtd);
929 + struct mtd_part *part = mtd_to_part(mtd);
930
931 return part->master->_unpoint(part->master, from + part->offset, len);
932 }
933 @@ -105,7 +108,7 @@ static unsigned long part_get_unmapped_a
934 unsigned long offset,
935 unsigned long flags)
936 {
937 - struct mtd_part *part = PART(mtd);
938 + struct mtd_part *part = mtd_to_part(mtd);
939
940 offset += part->offset;
941 return part->master->_get_unmapped_area(part->master, len, offset,
942 @@ -115,7 +118,7 @@ static unsigned long part_get_unmapped_a
943 static int part_read_oob(struct mtd_info *mtd, loff_t from,
944 struct mtd_oob_ops *ops)
945 {
946 - struct mtd_part *part = PART(mtd);
947 + struct mtd_part *part = mtd_to_part(mtd);
948 int res;
949
950 if (from >= mtd->size)
951 @@ -130,10 +133,7 @@ static int part_read_oob(struct mtd_info
952 if (ops->oobbuf) {
953 size_t len, pages;
954
955 - if (ops->mode == MTD_OPS_AUTO_OOB)
956 - len = mtd->oobavail;
957 - else
958 - len = mtd->oobsize;
959 + len = mtd_oobavail(mtd, ops);
960 pages = mtd_div_by_ws(mtd->size, mtd);
961 pages -= mtd_div_by_ws(from, mtd);
962 if (ops->ooboffs + ops->ooblen > pages * len)
963 @@ -153,7 +153,7 @@ static int part_read_oob(struct mtd_info
964 static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
965 size_t len, size_t *retlen, u_char *buf)
966 {
967 - struct mtd_part *part = PART(mtd);
968 + struct mtd_part *part = mtd_to_part(mtd);
969 return part->master->_read_user_prot_reg(part->master, from, len,
970 retlen, buf);
971 }
972 @@ -161,7 +161,7 @@ static int part_read_user_prot_reg(struc
973 static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
974 size_t *retlen, struct otp_info *buf)
975 {
976 - struct mtd_part *part = PART(mtd);
977 + struct mtd_part *part = mtd_to_part(mtd);
978 return part->master->_get_user_prot_info(part->master, len, retlen,
979 buf);
980 }
981 @@ -169,7 +169,7 @@ static int part_get_user_prot_info(struc
982 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
983 size_t len, size_t *retlen, u_char *buf)
984 {
985 - struct mtd_part *part = PART(mtd);
986 + struct mtd_part *part = mtd_to_part(mtd);
987 return part->master->_read_fact_prot_reg(part->master, from, len,
988 retlen, buf);
989 }
990 @@ -177,7 +177,7 @@ static int part_read_fact_prot_reg(struc
991 static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
992 size_t *retlen, struct otp_info *buf)
993 {
994 - struct mtd_part *part = PART(mtd);
995 + struct mtd_part *part = mtd_to_part(mtd);
996 return part->master->_get_fact_prot_info(part->master, len, retlen,
997 buf);
998 }
999 @@ -185,7 +185,7 @@ static int part_get_fact_prot_info(struc
1000 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
1001 size_t *retlen, const u_char *buf)
1002 {
1003 - struct mtd_part *part = PART(mtd);
1004 + struct mtd_part *part = mtd_to_part(mtd);
1005 return part->master->_write(part->master, to + part->offset, len,
1006 retlen, buf);
1007 }
1008 @@ -193,7 +193,7 @@ static int part_write(struct mtd_info *m
1009 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1010 size_t *retlen, const u_char *buf)
1011 {
1012 - struct mtd_part *part = PART(mtd);
1013 + struct mtd_part *part = mtd_to_part(mtd);
1014 return part->master->_panic_write(part->master, to + part->offset, len,
1015 retlen, buf);
1016 }
1017 @@ -201,7 +201,7 @@ static int part_panic_write(struct mtd_i
1018 static int part_write_oob(struct mtd_info *mtd, loff_t to,
1019 struct mtd_oob_ops *ops)
1020 {
1021 - struct mtd_part *part = PART(mtd);
1022 + struct mtd_part *part = mtd_to_part(mtd);
1023
1024 if (to >= mtd->size)
1025 return -EINVAL;
1026 @@ -213,7 +213,7 @@ static int part_write_oob(struct mtd_inf
1027 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1028 size_t len, size_t *retlen, u_char *buf)
1029 {
1030 - struct mtd_part *part = PART(mtd);
1031 + struct mtd_part *part = mtd_to_part(mtd);
1032 return part->master->_write_user_prot_reg(part->master, from, len,
1033 retlen, buf);
1034 }
1035 @@ -221,21 +221,21 @@ static int part_write_user_prot_reg(stru
1036 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1037 size_t len)
1038 {
1039 - struct mtd_part *part = PART(mtd);
1040 + struct mtd_part *part = mtd_to_part(mtd);
1041 return part->master->_lock_user_prot_reg(part->master, from, len);
1042 }
1043
1044 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
1045 unsigned long count, loff_t to, size_t *retlen)
1046 {
1047 - struct mtd_part *part = PART(mtd);
1048 + struct mtd_part *part = mtd_to_part(mtd);
1049 return part->master->_writev(part->master, vecs, count,
1050 to + part->offset, retlen);
1051 }
1052
1053 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
1054 {
1055 - struct mtd_part *part = PART(mtd);
1056 + struct mtd_part *part = mtd_to_part(mtd);
1057 int ret;
1058
1059
1060 @@ -299,7 +299,7 @@ static int part_erase(struct mtd_info *m
1061 void mtd_erase_callback(struct erase_info *instr)
1062 {
1063 if (instr->mtd->_erase == part_erase) {
1064 - struct mtd_part *part = PART(instr->mtd);
1065 + struct mtd_part *part = mtd_to_part(instr->mtd);
1066 size_t wrlen = 0;
1067
1068 if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
1069 @@ -330,13 +330,13 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
1070
1071 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1072 {
1073 - struct mtd_part *part = PART(mtd);
1074 + struct mtd_part *part = mtd_to_part(mtd);
1075 return part->master->_lock(part->master, ofs + part->offset, len);
1076 }
1077
1078 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1079 {
1080 - struct mtd_part *part = PART(mtd);
1081 + struct mtd_part *part = mtd_to_part(mtd);
1082
1083 ofs += part->offset;
1084 if (mtd->flags & MTD_ERASE_PARTIAL) {
1085 @@ -349,45 +349,45 @@ static int part_unlock(struct mtd_info *
1086
1087 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1088 {
1089 - struct mtd_part *part = PART(mtd);
1090 + struct mtd_part *part = mtd_to_part(mtd);
1091 return part->master->_is_locked(part->master, ofs + part->offset, len);
1092 }
1093
1094 static void part_sync(struct mtd_info *mtd)
1095 {
1096 - struct mtd_part *part = PART(mtd);
1097 + struct mtd_part *part = mtd_to_part(mtd);
1098 part->master->_sync(part->master);
1099 }
1100
1101 static int part_suspend(struct mtd_info *mtd)
1102 {
1103 - struct mtd_part *part = PART(mtd);
1104 + struct mtd_part *part = mtd_to_part(mtd);
1105 return part->master->_suspend(part->master);
1106 }
1107
1108 static void part_resume(struct mtd_info *mtd)
1109 {
1110 - struct mtd_part *part = PART(mtd);
1111 + struct mtd_part *part = mtd_to_part(mtd);
1112 part->master->_resume(part->master);
1113 }
1114
1115 static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1116 {
1117 - struct mtd_part *part = PART(mtd);
1118 + struct mtd_part *part = mtd_to_part(mtd);
1119 ofs += part->offset;
1120 return part->master->_block_isreserved(part->master, ofs);
1121 }
1122
1123 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
1124 {
1125 - struct mtd_part *part = PART(mtd);
1126 + struct mtd_part *part = mtd_to_part(mtd);
1127 ofs += part->offset;
1128 return part->master->_block_isbad(part->master, ofs);
1129 }
1130
1131 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
1132 {
1133 - struct mtd_part *part = PART(mtd);
1134 + struct mtd_part *part = mtd_to_part(mtd);
1135 int res;
1136
1137 ofs += part->offset;
1138 @@ -397,6 +397,27 @@ static int part_block_markbad(struct mtd
1139 return res;
1140 }
1141
1142 +static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
1143 + struct mtd_oob_region *oobregion)
1144 +{
1145 + struct mtd_part *part = mtd_to_part(mtd);
1146 +
1147 + return mtd_ooblayout_ecc(part->master, section, oobregion);
1148 +}
1149 +
1150 +static int part_ooblayout_free(struct mtd_info *mtd, int section,
1151 + struct mtd_oob_region *oobregion)
1152 +{
1153 + struct mtd_part *part = mtd_to_part(mtd);
1154 +
1155 + return mtd_ooblayout_free(part->master, section, oobregion);
1156 +}
1157 +
1158 +static const struct mtd_ooblayout_ops part_ooblayout_ops = {
1159 + .ecc = part_ooblayout_ecc,
1160 + .free = part_ooblayout_free,
1161 +};
1162 +
1163 static inline void free_partition(struct mtd_part *p)
1164 {
1165 kfree(p->mtd.name);
1166 @@ -614,7 +635,7 @@ static struct mtd_part *allocate_partiti
1167 slave->mtd.erasesize = slave->mtd.size;
1168 }
1169
1170 - slave->mtd.ecclayout = master->ecclayout;
1171 + mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
1172 slave->mtd.ecc_step_size = master->ecc_step_size;
1173 slave->mtd.ecc_strength = master->ecc_strength;
1174 slave->mtd.bitflip_threshold = master->bitflip_threshold;
1175 @@ -639,7 +660,7 @@ static ssize_t mtd_partition_offset_show
1176 struct device_attribute *attr, char *buf)
1177 {
1178 struct mtd_info *mtd = dev_get_drvdata(dev);
1179 - struct mtd_part *part = PART(mtd);
1180 + struct mtd_part *part = mtd_to_part(mtd);
1181 return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
1182 }
1183
1184 @@ -677,11 +698,10 @@ int mtd_add_partition(struct mtd_info *m
1185 if (length <= 0)
1186 return -EINVAL;
1187
1188 + memset(&part, 0, sizeof(part));
1189 part.name = name;
1190 part.size = length;
1191 part.offset = offset;
1192 - part.mask_flags = 0;
1193 - part.ecclayout = NULL;
1194
1195 new = allocate_partition(master, &part, -1, offset);
1196 if (IS_ERR(new))
1197 @@ -845,7 +865,7 @@ int add_mtd_partitions(struct mtd_info *
1198 static DEFINE_SPINLOCK(part_parser_lock);
1199 static LIST_HEAD(part_parsers);
1200
1201 -static struct mtd_part_parser *get_partition_parser(const char *name)
1202 +static struct mtd_part_parser *mtd_part_parser_get(const char *name)
1203 {
1204 struct mtd_part_parser *p, *ret = NULL;
1205
1206 @@ -862,7 +882,20 @@ static struct mtd_part_parser *get_parti
1207 return ret;
1208 }
1209
1210 -#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
1211 +static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
1212 +{
1213 + module_put(p->owner);
1214 +}
1215 +
1216 +/*
1217 + * Many partition parsers just expected the core to kfree() all their data in
1218 + * one chunk. Do that by default.
1219 + */
1220 +static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
1221 + int nr_parts)
1222 +{
1223 + kfree(pparts);
1224 +}
1225
1226 static struct mtd_part_parser *
1227 get_partition_parser_by_type(enum mtd_parser_type type,
1228 @@ -874,7 +907,7 @@ get_partition_parser_by_type(enum mtd_pa
1229
1230 p = list_prepare_entry(start, &part_parsers, list);
1231 if (start)
1232 - put_partition_parser(start);
1233 + mtd_part_parser_put(start);
1234
1235 list_for_each_entry_continue(p, &part_parsers, list) {
1236 if (p->type == type && try_module_get(p->owner)) {
1237 @@ -888,13 +921,19 @@ get_partition_parser_by_type(enum mtd_pa
1238 return ret;
1239 }
1240
1241 -void register_mtd_parser(struct mtd_part_parser *p)
1242 -{
1243 +int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
1244 +{
1245 + p->owner = owner;
1246 +
1247 + if (!p->cleanup)
1248 + p->cleanup = &mtd_part_parser_cleanup_default;
1249 +
1250 spin_lock(&part_parser_lock);
1251 list_add(&p->list, &part_parsers);
1252 spin_unlock(&part_parser_lock);
1253 + return 0;
1254 }
1255 -EXPORT_SYMBOL_GPL(register_mtd_parser);
1256 +EXPORT_SYMBOL_GPL(__register_mtd_parser);
1257
1258 void deregister_mtd_parser(struct mtd_part_parser *p)
1259 {
1260 @@ -954,7 +993,7 @@ static const char * const default_mtd_pa
1261 * parse_mtd_partitions - parse MTD partitions
1262 * @master: the master partition (describes whole MTD device)
1263 * @types: names of partition parsers to try or %NULL
1264 - * @pparts: array of partitions found is returned here
1265 + * @pparts: info about partitions found is returned here
1266 * @data: MTD partition parser-specific data
1267 *
1268 * This function tries to find partition on MTD device @master. It uses MTD
1269 @@ -966,45 +1005,42 @@ static const char * const default_mtd_pa
1270 *
1271 * This function may return:
1272 * o a negative error code in case of failure
1273 - * o zero if no partitions were found
1274 - * o a positive number of found partitions, in which case on exit @pparts will
1275 - * point to an array containing this number of &struct mtd_info objects.
1276 + * o zero otherwise, and @pparts will describe the partitions, number of
1277 + * partitions, and the parser which parsed them. Caller must release
1278 + * resources with mtd_part_parser_cleanup() when finished with the returned
1279 + * data.
1280 */
1281 int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
1282 - struct mtd_partition **pparts,
1283 + struct mtd_partitions *pparts,
1284 struct mtd_part_parser_data *data)
1285 {
1286 struct mtd_part_parser *parser;
1287 int ret, err = 0;
1288 const char *const *types_of = NULL;
1289
1290 - if (data && data->of_node) {
1291 - types_of = of_get_probes(data->of_node);
1292 - if (types_of != NULL)
1293 - types = types_of;
1294 - }
1295 -
1296 if (!types)
1297 types = default_mtd_part_types;
1298
1299 for ( ; *types; types++) {
1300 pr_debug("%s: parsing partitions %s\n", master->name, *types);
1301 - parser = get_partition_parser(*types);
1302 + parser = mtd_part_parser_get(*types);
1303 if (!parser && !request_module("%s", *types))
1304 - parser = get_partition_parser(*types);
1305 + parser = mtd_part_parser_get(*types);
1306 pr_debug("%s: got parser %s\n", master->name,
1307 parser ? parser->name : NULL);
1308 if (!parser)
1309 continue;
1310 - ret = (*parser->parse_fn)(master, pparts, data);
1311 + ret = (*parser->parse_fn)(master, &pparts->parts, data);
1312 pr_debug("%s: parser %s: %i\n",
1313 master->name, parser->name, ret);
1314 - put_partition_parser(parser);
1315 if (ret > 0) {
1316 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
1317 ret, parser->name, master->name);
1318 - return ret;
1319 + pparts->nr_parts = ret;
1320 + pparts->parser = parser;
1321 + return 0;
1322 }
1323 + mtd_part_parser_put(parser);
1324 /*
1325 * Stash the first error we see; only report it if no parser
1326 * succeeds
1327 @@ -1034,7 +1070,7 @@ int parse_mtd_partitions_by_type(struct
1328 ret = (*parser->parse_fn)(master, pparts, data);
1329
1330 if (ret > 0) {
1331 - put_partition_parser(parser);
1332 + mtd_part_parser_put(parser);
1333 printk(KERN_NOTICE
1334 "%d %s partitions found on MTD device %s\n",
1335 ret, parser->name, master->name);
1336 @@ -1048,6 +1084,22 @@ int parse_mtd_partitions_by_type(struct
1337 }
1338 EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
1339
1340 +void mtd_part_parser_cleanup(struct mtd_partitions *parts)
1341 +{
1342 + const struct mtd_part_parser *parser;
1343 +
1344 + if (!parts)
1345 + return;
1346 +
1347 + parser = parts->parser;
1348 + if (parser) {
1349 + if (parser->cleanup)
1350 + parser->cleanup(parts->parts, parts->nr_parts);
1351 +
1352 + mtd_part_parser_put(parser);
1353 + }
1354 +}
1355 +
1356 int mtd_is_partition(const struct mtd_info *mtd)
1357 {
1358 struct mtd_part *part;
1359 @@ -1070,7 +1122,7 @@ struct mtd_info *mtdpart_get_master(cons
1360 if (!mtd_is_partition(mtd))
1361 return (struct mtd_info *)mtd;
1362
1363 - return PART(mtd)->master;
1364 + return mtd_to_part(mtd)->master;
1365 }
1366 EXPORT_SYMBOL_GPL(mtdpart_get_master);
1367
1368 @@ -1079,7 +1131,7 @@ uint64_t mtdpart_get_offset(const struct
1369 if (!mtd_is_partition(mtd))
1370 return 0;
1371
1372 - return PART(mtd)->offset;
1373 + return mtd_to_part(mtd)->offset;
1374 }
1375 EXPORT_SYMBOL_GPL(mtdpart_get_offset);
1376
1377 @@ -1089,6 +1141,6 @@ uint64_t mtd_get_device_size(const struc
1378 if (!mtd_is_partition(mtd))
1379 return mtd->size;
1380
1381 - return PART(mtd)->master->size;
1382 + return mtd_to_part(mtd)->master->size;
1383 }
1384 EXPORT_SYMBOL_GPL(mtd_get_device_size);
1385 --- a/drivers/mtd/mtdswap.c
1386 +++ b/drivers/mtd/mtdswap.c
1387 @@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct m
1388 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
1389 return MTDSWAP_SCANNED_BAD;
1390
1391 - ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
1392 + ops.ooblen = 2 * d->mtd->oobavail;
1393 ops.oobbuf = d->oob_buf;
1394 ops.ooboffs = 0;
1395 ops.datbuf = NULL;
1396 @@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct m
1397
1398 data = (struct mtdswap_oobdata *)d->oob_buf;
1399 data2 = (struct mtdswap_oobdata *)
1400 - (d->oob_buf + d->mtd->ecclayout->oobavail);
1401 + (d->oob_buf + d->mtd->oobavail);
1402
1403 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
1404 eb->erase_count = le32_to_cpu(data->count);
1405 @@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(
1406
1407 ops.mode = MTD_OPS_AUTO_OOB;
1408 ops.len = mtd->writesize;
1409 - ops.ooblen = mtd->ecclayout->oobavail;
1410 + ops.ooblen = mtd->oobavail;
1411 ops.ooboffs = 0;
1412 ops.datbuf = d->page_buf;
1413 ops.oobbuf = d->oob_buf;
1414 @@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(
1415 for (i = 0; i < mtd_pages; i++) {
1416 patt = mtdswap_test_patt(test + i);
1417 memset(d->page_buf, patt, mtd->writesize);
1418 - memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
1419 + memset(d->oob_buf, patt, mtd->oobavail);
1420 ret = mtd_write_oob(mtd, pos, &ops);
1421 if (ret)
1422 goto error;
1423 @@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(
1424 if (p1[j] != patt)
1425 goto error;
1426
1427 - for (j = 0; j < mtd->ecclayout->oobavail; j++)
1428 + for (j = 0; j < mtd->oobavail; j++)
1429 if (p2[j] != (unsigned char)patt)
1430 goto error;
1431
1432 @@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_d
1433 if (!d->page_buf)
1434 goto page_buf_fail;
1435
1436 - d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
1437 + d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
1438 if (!d->oob_buf)
1439 goto oob_buf_fail;
1440
1441 @@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_b
1442 unsigned long part;
1443 unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
1444 uint64_t swap_size, use_size, size_limit;
1445 - struct nand_ecclayout *oinfo;
1446 int ret;
1447
1448 parts = &partitions[0];
1449 @@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_b
1450 return;
1451 }
1452
1453 - oinfo = mtd->ecclayout;
1454 - if (!oinfo) {
1455 - printk(KERN_ERR "%s: mtd%d does not have OOB\n",
1456 - MTDSWAP_PREFIX, mtd->index);
1457 - return;
1458 - }
1459 -
1460 - if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
1461 + if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
1462 printk(KERN_ERR "%s: Not enough free bytes in OOB, "
1463 "%d available, %zu needed.\n",
1464 - MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
1465 + MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
1466 return;
1467 }
1468
1469 --- a/drivers/mtd/nand/Kconfig
1470 +++ b/drivers/mtd/nand/Kconfig
1471 @@ -55,7 +55,7 @@ config MTD_NAND_DENALI_PCI
1472 config MTD_NAND_DENALI_DT
1473 tristate "Support Denali NAND controller as a DT device"
1474 select MTD_NAND_DENALI
1475 - depends on HAS_DMA && HAVE_CLK
1476 + depends on HAS_DMA && HAVE_CLK && OF
1477 help
1478 Enable the driver for NAND flash on platforms using a Denali NAND
1479 controller as a DT device.
1480 @@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
1481 config MTD_NAND_GPIO
1482 tristate "GPIO assisted NAND Flash driver"
1483 depends on GPIOLIB || COMPILE_TEST
1484 + depends on HAS_IOMEM
1485 help
1486 This enables a NAND flash driver where control signals are
1487 connected to GPIO pins, and commands and data are communicated
1488 @@ -310,6 +311,7 @@ config MTD_NAND_CAFE
1489 config MTD_NAND_CS553X
1490 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
1491 depends on X86_32
1492 + depends on !UML && HAS_IOMEM
1493 help
1494 The CS553x companion chips for the AMD Geode processor
1495 include NAND flash controllers with built-in hardware ECC
1496 @@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
1497 config MTD_NAND_VF610_NFC
1498 tristate "Support for Freescale NFC for VF610/MPC5125"
1499 depends on (SOC_VF610 || COMPILE_TEST)
1500 + depends on HAS_IOMEM
1501 help
1502 Enables support for NAND Flash Controller on some Freescale
1503 processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
1504 @@ -480,7 +483,7 @@ config MTD_NAND_MXC
1505
1506 config MTD_NAND_SH_FLCTL
1507 tristate "Support for NAND on Renesas SuperH FLCTL"
1508 - depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
1509 + depends on SUPERH || COMPILE_TEST
1510 depends on HAS_IOMEM
1511 depends on HAS_DMA
1512 help
1513 @@ -519,6 +522,13 @@ config MTD_NAND_JZ4740
1514 help
1515 Enables support for NAND Flash on JZ4740 SoC based boards.
1516
1517 +config MTD_NAND_JZ4780
1518 + tristate "Support for NAND on JZ4780 SoC"
1519 + depends on MACH_JZ4780 && JZ4780_NEMC
1520 + help
1521 + Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
1522 + based boards, using the BCH controller for hardware error correction.
1523 +
1524 config MTD_NAND_FSMC
1525 tristate "Support for NAND on ST Micros FSMC"
1526 depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
1527 @@ -546,4 +556,11 @@ config MTD_NAND_HISI504
1528 help
1529 Enables support for NAND controller on Hisilicon SoC Hip04.
1530
1531 +config MTD_NAND_QCOM
1532 + tristate "Support for NAND on QCOM SoCs"
1533 + depends on ARCH_QCOM
1534 + help
1535 + Enables support for NAND flash chips on SoCs containing the EBI2 NAND
1536 + controller. This controller is found on IPQ806x SoC.
1537 +
1538 endif # MTD_NAND
1539 --- a/drivers/mtd/nand/Makefile
1540 +++ b/drivers/mtd/nand/Makefile
1541 @@ -49,11 +49,13 @@ obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mp
1542 obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
1543 obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
1544 obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
1545 +obj-$(CONFIG_MTD_NAND_JZ4780) += jz4780_nand.o jz4780_bch.o
1546 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
1547 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
1548 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
1549 obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
1550 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
1551 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
1552 +obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
1553
1554 nand-objs := nand_base.o nand_bbt.o nand_timings.o
1555 --- a/drivers/mtd/nand/nand_base.c
1556 +++ b/drivers/mtd/nand/nand_base.c
1557 @@ -48,50 +48,6 @@
1558 #include <linux/mtd/partitions.h>
1559 #include <linux/of_mtd.h>
1560
1561 -/* Define default oob placement schemes for large and small page devices */
1562 -static struct nand_ecclayout nand_oob_8 = {
1563 - .eccbytes = 3,
1564 - .eccpos = {0, 1, 2},
1565 - .oobfree = {
1566 - {.offset = 3,
1567 - .length = 2},
1568 - {.offset = 6,
1569 - .length = 2} }
1570 -};
1571 -
1572 -static struct nand_ecclayout nand_oob_16 = {
1573 - .eccbytes = 6,
1574 - .eccpos = {0, 1, 2, 3, 6, 7},
1575 - .oobfree = {
1576 - {.offset = 8,
1577 - . length = 8} }
1578 -};
1579 -
1580 -static struct nand_ecclayout nand_oob_64 = {
1581 - .eccbytes = 24,
1582 - .eccpos = {
1583 - 40, 41, 42, 43, 44, 45, 46, 47,
1584 - 48, 49, 50, 51, 52, 53, 54, 55,
1585 - 56, 57, 58, 59, 60, 61, 62, 63},
1586 - .oobfree = {
1587 - {.offset = 2,
1588 - .length = 38} }
1589 -};
1590 -
1591 -static struct nand_ecclayout nand_oob_128 = {
1592 - .eccbytes = 48,
1593 - .eccpos = {
1594 - 80, 81, 82, 83, 84, 85, 86, 87,
1595 - 88, 89, 90, 91, 92, 93, 94, 95,
1596 - 96, 97, 98, 99, 100, 101, 102, 103,
1597 - 104, 105, 106, 107, 108, 109, 110, 111,
1598 - 112, 113, 114, 115, 116, 117, 118, 119,
1599 - 120, 121, 122, 123, 124, 125, 126, 127},
1600 - .oobfree = {
1601 - {.offset = 2,
1602 - .length = 78} }
1603 -};
1604 -
1605 static int nand_get_device(struct mtd_info *mtd, int new_state);
1606
1607 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
1608 @@ -103,10 +59,96 @@ static int nand_do_write_oob(struct mtd_
1609 */
1610 DEFINE_LED_TRIGGER(nand_led_trigger);
1611
1612 +/* Define default oob placement schemes for large and small page devices */
1613 +static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
1614 + struct mtd_oob_region *oobregion)
1615 +{
1616 + struct nand_chip *chip = mtd_to_nand(mtd);
1617 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1618 +
1619 + if (section > 1)
1620 + return -ERANGE;
1621 +
1622 + if (!section) {
1623 + oobregion->offset = 0;
1624 + oobregion->length = 4;
1625 + } else {
1626 + oobregion->offset = 6;
1627 + oobregion->length = ecc->total - 4;
1628 + }
1629 +
1630 + return 0;
1631 +}
1632 +
1633 +static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
1634 + struct mtd_oob_region *oobregion)
1635 +{
1636 + if (section > 1)
1637 + return -ERANGE;
1638 +
1639 + if (mtd->oobsize == 16) {
1640 + if (section)
1641 + return -ERANGE;
1642 +
1643 + oobregion->length = 8;
1644 + oobregion->offset = 8;
1645 + } else {
1646 + oobregion->length = 2;
1647 + if (!section)
1648 + oobregion->offset = 3;
1649 + else
1650 + oobregion->offset = 6;
1651 + }
1652 +
1653 + return 0;
1654 +}
1655 +
1656 +const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
1657 + .ecc = nand_ooblayout_ecc_sp,
1658 + .free = nand_ooblayout_free_sp,
1659 +};
1660 +EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
1661 +
1662 +static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
1663 + struct mtd_oob_region *oobregion)
1664 +{
1665 + struct nand_chip *chip = mtd_to_nand(mtd);
1666 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1667 +
1668 + if (section)
1669 + return -ERANGE;
1670 +
1671 + oobregion->length = ecc->total;
1672 + oobregion->offset = mtd->oobsize - oobregion->length;
1673 +
1674 + return 0;
1675 +}
1676 +
1677 +static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
1678 + struct mtd_oob_region *oobregion)
1679 +{
1680 + struct nand_chip *chip = mtd_to_nand(mtd);
1681 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1682 +
1683 + if (section)
1684 + return -ERANGE;
1685 +
1686 + oobregion->length = mtd->oobsize - ecc->total - 2;
1687 + oobregion->offset = 2;
1688 +
1689 + return 0;
1690 +}
1691 +
1692 +const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
1693 + .ecc = nand_ooblayout_ecc_lp,
1694 + .free = nand_ooblayout_free_lp,
1695 +};
1696 +EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
1697 +
1698 static int check_offs_len(struct mtd_info *mtd,
1699 loff_t ofs, uint64_t len)
1700 {
1701 - struct nand_chip *chip = mtd->priv;
1702 + struct nand_chip *chip = mtd_to_nand(mtd);
1703 int ret = 0;
1704
1705 /* Start address must align on block boundary */
1706 @@ -132,7 +174,7 @@ static int check_offs_len(struct mtd_inf
1707 */
1708 static void nand_release_device(struct mtd_info *mtd)
1709 {
1710 - struct nand_chip *chip = mtd->priv;
1711 + struct nand_chip *chip = mtd_to_nand(mtd);
1712
1713 /* Release the controller and the chip */
1714 spin_lock(&chip->controller->lock);
1715 @@ -150,7 +192,7 @@ static void nand_release_device(struct m
1716 */
1717 static uint8_t nand_read_byte(struct mtd_info *mtd)
1718 {
1719 - struct nand_chip *chip = mtd->priv;
1720 + struct nand_chip *chip = mtd_to_nand(mtd);
1721 return readb(chip->IO_ADDR_R);
1722 }
1723
1724 @@ -163,7 +205,7 @@ static uint8_t nand_read_byte(struct mtd
1725 */
1726 static uint8_t nand_read_byte16(struct mtd_info *mtd)
1727 {
1728 - struct nand_chip *chip = mtd->priv;
1729 + struct nand_chip *chip = mtd_to_nand(mtd);
1730 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
1731 }
1732
1733 @@ -175,7 +217,7 @@ static uint8_t nand_read_byte16(struct m
1734 */
1735 static u16 nand_read_word(struct mtd_info *mtd)
1736 {
1737 - struct nand_chip *chip = mtd->priv;
1738 + struct nand_chip *chip = mtd_to_nand(mtd);
1739 return readw(chip->IO_ADDR_R);
1740 }
1741
1742 @@ -188,7 +230,7 @@ static u16 nand_read_word(struct mtd_inf
1743 */
1744 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
1745 {
1746 - struct nand_chip *chip = mtd->priv;
1747 + struct nand_chip *chip = mtd_to_nand(mtd);
1748
1749 switch (chipnr) {
1750 case -1:
1751 @@ -211,7 +253,7 @@ static void nand_select_chip(struct mtd_
1752 */
1753 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
1754 {
1755 - struct nand_chip *chip = mtd->priv;
1756 + struct nand_chip *chip = mtd_to_nand(mtd);
1757
1758 chip->write_buf(mtd, &byte, 1);
1759 }
1760 @@ -225,7 +267,7 @@ static void nand_write_byte(struct mtd_i
1761 */
1762 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
1763 {
1764 - struct nand_chip *chip = mtd->priv;
1765 + struct nand_chip *chip = mtd_to_nand(mtd);
1766 uint16_t word = byte;
1767
1768 /*
1769 @@ -257,7 +299,7 @@ static void nand_write_byte16(struct mtd
1770 */
1771 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
1772 {
1773 - struct nand_chip *chip = mtd->priv;
1774 + struct nand_chip *chip = mtd_to_nand(mtd);
1775
1776 iowrite8_rep(chip->IO_ADDR_W, buf, len);
1777 }
1778 @@ -272,7 +314,7 @@ static void nand_write_buf(struct mtd_in
1779 */
1780 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1781 {
1782 - struct nand_chip *chip = mtd->priv;
1783 + struct nand_chip *chip = mtd_to_nand(mtd);
1784
1785 ioread8_rep(chip->IO_ADDR_R, buf, len);
1786 }
1787 @@ -287,7 +329,7 @@ static void nand_read_buf(struct mtd_inf
1788 */
1789 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
1790 {
1791 - struct nand_chip *chip = mtd->priv;
1792 + struct nand_chip *chip = mtd_to_nand(mtd);
1793 u16 *p = (u16 *) buf;
1794
1795 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
1796 @@ -303,7 +345,7 @@ static void nand_write_buf16(struct mtd_
1797 */
1798 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
1799 {
1800 - struct nand_chip *chip = mtd->priv;
1801 + struct nand_chip *chip = mtd_to_nand(mtd);
1802 u16 *p = (u16 *) buf;
1803
1804 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
1805 @@ -313,14 +355,13 @@ static void nand_read_buf16(struct mtd_i
1806 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
1807 * @mtd: MTD device structure
1808 * @ofs: offset from device start
1809 - * @getchip: 0, if the chip is already selected
1810 *
1811 * Check, if the block is bad.
1812 */
1813 -static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
1814 +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
1815 {
1816 - int page, chipnr, res = 0, i = 0;
1817 - struct nand_chip *chip = mtd->priv;
1818 + int page, res = 0, i = 0;
1819 + struct nand_chip *chip = mtd_to_nand(mtd);
1820 u16 bad;
1821
1822 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
1823 @@ -328,15 +369,6 @@ static int nand_block_bad(struct mtd_inf
1824
1825 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
1826
1827 - if (getchip) {
1828 - chipnr = (int)(ofs >> chip->chip_shift);
1829 -
1830 - nand_get_device(mtd, FL_READING);
1831 -
1832 - /* Select the NAND device */
1833 - chip->select_chip(mtd, chipnr);
1834 - }
1835 -
1836 do {
1837 if (chip->options & NAND_BUSWIDTH_16) {
1838 chip->cmdfunc(mtd, NAND_CMD_READOOB,
1839 @@ -361,11 +393,6 @@ static int nand_block_bad(struct mtd_inf
1840 i++;
1841 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
1842
1843 - if (getchip) {
1844 - chip->select_chip(mtd, -1);
1845 - nand_release_device(mtd);
1846 - }
1847 -
1848 return res;
1849 }
1850
1851 @@ -380,7 +407,7 @@ static int nand_block_bad(struct mtd_inf
1852 */
1853 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1854 {
1855 - struct nand_chip *chip = mtd->priv;
1856 + struct nand_chip *chip = mtd_to_nand(mtd);
1857 struct mtd_oob_ops ops;
1858 uint8_t buf[2] = { 0, 0 };
1859 int ret = 0, res, i = 0;
1860 @@ -430,7 +457,7 @@ static int nand_default_block_markbad(st
1861 */
1862 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
1863 {
1864 - struct nand_chip *chip = mtd->priv;
1865 + struct nand_chip *chip = mtd_to_nand(mtd);
1866 int res, ret = 0;
1867
1868 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
1869 @@ -471,7 +498,7 @@ static int nand_block_markbad_lowlevel(s
1870 */
1871 static int nand_check_wp(struct mtd_info *mtd)
1872 {
1873 - struct nand_chip *chip = mtd->priv;
1874 + struct nand_chip *chip = mtd_to_nand(mtd);
1875
1876 /* Broken xD cards report WP despite being writable */
1877 if (chip->options & NAND_BROKEN_XD)
1878 @@ -491,7 +518,7 @@ static int nand_check_wp(struct mtd_info
1879 */
1880 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1881 {
1882 - struct nand_chip *chip = mtd->priv;
1883 + struct nand_chip *chip = mtd_to_nand(mtd);
1884
1885 if (!chip->bbt)
1886 return 0;
1887 @@ -503,19 +530,17 @@ static int nand_block_isreserved(struct
1888 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
1889 * @mtd: MTD device structure
1890 * @ofs: offset from device start
1891 - * @getchip: 0, if the chip is already selected
1892 * @allowbbt: 1, if its allowed to access the bbt area
1893 *
1894 * Check, if the block is bad. Either by reading the bad block table or
1895 * calling of the scan function.
1896 */
1897 -static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
1898 - int allowbbt)
1899 +static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
1900 {
1901 - struct nand_chip *chip = mtd->priv;
1902 + struct nand_chip *chip = mtd_to_nand(mtd);
1903
1904 if (!chip->bbt)
1905 - return chip->block_bad(mtd, ofs, getchip);
1906 + return chip->block_bad(mtd, ofs);
1907
1908 /* Return info from the table */
1909 return nand_isbad_bbt(mtd, ofs, allowbbt);
1910 @@ -531,7 +556,7 @@ static int nand_block_checkbad(struct mt
1911 */
1912 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
1913 {
1914 - struct nand_chip *chip = mtd->priv;
1915 + struct nand_chip *chip = mtd_to_nand(mtd);
1916 int i;
1917
1918 /* Wait for the device to get ready */
1919 @@ -551,7 +576,7 @@ static void panic_nand_wait_ready(struct
1920 */
1921 void nand_wait_ready(struct mtd_info *mtd)
1922 {
1923 - struct nand_chip *chip = mtd->priv;
1924 + struct nand_chip *chip = mtd_to_nand(mtd);
1925 unsigned long timeo = 400;
1926
1927 if (in_interrupt() || oops_in_progress)
1928 @@ -566,8 +591,8 @@ void nand_wait_ready(struct mtd_info *mt
1929 cond_resched();
1930 } while (time_before(jiffies, timeo));
1931
1932 - pr_warn_ratelimited(
1933 - "timeout while waiting for chip to become ready\n");
1934 + if (!chip->dev_ready(mtd))
1935 + pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
1936 out:
1937 led_trigger_event(nand_led_trigger, LED_OFF);
1938 }
1939 @@ -582,7 +607,7 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
1940 */
1941 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
1942 {
1943 - register struct nand_chip *chip = mtd->priv;
1944 + register struct nand_chip *chip = mtd_to_nand(mtd);
1945
1946 timeo = jiffies + msecs_to_jiffies(timeo);
1947 do {
1948 @@ -605,7 +630,7 @@ static void nand_wait_status_ready(struc
1949 static void nand_command(struct mtd_info *mtd, unsigned int command,
1950 int column, int page_addr)
1951 {
1952 - register struct nand_chip *chip = mtd->priv;
1953 + register struct nand_chip *chip = mtd_to_nand(mtd);
1954 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
1955
1956 /* Write out the command to the device */
1957 @@ -708,7 +733,7 @@ static void nand_command(struct mtd_info
1958 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
1959 int column, int page_addr)
1960 {
1961 - register struct nand_chip *chip = mtd->priv;
1962 + register struct nand_chip *chip = mtd_to_nand(mtd);
1963
1964 /* Emulate NAND_CMD_READOOB */
1965 if (command == NAND_CMD_READOOB) {
1966 @@ -832,7 +857,7 @@ static void panic_nand_get_device(struct
1967 static int
1968 nand_get_device(struct mtd_info *mtd, int new_state)
1969 {
1970 - struct nand_chip *chip = mtd->priv;
1971 + struct nand_chip *chip = mtd_to_nand(mtd);
1972 spinlock_t *lock = &chip->controller->lock;
1973 wait_queue_head_t *wq = &chip->controller->wq;
1974 DECLARE_WAITQUEUE(wait, current);
1975 @@ -952,7 +977,7 @@ static int __nand_unlock(struct mtd_info
1976 {
1977 int ret = 0;
1978 int status, page;
1979 - struct nand_chip *chip = mtd->priv;
1980 + struct nand_chip *chip = mtd_to_nand(mtd);
1981
1982 /* Submit address of first page to unlock */
1983 page = ofs >> chip->page_shift;
1984 @@ -987,7 +1012,7 @@ int nand_unlock(struct mtd_info *mtd, lo
1985 {
1986 int ret = 0;
1987 int chipnr;
1988 - struct nand_chip *chip = mtd->priv;
1989 + struct nand_chip *chip = mtd_to_nand(mtd);
1990
1991 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1992 __func__, (unsigned long long)ofs, len);
1993 @@ -1050,7 +1075,7 @@ int nand_lock(struct mtd_info *mtd, loff
1994 {
1995 int ret = 0;
1996 int chipnr, status, page;
1997 - struct nand_chip *chip = mtd->priv;
1998 + struct nand_chip *chip = mtd_to_nand(mtd);
1999
2000 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2001 __func__, (unsigned long long)ofs, len);
2002 @@ -1309,13 +1334,12 @@ static int nand_read_page_raw_syndrome(s
2003 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2004 uint8_t *buf, int oob_required, int page)
2005 {
2006 - int i, eccsize = chip->ecc.size;
2007 + int i, eccsize = chip->ecc.size, ret;
2008 int eccbytes = chip->ecc.bytes;
2009 int eccsteps = chip->ecc.steps;
2010 uint8_t *p = buf;
2011 uint8_t *ecc_calc = chip->buffers->ecccalc;
2012 uint8_t *ecc_code = chip->buffers->ecccode;
2013 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2014 unsigned int max_bitflips = 0;
2015
2016 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
2017 @@ -1323,8 +1347,10 @@ static int nand_read_page_swecc(struct m
2018 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2019 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2020
2021 - for (i = 0; i < chip->ecc.total; i++)
2022 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2023 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2024 + chip->ecc.total);
2025 + if (ret)
2026 + return ret;
2027
2028 eccsteps = chip->ecc.steps;
2029 p = buf;
2030 @@ -1356,14 +1382,14 @@ static int nand_read_subpage(struct mtd_
2031 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
2032 int page)
2033 {
2034 - int start_step, end_step, num_steps;
2035 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2036 + int start_step, end_step, num_steps, ret;
2037 uint8_t *p;
2038 int data_col_addr, i, gaps = 0;
2039 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2040 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2041 - int index;
2042 + int index, section = 0;
2043 unsigned int max_bitflips = 0;
2044 + struct mtd_oob_region oobregion = { };
2045
2046 /* Column address within the page aligned to ECC size (256bytes) */
2047 start_step = data_offs / chip->ecc.size;
2048 @@ -1391,12 +1417,13 @@ static int nand_read_subpage(struct mtd_
2049 * The performance is faster if we position offsets according to
2050 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
2051 */
2052 - for (i = 0; i < eccfrag_len - 1; i++) {
2053 - if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
2054 - gaps = 1;
2055 - break;
2056 - }
2057 - }
2058 + ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
2059 + if (ret)
2060 + return ret;
2061 +
2062 + if (oobregion.length < eccfrag_len)
2063 + gaps = 1;
2064 +
2065 if (gaps) {
2066 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
2067 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2068 @@ -1405,20 +1432,23 @@ static int nand_read_subpage(struct mtd_
2069 * Send the command to read the particular ECC bytes take care
2070 * about buswidth alignment in read_buf.
2071 */
2072 - aligned_pos = eccpos[index] & ~(busw - 1);
2073 + aligned_pos = oobregion.offset & ~(busw - 1);
2074 aligned_len = eccfrag_len;
2075 - if (eccpos[index] & (busw - 1))
2076 + if (oobregion.offset & (busw - 1))
2077 aligned_len++;
2078 - if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
2079 + if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2080 + (busw - 1))
2081 aligned_len++;
2082
2083 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
2084 - mtd->writesize + aligned_pos, -1);
2085 + mtd->writesize + aligned_pos, -1);
2086 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
2087 }
2088
2089 - for (i = 0; i < eccfrag_len; i++)
2090 - chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
2091 + ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
2092 + chip->oob_poi, index, eccfrag_len);
2093 + if (ret)
2094 + return ret;
2095
2096 p = bufpoi + data_col_addr;
2097 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2098 @@ -1426,6 +1456,16 @@ static int nand_read_subpage(struct mtd_
2099
2100 stat = chip->ecc.correct(mtd, p,
2101 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
2102 + if (stat == -EBADMSG &&
2103 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2104 + /* check for empty pages with bitflips */
2105 + stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2106 + &chip->buffers->ecccode[i],
2107 + chip->ecc.bytes,
2108 + NULL, 0,
2109 + chip->ecc.strength);
2110 + }
2111 +
2112 if (stat < 0) {
2113 mtd->ecc_stats.failed++;
2114 } else {
2115 @@ -1449,13 +1489,12 @@ static int nand_read_subpage(struct mtd_
2116 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2117 uint8_t *buf, int oob_required, int page)
2118 {
2119 - int i, eccsize = chip->ecc.size;
2120 + int i, eccsize = chip->ecc.size, ret;
2121 int eccbytes = chip->ecc.bytes;
2122 int eccsteps = chip->ecc.steps;
2123 uint8_t *p = buf;
2124 uint8_t *ecc_calc = chip->buffers->ecccalc;
2125 uint8_t *ecc_code = chip->buffers->ecccode;
2126 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2127 unsigned int max_bitflips = 0;
2128
2129 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2130 @@ -1465,8 +1504,10 @@ static int nand_read_page_hwecc(struct m
2131 }
2132 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2133
2134 - for (i = 0; i < chip->ecc.total; i++)
2135 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2136 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2137 + chip->ecc.total);
2138 + if (ret)
2139 + return ret;
2140
2141 eccsteps = chip->ecc.steps;
2142 p = buf;
2143 @@ -1475,6 +1516,15 @@ static int nand_read_page_hwecc(struct m
2144 int stat;
2145
2146 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
2147 + if (stat == -EBADMSG &&
2148 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2149 + /* check for empty pages with bitflips */
2150 + stat = nand_check_erased_ecc_chunk(p, eccsize,
2151 + &ecc_code[i], eccbytes,
2152 + NULL, 0,
2153 + chip->ecc.strength);
2154 + }
2155 +
2156 if (stat < 0) {
2157 mtd->ecc_stats.failed++;
2158 } else {
2159 @@ -1502,12 +1552,11 @@ static int nand_read_page_hwecc(struct m
2160 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
2161 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
2162 {
2163 - int i, eccsize = chip->ecc.size;
2164 + int i, eccsize = chip->ecc.size, ret;
2165 int eccbytes = chip->ecc.bytes;
2166 int eccsteps = chip->ecc.steps;
2167 uint8_t *p = buf;
2168 uint8_t *ecc_code = chip->buffers->ecccode;
2169 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2170 uint8_t *ecc_calc = chip->buffers->ecccalc;
2171 unsigned int max_bitflips = 0;
2172
2173 @@ -1516,8 +1565,10 @@ static int nand_read_page_hwecc_oob_firs
2174 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2175 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
2176
2177 - for (i = 0; i < chip->ecc.total; i++)
2178 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2179 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2180 + chip->ecc.total);
2181 + if (ret)
2182 + return ret;
2183
2184 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2185 int stat;
2186 @@ -1527,6 +1578,15 @@ static int nand_read_page_hwecc_oob_firs
2187 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2188
2189 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
2190 + if (stat == -EBADMSG &&
2191 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2192 + /* check for empty pages with bitflips */
2193 + stat = nand_check_erased_ecc_chunk(p, eccsize,
2194 + &ecc_code[i], eccbytes,
2195 + NULL, 0,
2196 + chip->ecc.strength);
2197 + }
2198 +
2199 if (stat < 0) {
2200 mtd->ecc_stats.failed++;
2201 } else {
2202 @@ -1554,6 +1614,7 @@ static int nand_read_page_syndrome(struc
2203 int i, eccsize = chip->ecc.size;
2204 int eccbytes = chip->ecc.bytes;
2205 int eccsteps = chip->ecc.steps;
2206 + int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
2207 uint8_t *p = buf;
2208 uint8_t *oob = chip->oob_poi;
2209 unsigned int max_bitflips = 0;
2210 @@ -1573,19 +1634,29 @@ static int nand_read_page_syndrome(struc
2211 chip->read_buf(mtd, oob, eccbytes);
2212 stat = chip->ecc.correct(mtd, p, oob, NULL);
2213
2214 - if (stat < 0) {
2215 - mtd->ecc_stats.failed++;
2216 - } else {
2217 - mtd->ecc_stats.corrected += stat;
2218 - max_bitflips = max_t(unsigned int, max_bitflips, stat);
2219 - }
2220 -
2221 oob += eccbytes;
2222
2223 if (chip->ecc.postpad) {
2224 chip->read_buf(mtd, oob, chip->ecc.postpad);
2225 oob += chip->ecc.postpad;
2226 }
2227 +
2228 + if (stat == -EBADMSG &&
2229 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2230 + /* check for empty pages with bitflips */
2231 + stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2232 + oob - eccpadbytes,
2233 + eccpadbytes,
2234 + NULL, 0,
2235 + chip->ecc.strength);
2236 + }
2237 +
2238 + if (stat < 0) {
2239 + mtd->ecc_stats.failed++;
2240 + } else {
2241 + mtd->ecc_stats.corrected += stat;
2242 + max_bitflips = max_t(unsigned int, max_bitflips, stat);
2243 + }
2244 }
2245
2246 /* Calculate remaining oob bytes */
2247 @@ -1598,14 +1669,17 @@ static int nand_read_page_syndrome(struc
2248
2249 /**
2250 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
2251 - * @chip: nand chip structure
2252 + * @mtd: mtd info structure
2253 * @oob: oob destination address
2254 * @ops: oob ops structure
2255 * @len: size of oob to transfer
2256 */
2257 -static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
2258 +static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
2259 struct mtd_oob_ops *ops, size_t len)
2260 {
2261 + struct nand_chip *chip = mtd_to_nand(mtd);
2262 + int ret;
2263 +
2264 switch (ops->mode) {
2265
2266 case MTD_OPS_PLACE_OOB:
2267 @@ -1613,31 +1687,12 @@ static uint8_t *nand_transfer_oob(struct
2268 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
2269 return oob + len;
2270
2271 - case MTD_OPS_AUTO_OOB: {
2272 - struct nand_oobfree *free = chip->ecc.layout->oobfree;
2273 - uint32_t boffs = 0, roffs = ops->ooboffs;
2274 - size_t bytes = 0;
2275 -
2276 - for (; free->length && len; free++, len -= bytes) {
2277 - /* Read request not from offset 0? */
2278 - if (unlikely(roffs)) {
2279 - if (roffs >= free->length) {
2280 - roffs -= free->length;
2281 - continue;
2282 - }
2283 - boffs = free->offset + roffs;
2284 - bytes = min_t(size_t, len,
2285 - (free->length - roffs));
2286 - roffs = 0;
2287 - } else {
2288 - bytes = min_t(size_t, len, free->length);
2289 - boffs = free->offset;
2290 - }
2291 - memcpy(oob, chip->oob_poi + boffs, bytes);
2292 - oob += bytes;
2293 - }
2294 - return oob;
2295 - }
2296 + case MTD_OPS_AUTO_OOB:
2297 + ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
2298 + ops->ooboffs, len);
2299 + BUG_ON(ret);
2300 + return oob + len;
2301 +
2302 default:
2303 BUG();
2304 }
2305 @@ -1655,7 +1710,7 @@ static uint8_t *nand_transfer_oob(struct
2306 */
2307 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
2308 {
2309 - struct nand_chip *chip = mtd->priv;
2310 + struct nand_chip *chip = mtd_to_nand(mtd);
2311
2312 pr_debug("setting READ RETRY mode %d\n", retry_mode);
2313
2314 @@ -1680,12 +1735,11 @@ static int nand_do_read_ops(struct mtd_i
2315 struct mtd_oob_ops *ops)
2316 {
2317 int chipnr, page, realpage, col, bytes, aligned, oob_required;
2318 - struct nand_chip *chip = mtd->priv;
2319 + struct nand_chip *chip = mtd_to_nand(mtd);
2320 int ret = 0;
2321 uint32_t readlen = ops->len;
2322 uint32_t oobreadlen = ops->ooblen;
2323 - uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
2324 - mtd->oobavail : mtd->oobsize;
2325 + uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2326
2327 uint8_t *bufpoi, *oob, *buf;
2328 int use_bufpoi;
2329 @@ -1772,7 +1826,7 @@ read_retry:
2330 int toread = min(oobreadlen, max_oobsize);
2331
2332 if (toread) {
2333 - oob = nand_transfer_oob(chip,
2334 + oob = nand_transfer_oob(mtd,
2335 oob, ops, toread);
2336 oobreadlen -= toread;
2337 }
2338 @@ -2024,7 +2078,7 @@ static int nand_do_read_oob(struct mtd_i
2339 struct mtd_oob_ops *ops)
2340 {
2341 int page, realpage, chipnr;
2342 - struct nand_chip *chip = mtd->priv;
2343 + struct nand_chip *chip = mtd_to_nand(mtd);
2344 struct mtd_ecc_stats stats;
2345 int readlen = ops->ooblen;
2346 int len;
2347 @@ -2036,10 +2090,7 @@ static int nand_do_read_oob(struct mtd_i
2348
2349 stats = mtd->ecc_stats;
2350
2351 - if (ops->mode == MTD_OPS_AUTO_OOB)
2352 - len = chip->ecc.layout->oobavail;
2353 - else
2354 - len = mtd->oobsize;
2355 + len = mtd_oobavail(mtd, ops);
2356
2357 if (unlikely(ops->ooboffs >= len)) {
2358 pr_debug("%s: attempt to start read outside oob\n",
2359 @@ -2073,7 +2124,7 @@ static int nand_do_read_oob(struct mtd_i
2360 break;
2361
2362 len = min(len, readlen);
2363 - buf = nand_transfer_oob(chip, buf, ops, len);
2364 + buf = nand_transfer_oob(mtd, buf, ops, len);
2365
2366 if (chip->options & NAND_NEED_READRDY) {
2367 /* Apply delay or wait for ready/busy pin */
2368 @@ -2232,19 +2283,20 @@ static int nand_write_page_swecc(struct
2369 const uint8_t *buf, int oob_required,
2370 int page)
2371 {
2372 - int i, eccsize = chip->ecc.size;
2373 + int i, eccsize = chip->ecc.size, ret;
2374 int eccbytes = chip->ecc.bytes;
2375 int eccsteps = chip->ecc.steps;
2376 uint8_t *ecc_calc = chip->buffers->ecccalc;
2377 const uint8_t *p = buf;
2378 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2379
2380 /* Software ECC calculation */
2381 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2382 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2383
2384 - for (i = 0; i < chip->ecc.total; i++)
2385 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2386 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2387 + chip->ecc.total);
2388 + if (ret)
2389 + return ret;
2390
2391 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2392 }
2393 @@ -2261,12 +2313,11 @@ static int nand_write_page_hwecc(struct
2394 const uint8_t *buf, int oob_required,
2395 int page)
2396 {
2397 - int i, eccsize = chip->ecc.size;
2398 + int i, eccsize = chip->ecc.size, ret;
2399 int eccbytes = chip->ecc.bytes;
2400 int eccsteps = chip->ecc.steps;
2401 uint8_t *ecc_calc = chip->buffers->ecccalc;
2402 const uint8_t *p = buf;
2403 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2404
2405 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2406 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2407 @@ -2274,8 +2325,10 @@ static int nand_write_page_hwecc(struct
2408 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2409 }
2410
2411 - for (i = 0; i < chip->ecc.total; i++)
2412 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2413 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2414 + chip->ecc.total);
2415 + if (ret)
2416 + return ret;
2417
2418 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2419
2420 @@ -2303,11 +2356,10 @@ static int nand_write_subpage_hwecc(stru
2421 int ecc_size = chip->ecc.size;
2422 int ecc_bytes = chip->ecc.bytes;
2423 int ecc_steps = chip->ecc.steps;
2424 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2425 uint32_t start_step = offset / ecc_size;
2426 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2427 int oob_bytes = mtd->oobsize / ecc_steps;
2428 - int step, i;
2429 + int step, ret;
2430
2431 for (step = 0; step < ecc_steps; step++) {
2432 /* configure controller for WRITE access */
2433 @@ -2335,8 +2387,10 @@ static int nand_write_subpage_hwecc(stru
2434 /* copy calculated ECC for whole page to chip->buffer->oob */
2435 /* this include masked-value(0xFF) for unwritten subpages */
2436 ecc_calc = chip->buffers->ecccalc;
2437 - for (i = 0; i < chip->ecc.total; i++)
2438 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2439 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2440 + chip->ecc.total);
2441 + if (ret)
2442 + return ret;
2443
2444 /* write OOB buffer to NAND device */
2445 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2446 @@ -2472,7 +2526,8 @@ static int nand_write_page(struct mtd_in
2447 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2448 struct mtd_oob_ops *ops)
2449 {
2450 - struct nand_chip *chip = mtd->priv;
2451 + struct nand_chip *chip = mtd_to_nand(mtd);
2452 + int ret;
2453
2454 /*
2455 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2456 @@ -2487,31 +2542,12 @@ static uint8_t *nand_fill_oob(struct mtd
2457 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2458 return oob + len;
2459
2460 - case MTD_OPS_AUTO_OOB: {
2461 - struct nand_oobfree *free = chip->ecc.layout->oobfree;
2462 - uint32_t boffs = 0, woffs = ops->ooboffs;
2463 - size_t bytes = 0;
2464 -
2465 - for (; free->length && len; free++, len -= bytes) {
2466 - /* Write request not from offset 0? */
2467 - if (unlikely(woffs)) {
2468 - if (woffs >= free->length) {
2469 - woffs -= free->length;
2470 - continue;
2471 - }
2472 - boffs = free->offset + woffs;
2473 - bytes = min_t(size_t, len,
2474 - (free->length - woffs));
2475 - woffs = 0;
2476 - } else {
2477 - bytes = min_t(size_t, len, free->length);
2478 - boffs = free->offset;
2479 - }
2480 - memcpy(chip->oob_poi + boffs, oob, bytes);
2481 - oob += bytes;
2482 - }
2483 - return oob;
2484 - }
2485 + case MTD_OPS_AUTO_OOB:
2486 + ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2487 + ops->ooboffs, len);
2488 + BUG_ON(ret);
2489 + return oob + len;
2490 +
2491 default:
2492 BUG();
2493 }
2494 @@ -2532,12 +2568,11 @@ static int nand_do_write_ops(struct mtd_
2495 struct mtd_oob_ops *ops)
2496 {
2497 int chipnr, realpage, page, blockmask, column;
2498 - struct nand_chip *chip = mtd->priv;
2499 + struct nand_chip *chip = mtd_to_nand(mtd);
2500 uint32_t writelen = ops->len;
2501
2502 uint32_t oobwritelen = ops->ooblen;
2503 - uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
2504 - mtd->oobavail : mtd->oobsize;
2505 + uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2506
2507 uint8_t *oob = ops->oobbuf;
2508 uint8_t *buf = ops->datbuf;
2509 @@ -2662,7 +2697,7 @@ err_out:
2510 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2511 size_t *retlen, const uint8_t *buf)
2512 {
2513 - struct nand_chip *chip = mtd->priv;
2514 + struct nand_chip *chip = mtd_to_nand(mtd);
2515 struct mtd_oob_ops ops;
2516 int ret;
2517
2518 @@ -2722,15 +2757,12 @@ static int nand_do_write_oob(struct mtd_
2519 struct mtd_oob_ops *ops)
2520 {
2521 int chipnr, page, status, len;
2522 - struct nand_chip *chip = mtd->priv;
2523 + struct nand_chip *chip = mtd_to_nand(mtd);
2524
2525 pr_debug("%s: to = 0x%08x, len = %i\n",
2526 __func__, (unsigned int)to, (int)ops->ooblen);
2527
2528 - if (ops->mode == MTD_OPS_AUTO_OOB)
2529 - len = chip->ecc.layout->oobavail;
2530 - else
2531 - len = mtd->oobsize;
2532 + len = mtd_oobavail(mtd, ops);
2533
2534 /* Do not allow write past end of page */
2535 if ((ops->ooboffs + ops->ooblen) > len) {
2536 @@ -2847,7 +2879,7 @@ out:
2537 */
2538 static int single_erase(struct mtd_info *mtd, int page)
2539 {
2540 - struct nand_chip *chip = mtd->priv;
2541 + struct nand_chip *chip = mtd_to_nand(mtd);
2542 /* Send commands to erase a block */
2543 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2544 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2545 @@ -2879,7 +2911,7 @@ int nand_erase_nand(struct mtd_info *mtd
2546 int allowbbt)
2547 {
2548 int page, status, pages_per_block, ret, chipnr;
2549 - struct nand_chip *chip = mtd->priv;
2550 + struct nand_chip *chip = mtd_to_nand(mtd);
2551 loff_t len;
2552
2553 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2554 @@ -2918,7 +2950,7 @@ int nand_erase_nand(struct mtd_info *mtd
2555 while (len) {
2556 /* Check if we have a bad block, we do not erase bad blocks! */
2557 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2558 - chip->page_shift, 0, allowbbt)) {
2559 + chip->page_shift, allowbbt)) {
2560 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
2561 __func__, page);
2562 instr->state = MTD_ERASE_FAILED;
2563 @@ -3005,7 +3037,20 @@ static void nand_sync(struct mtd_info *m
2564 */
2565 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2566 {
2567 - return nand_block_checkbad(mtd, offs, 1, 0);
2568 + struct nand_chip *chip = mtd_to_nand(mtd);
2569 + int chipnr = (int)(offs >> chip->chip_shift);
2570 + int ret;
2571 +
2572 + /* Select the NAND device */
2573 + nand_get_device(mtd, FL_READING);
2574 + chip->select_chip(mtd, chipnr);
2575 +
2576 + ret = nand_block_checkbad(mtd, offs, 0);
2577 +
2578 + chip->select_chip(mtd, -1);
2579 + nand_release_device(mtd);
2580 +
2581 + return ret;
2582 }
2583
2584 /**
2585 @@ -3094,7 +3139,7 @@ static int nand_suspend(struct mtd_info
2586 */
2587 static void nand_resume(struct mtd_info *mtd)
2588 {
2589 - struct nand_chip *chip = mtd->priv;
2590 + struct nand_chip *chip = mtd_to_nand(mtd);
2591
2592 if (chip->state == FL_PM_SUSPENDED)
2593 nand_release_device(mtd);
2594 @@ -3266,7 +3311,7 @@ ext_out:
2595
2596 static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
2597 {
2598 - struct nand_chip *chip = mtd->priv;
2599 + struct nand_chip *chip = mtd_to_nand(mtd);
2600 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
2601
2602 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
2603 @@ -3937,10 +3982,13 @@ ident_done:
2604 return type;
2605 }
2606
2607 -static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
2608 - struct device_node *dn)
2609 +static int nand_dt_init(struct nand_chip *chip)
2610 {
2611 - int ecc_mode, ecc_strength, ecc_step;
2612 + struct device_node *dn = nand_get_flash_node(chip);
2613 + int ecc_mode, ecc_algo, ecc_strength, ecc_step;
2614 +
2615 + if (!dn)
2616 + return 0;
2617
2618 if (of_get_nand_bus_width(dn) == 16)
2619 chip->options |= NAND_BUSWIDTH_16;
2620 @@ -3949,6 +3997,7 @@ static int nand_dt_init(struct mtd_info
2621 chip->bbt_options |= NAND_BBT_USE_FLASH;
2622
2623 ecc_mode = of_get_nand_ecc_mode(dn);
2624 + ecc_algo = of_get_nand_ecc_algo(dn);
2625 ecc_strength = of_get_nand_ecc_strength(dn);
2626 ecc_step = of_get_nand_ecc_step_size(dn);
2627
2628 @@ -3961,6 +4010,9 @@ static int nand_dt_init(struct mtd_info
2629 if (ecc_mode >= 0)
2630 chip->ecc.mode = ecc_mode;
2631
2632 + if (ecc_algo >= 0)
2633 + chip->ecc.algo = ecc_algo;
2634 +
2635 if (ecc_strength >= 0)
2636 chip->ecc.strength = ecc_strength;
2637
2638 @@ -3984,15 +4036,16 @@ int nand_scan_ident(struct mtd_info *mtd
2639 struct nand_flash_dev *table)
2640 {
2641 int i, nand_maf_id, nand_dev_id;
2642 - struct nand_chip *chip = mtd->priv;
2643 + struct nand_chip *chip = mtd_to_nand(mtd);
2644 struct nand_flash_dev *type;
2645 int ret;
2646
2647 - if (chip->flash_node) {
2648 - ret = nand_dt_init(mtd, chip, chip->flash_node);
2649 - if (ret)
2650 - return ret;
2651 - }
2652 + ret = nand_dt_init(chip);
2653 + if (ret)
2654 + return ret;
2655 +
2656 + if (!mtd->name && mtd->dev.parent)
2657 + mtd->name = dev_name(mtd->dev.parent);
2658
2659 if (!mtd->name && mtd->dev.parent)
2660 mtd->name = dev_name(mtd->dev.parent);
2661 @@ -4055,7 +4108,7 @@ EXPORT_SYMBOL(nand_scan_ident);
2662 */
2663 static bool nand_ecc_strength_good(struct mtd_info *mtd)
2664 {
2665 - struct nand_chip *chip = mtd->priv;
2666 + struct nand_chip *chip = mtd_to_nand(mtd);
2667 struct nand_ecc_ctrl *ecc = &chip->ecc;
2668 int corr, ds_corr;
2669
2670 @@ -4083,10 +4136,10 @@ static bool nand_ecc_strength_good(struc
2671 */
2672 int nand_scan_tail(struct mtd_info *mtd)
2673 {
2674 - int i;
2675 - struct nand_chip *chip = mtd->priv;
2676 + struct nand_chip *chip = mtd_to_nand(mtd);
2677 struct nand_ecc_ctrl *ecc = &chip->ecc;
2678 struct nand_buffers *nbuf;
2679 + int ret;
2680
2681 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
2682 BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
2683 @@ -4113,19 +4166,15 @@ int nand_scan_tail(struct mtd_info *mtd)
2684 /*
2685 * If no default placement scheme is given, select an appropriate one.
2686 */
2687 - if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
2688 + if (!mtd->ooblayout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
2689 switch (mtd->oobsize) {
2690 case 8:
2691 - ecc->layout = &nand_oob_8;
2692 - break;
2693 case 16:
2694 - ecc->layout = &nand_oob_16;
2695 + mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
2696 break;
2697 case 64:
2698 - ecc->layout = &nand_oob_64;
2699 - break;
2700 case 128:
2701 - ecc->layout = &nand_oob_128;
2702 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
2703 break;
2704 default:
2705 pr_warn("No oob scheme defined for oobsize %d\n",
2706 @@ -4168,7 +4217,7 @@ int nand_scan_tail(struct mtd_info *mtd)
2707 ecc->write_oob = nand_write_oob_std;
2708 if (!ecc->read_subpage)
2709 ecc->read_subpage = nand_read_subpage;
2710 - if (!ecc->write_subpage)
2711 + if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
2712 ecc->write_subpage = nand_write_subpage_hwecc;
2713
2714 case NAND_ECC_HW_SYNDROME:
2715 @@ -4246,10 +4295,8 @@ int nand_scan_tail(struct mtd_info *mtd)
2716 }
2717
2718 /* See nand_bch_init() for details. */
2719 - ecc->bytes = DIV_ROUND_UP(
2720 - ecc->strength * fls(8 * ecc->size), 8);
2721 - ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
2722 - &ecc->layout);
2723 + ecc->bytes = 0;
2724 + ecc->priv = nand_bch_init(mtd);
2725 if (!ecc->priv) {
2726 pr_warn("BCH ECC initialization failed!\n");
2727 BUG();
2728 @@ -4280,20 +4327,9 @@ int nand_scan_tail(struct mtd_info *mtd)
2729 if (!ecc->write_oob_raw)
2730 ecc->write_oob_raw = ecc->write_oob;
2731
2732 - /*
2733 - * The number of bytes available for a client to place data into
2734 - * the out of band area.
2735 - */
2736 - ecc->layout->oobavail = 0;
2737 - for (i = 0; ecc->layout->oobfree[i].length
2738 - && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
2739 - ecc->layout->oobavail += ecc->layout->oobfree[i].length;
2740 - mtd->oobavail = ecc->layout->oobavail;
2741 -
2742 - /* ECC sanity check: warn if it's too weak */
2743 - if (!nand_ecc_strength_good(mtd))
2744 - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
2745 - mtd->name);
2746 + /* propagate ecc info to mtd_info */
2747 + mtd->ecc_strength = ecc->strength;
2748 + mtd->ecc_step_size = ecc->size;
2749
2750 /*
2751 * Set the number of read / write steps for one page depending on ECC
2752 @@ -4306,6 +4342,21 @@ int nand_scan_tail(struct mtd_info *mtd)
2753 }
2754 ecc->total = ecc->steps * ecc->bytes;
2755
2756 + /*
2757 + * The number of bytes available for a client to place data into
2758 + * the out of band area.
2759 + */
2760 + ret = mtd_ooblayout_count_freebytes(mtd);
2761 + if (ret < 0)
2762 + ret = 0;
2763 +
2764 + mtd->oobavail = ret;
2765 +
2766 + /* ECC sanity check: warn if it's too weak */
2767 + if (!nand_ecc_strength_good(mtd))
2768 + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
2769 + mtd->name);
2770 +
2771 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
2772 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
2773 switch (ecc->steps) {
2774 @@ -4362,10 +4413,6 @@ int nand_scan_tail(struct mtd_info *mtd)
2775 mtd->_block_markbad = nand_block_markbad;
2776 mtd->writebufsize = mtd->writesize;
2777
2778 - /* propagate ecc info to mtd_info */
2779 - mtd->ecclayout = ecc->layout;
2780 - mtd->ecc_strength = ecc->strength;
2781 - mtd->ecc_step_size = ecc->size;
2782 /*
2783 * Initialize bitflip_threshold to its default prior scan_bbt() call.
2784 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
2785 @@ -4421,7 +4468,7 @@ EXPORT_SYMBOL(nand_scan);
2786 */
2787 void nand_release(struct mtd_info *mtd)
2788 {
2789 - struct nand_chip *chip = mtd->priv;
2790 + struct nand_chip *chip = mtd_to_nand(mtd);
2791
2792 if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
2793 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
2794 --- a/drivers/mtd/nand/nand_bbt.c
2795 +++ b/drivers/mtd/nand/nand_bbt.c
2796 @@ -172,7 +172,7 @@ static int read_bbt(struct mtd_info *mtd
2797 struct nand_bbt_descr *td, int offs)
2798 {
2799 int res, ret = 0, i, j, act = 0;
2800 - struct nand_chip *this = mtd->priv;
2801 + struct nand_chip *this = mtd_to_nand(mtd);
2802 size_t retlen, len, totlen;
2803 loff_t from;
2804 int bits = td->options & NAND_BBT_NRBITS_MSK;
2805 @@ -263,7 +263,7 @@ static int read_bbt(struct mtd_info *mtd
2806 */
2807 static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
2808 {
2809 - struct nand_chip *this = mtd->priv;
2810 + struct nand_chip *this = mtd_to_nand(mtd);
2811 int res = 0, i;
2812
2813 if (td->options & NAND_BBT_PERCHIP) {
2814 @@ -388,7 +388,7 @@ static u32 bbt_get_ver_offs(struct mtd_i
2815 static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
2816 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
2817 {
2818 - struct nand_chip *this = mtd->priv;
2819 + struct nand_chip *this = mtd_to_nand(mtd);
2820
2821 /* Read the primary version, if available */
2822 if (td->options & NAND_BBT_VERSION) {
2823 @@ -454,7 +454,7 @@ static int scan_block_fast(struct mtd_in
2824 static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
2825 struct nand_bbt_descr *bd, int chip)
2826 {
2827 - struct nand_chip *this = mtd->priv;
2828 + struct nand_chip *this = mtd_to_nand(mtd);
2829 int i, numblocks, numpages;
2830 int startblock;
2831 loff_t from;
2832 @@ -523,7 +523,7 @@ static int create_bbt(struct mtd_info *m
2833 */
2834 static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
2835 {
2836 - struct nand_chip *this = mtd->priv;
2837 + struct nand_chip *this = mtd_to_nand(mtd);
2838 int i, chips;
2839 int startblock, block, dir;
2840 int scanlen = mtd->writesize + mtd->oobsize;
2841 @@ -618,7 +618,7 @@ static int write_bbt(struct mtd_info *mt
2842 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
2843 int chipsel)
2844 {
2845 - struct nand_chip *this = mtd->priv;
2846 + struct nand_chip *this = mtd_to_nand(mtd);
2847 struct erase_info einfo;
2848 int i, res, chip = 0;
2849 int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
2850 @@ -819,7 +819,7 @@ static int write_bbt(struct mtd_info *mt
2851 */
2852 static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2853 {
2854 - struct nand_chip *this = mtd->priv;
2855 + struct nand_chip *this = mtd_to_nand(mtd);
2856
2857 return create_bbt(mtd, this->buffers->databuf, bd, -1);
2858 }
2859 @@ -838,7 +838,7 @@ static inline int nand_memory_bbt(struct
2860 static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
2861 {
2862 int i, chips, writeops, create, chipsel, res, res2;
2863 - struct nand_chip *this = mtd->priv;
2864 + struct nand_chip *this = mtd_to_nand(mtd);
2865 struct nand_bbt_descr *td = this->bbt_td;
2866 struct nand_bbt_descr *md = this->bbt_md;
2867 struct nand_bbt_descr *rd, *rd2;
2868 @@ -962,7 +962,7 @@ static int check_create(struct mtd_info
2869 */
2870 static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
2871 {
2872 - struct nand_chip *this = mtd->priv;
2873 + struct nand_chip *this = mtd_to_nand(mtd);
2874 int i, j, chips, block, nrblocks, update;
2875 uint8_t oldval;
2876
2877 @@ -1022,7 +1022,7 @@ static void mark_bbt_region(struct mtd_i
2878 */
2879 static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2880 {
2881 - struct nand_chip *this = mtd->priv;
2882 + struct nand_chip *this = mtd_to_nand(mtd);
2883 u32 pattern_len;
2884 u32 bits;
2885 u32 table_size;
2886 @@ -1074,7 +1074,7 @@ static void verify_bbt_descr(struct mtd_
2887 */
2888 static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2889 {
2890 - struct nand_chip *this = mtd->priv;
2891 + struct nand_chip *this = mtd_to_nand(mtd);
2892 int len, res;
2893 uint8_t *buf;
2894 struct nand_bbt_descr *td = this->bbt_td;
2895 @@ -1147,7 +1147,7 @@ err:
2896 */
2897 static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
2898 {
2899 - struct nand_chip *this = mtd->priv;
2900 + struct nand_chip *this = mtd_to_nand(mtd);
2901 int len, res = 0;
2902 int chip, chipsel;
2903 uint8_t *buf;
2904 @@ -1281,7 +1281,7 @@ static int nand_create_badblock_pattern(
2905 */
2906 int nand_default_bbt(struct mtd_info *mtd)
2907 {
2908 - struct nand_chip *this = mtd->priv;
2909 + struct nand_chip *this = mtd_to_nand(mtd);
2910 int ret;
2911
2912 /* Is a flash based bad block table requested? */
2913 @@ -1317,7 +1317,7 @@ int nand_default_bbt(struct mtd_info *mt
2914 */
2915 int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
2916 {
2917 - struct nand_chip *this = mtd->priv;
2918 + struct nand_chip *this = mtd_to_nand(mtd);
2919 int block;
2920
2921 block = (int)(offs >> this->bbt_erase_shift);
2922 @@ -1332,7 +1332,7 @@ int nand_isreserved_bbt(struct mtd_info
2923 */
2924 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
2925 {
2926 - struct nand_chip *this = mtd->priv;
2927 + struct nand_chip *this = mtd_to_nand(mtd);
2928 int block, res;
2929
2930 block = (int)(offs >> this->bbt_erase_shift);
2931 @@ -1359,7 +1359,7 @@ int nand_isbad_bbt(struct mtd_info *mtd,
2932 */
2933 int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
2934 {
2935 - struct nand_chip *this = mtd->priv;
2936 + struct nand_chip *this = mtd_to_nand(mtd);
2937 int block, ret = 0;
2938
2939 block = (int)(offs >> this->bbt_erase_shift);
2940 @@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mt
2941
2942 return ret;
2943 }
2944 -
2945 -EXPORT_SYMBOL(nand_scan_bbt);
2946 --- a/drivers/mtd/nand/nand_bch.c
2947 +++ b/drivers/mtd/nand/nand_bch.c
2948 @@ -32,13 +32,11 @@
2949 /**
2950 * struct nand_bch_control - private NAND BCH control structure
2951 * @bch: BCH control structure
2952 - * @ecclayout: private ecc layout for this BCH configuration
2953 * @errloc: error location array
2954 * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
2955 */
2956 struct nand_bch_control {
2957 struct bch_control *bch;
2958 - struct nand_ecclayout ecclayout;
2959 unsigned int *errloc;
2960 unsigned char *eccmask;
2961 };
2962 @@ -52,7 +50,7 @@ struct nand_bch_control {
2963 int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
2964 unsigned char *code)
2965 {
2966 - const struct nand_chip *chip = mtd->priv;
2967 + const struct nand_chip *chip = mtd_to_nand(mtd);
2968 struct nand_bch_control *nbc = chip->ecc.priv;
2969 unsigned int i;
2970
2971 @@ -79,7 +77,7 @@ EXPORT_SYMBOL(nand_bch_calculate_ecc);
2972 int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
2973 unsigned char *read_ecc, unsigned char *calc_ecc)
2974 {
2975 - const struct nand_chip *chip = mtd->priv;
2976 + const struct nand_chip *chip = mtd_to_nand(mtd);
2977 struct nand_bch_control *nbc = chip->ecc.priv;
2978 unsigned int *errloc = nbc->errloc;
2979 int i, count;
2980 @@ -98,7 +96,7 @@ int nand_bch_correct_data(struct mtd_inf
2981 }
2982 } else if (count < 0) {
2983 printk(KERN_ERR "ecc unrecoverable error\n");
2984 - count = -1;
2985 + count = -EBADMSG;
2986 }
2987 return count;
2988 }
2989 @@ -107,9 +105,6 @@ EXPORT_SYMBOL(nand_bch_correct_data);
2990 /**
2991 * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
2992 * @mtd: MTD block structure
2993 - * @eccsize: ecc block size in bytes
2994 - * @eccbytes: ecc length in bytes
2995 - * @ecclayout: output default layout
2996 *
2997 * Returns:
2998 * a pointer to a new NAND BCH control structure, or NULL upon failure
2999 @@ -123,14 +118,20 @@ EXPORT_SYMBOL(nand_bch_correct_data);
3000 * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
3001 * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
3002 */
3003 -struct nand_bch_control *
3004 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
3005 - struct nand_ecclayout **ecclayout)
3006 +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
3007 {
3008 + struct nand_chip *nand = mtd_to_nand(mtd);
3009 unsigned int m, t, eccsteps, i;
3010 - struct nand_ecclayout *layout;
3011 struct nand_bch_control *nbc = NULL;
3012 unsigned char *erased_page;
3013 + unsigned int eccsize = nand->ecc.size;
3014 + unsigned int eccbytes = nand->ecc.bytes;
3015 + unsigned int eccstrength = nand->ecc.strength;
3016 +
3017 + if (!eccbytes && eccstrength) {
3018 + eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
3019 + nand->ecc.bytes = eccbytes;
3020 + }
3021
3022 if (!eccsize || !eccbytes) {
3023 printk(KERN_WARNING "ecc parameters not supplied\n");
3024 @@ -158,7 +159,7 @@ nand_bch_init(struct mtd_info *mtd, unsi
3025 eccsteps = mtd->writesize/eccsize;
3026
3027 /* if no ecc placement scheme was provided, build one */
3028 - if (!*ecclayout) {
3029 + if (!mtd->ooblayout) {
3030
3031 /* handle large page devices only */
3032 if (mtd->oobsize < 64) {
3033 @@ -167,24 +168,7 @@ nand_bch_init(struct mtd_info *mtd, unsi
3034 goto fail;
3035 }
3036
3037 - layout = &nbc->ecclayout;
3038 - layout->eccbytes = eccsteps*eccbytes;
3039 -
3040 - /* reserve 2 bytes for bad block marker */
3041 - if (layout->eccbytes+2 > mtd->oobsize) {
3042 - printk(KERN_WARNING "no suitable oob scheme available "
3043 - "for oobsize %d eccbytes %u\n", mtd->oobsize,
3044 - eccbytes);
3045 - goto fail;
3046 - }
3047 - /* put ecc bytes at oob tail */
3048 - for (i = 0; i < layout->eccbytes; i++)
3049 - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
3050 -
3051 - layout->oobfree[0].offset = 2;
3052 - layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
3053 -
3054 - *ecclayout = layout;
3055 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
3056 }
3057
3058 /* sanity checks */
3059 @@ -192,7 +176,8 @@ nand_bch_init(struct mtd_info *mtd, unsi
3060 printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
3061 goto fail;
3062 }
3063 - if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
3064 +
3065 + if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
3066 printk(KERN_WARNING "invalid ecc layout\n");
3067 goto fail;
3068 }
3069 @@ -216,6 +201,9 @@ nand_bch_init(struct mtd_info *mtd, unsi
3070 for (i = 0; i < eccbytes; i++)
3071 nbc->eccmask[i] ^= 0xff;
3072
3073 + if (!eccstrength)
3074 + nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
3075 +
3076 return nbc;
3077 fail:
3078 nand_bch_free(nbc);
3079 --- a/drivers/mtd/nand/nand_ecc.c
3080 +++ b/drivers/mtd/nand/nand_ecc.c
3081 @@ -424,7 +424,7 @@ int nand_calculate_ecc(struct mtd_info *
3082 unsigned char *code)
3083 {
3084 __nand_calculate_ecc(buf,
3085 - ((struct nand_chip *)mtd->priv)->ecc.size, code);
3086 + mtd_to_nand(mtd)->ecc.size, code);
3087
3088 return 0;
3089 }
3090 @@ -524,7 +524,7 @@ int nand_correct_data(struct mtd_info *m
3091 unsigned char *read_ecc, unsigned char *calc_ecc)
3092 {
3093 return __nand_correct_data(buf, read_ecc, calc_ecc,
3094 - ((struct nand_chip *)mtd->priv)->ecc.size);
3095 + mtd_to_nand(mtd)->ecc.size);
3096 }
3097 EXPORT_SYMBOL(nand_correct_data);
3098
3099 --- a/drivers/mtd/nand/nand_ids.c
3100 +++ b/drivers/mtd/nand/nand_ids.c
3101 @@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] =
3102 SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
3103 {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
3104 { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
3105 - SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
3106 - 4 },
3107 + SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
3108 + NAND_ECC_INFO(40, SZ_1K), 4 },
3109
3110 LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
3111 LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
3112 --- a/drivers/mtd/nand/nandsim.c
3113 +++ b/drivers/mtd/nand/nandsim.c
3114 @@ -666,8 +666,8 @@ static char *get_partition_name(int i)
3115 */
3116 static int init_nandsim(struct mtd_info *mtd)
3117 {
3118 - struct nand_chip *chip = mtd->priv;
3119 - struct nandsim *ns = chip->priv;
3120 + struct nand_chip *chip = mtd_to_nand(mtd);
3121 + struct nandsim *ns = nand_get_controller_data(chip);
3122 int i, ret = 0;
3123 uint64_t remains;
3124 uint64_t next_offset;
3125 @@ -1908,7 +1908,8 @@ static void switch_state(struct nandsim
3126
3127 static u_char ns_nand_read_byte(struct mtd_info *mtd)
3128 {
3129 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3130 + struct nand_chip *chip = mtd_to_nand(mtd);
3131 + struct nandsim *ns = nand_get_controller_data(chip);
3132 u_char outb = 0x00;
3133
3134 /* Sanity and correctness checks */
3135 @@ -1969,7 +1970,8 @@ static u_char ns_nand_read_byte(struct m
3136
3137 static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
3138 {
3139 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3140 + struct nand_chip *chip = mtd_to_nand(mtd);
3141 + struct nandsim *ns = nand_get_controller_data(chip);
3142
3143 /* Sanity and correctness checks */
3144 if (!ns->lines.ce) {
3145 @@ -2123,7 +2125,8 @@ static void ns_nand_write_byte(struct mt
3146
3147 static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
3148 {
3149 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3150 + struct nand_chip *chip = mtd_to_nand(mtd);
3151 + struct nandsim *ns = nand_get_controller_data(chip);
3152
3153 ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
3154 ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
3155 @@ -2141,7 +2144,7 @@ static int ns_device_ready(struct mtd_in
3156
3157 static uint16_t ns_nand_read_word(struct mtd_info *mtd)
3158 {
3159 - struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3160 + struct nand_chip *chip = mtd_to_nand(mtd);
3161
3162 NS_DBG("read_word\n");
3163
3164 @@ -2150,7 +2153,8 @@ static uint16_t ns_nand_read_word(struct
3165
3166 static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
3167 {
3168 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3169 + struct nand_chip *chip = mtd_to_nand(mtd);
3170 + struct nandsim *ns = nand_get_controller_data(chip);
3171
3172 /* Check that chip is expecting data input */
3173 if (!(ns->state & STATE_DATAIN_MASK)) {
3174 @@ -2177,7 +2181,8 @@ static void ns_nand_write_buf(struct mtd
3175
3176 static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
3177 {
3178 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3179 + struct nand_chip *chip = mtd_to_nand(mtd);
3180 + struct nandsim *ns = nand_get_controller_data(chip);
3181
3182 /* Sanity and correctness checks */
3183 if (!ns->lines.ce) {
3184 @@ -2198,7 +2203,7 @@ static void ns_nand_read_buf(struct mtd_
3185 int i;
3186
3187 for (i = 0; i < len; i++)
3188 - buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
3189 + buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
3190
3191 return;
3192 }
3193 @@ -2236,16 +2241,15 @@ static int __init ns_init_module(void)
3194 }
3195
3196 /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
3197 - nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
3198 - + sizeof(struct nandsim), GFP_KERNEL);
3199 - if (!nsmtd) {
3200 + chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
3201 + GFP_KERNEL);
3202 + if (!chip) {
3203 NS_ERR("unable to allocate core structures.\n");
3204 return -ENOMEM;
3205 }
3206 - chip = (struct nand_chip *)(nsmtd + 1);
3207 - nsmtd->priv = (void *)chip;
3208 + nsmtd = nand_to_mtd(chip);
3209 nand = (struct nandsim *)(chip + 1);
3210 - chip->priv = (void *)nand;
3211 + nand_set_controller_data(chip, (void *)nand);
3212
3213 /*
3214 * Register simulator's callbacks.
3215 @@ -2257,6 +2261,7 @@ static int __init ns_init_module(void)
3216 chip->read_buf = ns_nand_read_buf;
3217 chip->read_word = ns_nand_read_word;
3218 chip->ecc.mode = NAND_ECC_SOFT;
3219 + chip->ecc.algo = NAND_ECC_HAMMING;
3220 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
3221 /* and 'badblocks' parameters to work */
3222 chip->options |= NAND_SKIP_BBTSCAN;
3223 @@ -2335,6 +2340,7 @@ static int __init ns_init_module(void)
3224 goto error;
3225 }
3226 chip->ecc.mode = NAND_ECC_SOFT_BCH;
3227 + chip->ecc.algo = NAND_ECC_BCH;
3228 chip->ecc.size = 512;
3229 chip->ecc.strength = bch;
3230 chip->ecc.bytes = eccbytes;
3231 @@ -2392,7 +2398,7 @@ err_exit:
3232 for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
3233 kfree(nand->partitions[i].name);
3234 error:
3235 - kfree(nsmtd);
3236 + kfree(chip);
3237 free_lists();
3238
3239 return retval;
3240 @@ -2405,7 +2411,8 @@ module_init(ns_init_module);
3241 */
3242 static void __exit ns_cleanup_module(void)
3243 {
3244 - struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
3245 + struct nand_chip *chip = mtd_to_nand(nsmtd);
3246 + struct nandsim *ns = nand_get_controller_data(chip);
3247 int i;
3248
3249 nandsim_debugfs_remove(ns);
3250 @@ -2413,7 +2420,7 @@ static void __exit ns_cleanup_module(voi
3251 nand_release(nsmtd); /* Unregister driver */
3252 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
3253 kfree(ns->partitions[i].name);
3254 - kfree(nsmtd); /* Free other structures */
3255 + kfree(mtd_to_nand(nsmtd)); /* Free other structures */
3256 free_lists();
3257 }
3258
3259 --- a/drivers/mtd/ofpart.c
3260 +++ b/drivers/mtd/ofpart.c
3261 @@ -26,9 +26,10 @@ static bool node_has_compatible(struct d
3262 }
3263
3264 static int parse_ofpart_partitions(struct mtd_info *master,
3265 - struct mtd_partition **pparts,
3266 + const struct mtd_partition **pparts,
3267 struct mtd_part_parser_data *data)
3268 {
3269 + struct mtd_partition *parts;
3270 struct device_node *mtd_node;
3271 struct device_node *ofpart_node;
3272 const char *partname;
3273 @@ -37,10 +38,8 @@ static int parse_ofpart_partitions(struc
3274 bool dedicated = true;
3275
3276
3277 - if (!data)
3278 - return 0;
3279 -
3280 - mtd_node = data->of_node;
3281 + /* Pull of_node from the master device node */
3282 + mtd_node = mtd_get_of_node(master);
3283 if (!mtd_node)
3284 return 0;
3285
3286 @@ -72,8 +71,8 @@ static int parse_ofpart_partitions(struc
3287 if (nr_parts == 0)
3288 return 0;
3289
3290 - *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
3291 - if (!*pparts)
3292 + parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
3293 + if (!parts)
3294 return -ENOMEM;
3295
3296 i = 0;
3297 @@ -107,19 +106,19 @@ static int parse_ofpart_partitions(struc
3298 goto ofpart_fail;
3299 }
3300
3301 - (*pparts)[i].offset = of_read_number(reg, a_cells);
3302 - (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
3303 + parts[i].offset = of_read_number(reg, a_cells);
3304 + parts[i].size = of_read_number(reg + a_cells, s_cells);
3305
3306 partname = of_get_property(pp, "label", &len);
3307 if (!partname)
3308 partname = of_get_property(pp, "name", &len);
3309 - (*pparts)[i].name = partname;
3310 + parts[i].name = partname;
3311
3312 if (of_get_property(pp, "read-only", &len))
3313 - (*pparts)[i].mask_flags |= MTD_WRITEABLE;
3314 + parts[i].mask_flags |= MTD_WRITEABLE;
3315
3316 if (of_get_property(pp, "lock", &len))
3317 - (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
3318 + parts[i].mask_flags |= MTD_POWERUP_LOCK;
3319
3320 i++;
3321 }
3322 @@ -127,6 +126,7 @@ static int parse_ofpart_partitions(struc
3323 if (!nr_parts)
3324 goto ofpart_none;
3325
3326 + *pparts = parts;
3327 return nr_parts;
3328
3329 ofpart_fail:
3330 @@ -135,21 +135,20 @@ ofpart_fail:
3331 ret = -EINVAL;
3332 ofpart_none:
3333 of_node_put(pp);
3334 - kfree(*pparts);
3335 - *pparts = NULL;
3336 + kfree(parts);
3337 return ret;
3338 }
3339
3340 static struct mtd_part_parser ofpart_parser = {
3341 - .owner = THIS_MODULE,
3342 .parse_fn = parse_ofpart_partitions,
3343 .name = "ofpart",
3344 };
3345
3346 static int parse_ofoldpart_partitions(struct mtd_info *master,
3347 - struct mtd_partition **pparts,
3348 + const struct mtd_partition **pparts,
3349 struct mtd_part_parser_data *data)
3350 {
3351 + struct mtd_partition *parts;
3352 struct device_node *dp;
3353 int i, plen, nr_parts;
3354 const struct {
3355 @@ -157,10 +156,8 @@ static int parse_ofoldpart_partitions(st
3356 } *part;
3357 const char *names;
3358
3359 - if (!data)
3360 - return 0;
3361 -
3362 - dp = data->of_node;
3363 + /* Pull of_node from the master device node */
3364 + dp = mtd_get_of_node(master);
3365 if (!dp)
3366 return 0;
3367
3368 @@ -173,37 +170,37 @@ static int parse_ofoldpart_partitions(st
3369
3370 nr_parts = plen / sizeof(part[0]);
3371
3372 - *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
3373 - if (!*pparts)
3374 + parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
3375 + if (!parts)
3376 return -ENOMEM;
3377
3378 names = of_get_property(dp, "partition-names", &plen);
3379
3380 for (i = 0; i < nr_parts; i++) {
3381 - (*pparts)[i].offset = be32_to_cpu(part->offset);
3382 - (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
3383 + parts[i].offset = be32_to_cpu(part->offset);
3384 + parts[i].size = be32_to_cpu(part->len) & ~1;
3385 /* bit 0 set signifies read only partition */
3386 if (be32_to_cpu(part->len) & 1)
3387 - (*pparts)[i].mask_flags = MTD_WRITEABLE;
3388 + parts[i].mask_flags = MTD_WRITEABLE;
3389
3390 if (names && (plen > 0)) {
3391 int len = strlen(names) + 1;
3392
3393 - (*pparts)[i].name = names;
3394 + parts[i].name = names;
3395 plen -= len;
3396 names += len;
3397 } else {
3398 - (*pparts)[i].name = "unnamed";
3399 + parts[i].name = "unnamed";
3400 }
3401
3402 part++;
3403 }
3404
3405 + *pparts = parts;
3406 return nr_parts;
3407 }
3408
3409 static struct mtd_part_parser ofoldpart_parser = {
3410 - .owner = THIS_MODULE,
3411 .parse_fn = parse_ofoldpart_partitions,
3412 .name = "ofoldpart",
3413 };
3414 --- a/drivers/mtd/spi-nor/Kconfig
3415 +++ b/drivers/mtd/spi-nor/Kconfig
3416 @@ -7,6 +7,14 @@ menuconfig MTD_SPI_NOR
3417
3418 if MTD_SPI_NOR
3419
3420 +config MTD_MT81xx_NOR
3421 + tristate "Mediatek MT81xx SPI NOR flash controller"
3422 + depends on HAS_IOMEM
3423 + help
3424 + This enables access to SPI NOR flash, using MT81xx SPI NOR flash
3425 + controller. This controller does not support generic SPI BUS, it only
3426 + supports SPI NOR Flash.
3427 +
3428 config MTD_SPI_NOR_USE_4K_SECTORS
3429 bool "Use small 4096 B erase sectors"
3430 default y
3431 @@ -23,7 +31,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
3432
3433 config SPI_FSL_QUADSPI
3434 tristate "Freescale Quad SPI controller"
3435 - depends on ARCH_MXC || COMPILE_TEST
3436 + depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
3437 depends on HAS_IOMEM
3438 help
3439 This enables support for the Quad SPI controller in master mode.
3440 --- a/drivers/mtd/spi-nor/Makefile
3441 +++ b/drivers/mtd/spi-nor/Makefile
3442 @@ -1,3 +1,4 @@
3443 obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
3444 obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
3445 +obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
3446 obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
3447 --- /dev/null
3448 +++ b/drivers/mtd/spi-nor/mtk-quadspi.c
3449 @@ -0,0 +1,485 @@
3450 +/*
3451 + * Copyright (c) 2015 MediaTek Inc.
3452 + * Author: Bayi Cheng <bayi.cheng@mediatek.com>
3453 + *
3454 + * This program is free software; you can redistribute it and/or modify
3455 + * it under the terms of the GNU General Public License version 2 as
3456 + * published by the Free Software Foundation.
3457 + *
3458 + * This program is distributed in the hope that it will be useful,
3459 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3460 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3461 + * GNU General Public License for more details.
3462 + */
3463 +
3464 +#include <linux/clk.h>
3465 +#include <linux/delay.h>
3466 +#include <linux/device.h>
3467 +#include <linux/init.h>
3468 +#include <linux/io.h>
3469 +#include <linux/iopoll.h>
3470 +#include <linux/ioport.h>
3471 +#include <linux/math64.h>
3472 +#include <linux/module.h>
3473 +#include <linux/mtd/mtd.h>
3474 +#include <linux/mutex.h>
3475 +#include <linux/of.h>
3476 +#include <linux/of_device.h>
3477 +#include <linux/pinctrl/consumer.h>
3478 +#include <linux/platform_device.h>
3479 +#include <linux/slab.h>
3480 +#include <linux/mtd/mtd.h>
3481 +#include <linux/mtd/partitions.h>
3482 +#include <linux/mtd/spi-nor.h>
3483 +
3484 +#define MTK_NOR_CMD_REG 0x00
3485 +#define MTK_NOR_CNT_REG 0x04
3486 +#define MTK_NOR_RDSR_REG 0x08
3487 +#define MTK_NOR_RDATA_REG 0x0c
3488 +#define MTK_NOR_RADR0_REG 0x10
3489 +#define MTK_NOR_RADR1_REG 0x14
3490 +#define MTK_NOR_RADR2_REG 0x18
3491 +#define MTK_NOR_WDATA_REG 0x1c
3492 +#define MTK_NOR_PRGDATA0_REG 0x20
3493 +#define MTK_NOR_PRGDATA1_REG 0x24
3494 +#define MTK_NOR_PRGDATA2_REG 0x28
3495 +#define MTK_NOR_PRGDATA3_REG 0x2c
3496 +#define MTK_NOR_PRGDATA4_REG 0x30
3497 +#define MTK_NOR_PRGDATA5_REG 0x34
3498 +#define MTK_NOR_SHREG0_REG 0x38
3499 +#define MTK_NOR_SHREG1_REG 0x3c
3500 +#define MTK_NOR_SHREG2_REG 0x40
3501 +#define MTK_NOR_SHREG3_REG 0x44
3502 +#define MTK_NOR_SHREG4_REG 0x48
3503 +#define MTK_NOR_SHREG5_REG 0x4c
3504 +#define MTK_NOR_SHREG6_REG 0x50
3505 +#define MTK_NOR_SHREG7_REG 0x54
3506 +#define MTK_NOR_SHREG8_REG 0x58
3507 +#define MTK_NOR_SHREG9_REG 0x5c
3508 +#define MTK_NOR_CFG1_REG 0x60
3509 +#define MTK_NOR_CFG2_REG 0x64
3510 +#define MTK_NOR_CFG3_REG 0x68
3511 +#define MTK_NOR_STATUS0_REG 0x70
3512 +#define MTK_NOR_STATUS1_REG 0x74
3513 +#define MTK_NOR_STATUS2_REG 0x78
3514 +#define MTK_NOR_STATUS3_REG 0x7c
3515 +#define MTK_NOR_FLHCFG_REG 0x84
3516 +#define MTK_NOR_TIME_REG 0x94
3517 +#define MTK_NOR_PP_DATA_REG 0x98
3518 +#define MTK_NOR_PREBUF_STUS_REG 0x9c
3519 +#define MTK_NOR_DELSEL0_REG 0xa0
3520 +#define MTK_NOR_DELSEL1_REG 0xa4
3521 +#define MTK_NOR_INTRSTUS_REG 0xa8
3522 +#define MTK_NOR_INTREN_REG 0xac
3523 +#define MTK_NOR_CHKSUM_CTL_REG 0xb8
3524 +#define MTK_NOR_CHKSUM_REG 0xbc
3525 +#define MTK_NOR_CMD2_REG 0xc0
3526 +#define MTK_NOR_WRPROT_REG 0xc4
3527 +#define MTK_NOR_RADR3_REG 0xc8
3528 +#define MTK_NOR_DUAL_REG 0xcc
3529 +#define MTK_NOR_DELSEL2_REG 0xd0
3530 +#define MTK_NOR_DELSEL3_REG 0xd4
3531 +#define MTK_NOR_DELSEL4_REG 0xd8
3532 +
3533 +/* commands for mtk nor controller */
3534 +#define MTK_NOR_READ_CMD 0x0
3535 +#define MTK_NOR_RDSR_CMD 0x2
3536 +#define MTK_NOR_PRG_CMD 0x4
3537 +#define MTK_NOR_WR_CMD 0x10
3538 +#define MTK_NOR_PIO_WR_CMD 0x90
3539 +#define MTK_NOR_WRSR_CMD 0x20
3540 +#define MTK_NOR_PIO_READ_CMD 0x81
3541 +#define MTK_NOR_WR_BUF_ENABLE 0x1
3542 +#define MTK_NOR_WR_BUF_DISABLE 0x0
3543 +#define MTK_NOR_ENABLE_SF_CMD 0x30
3544 +#define MTK_NOR_DUAD_ADDR_EN 0x8
3545 +#define MTK_NOR_QUAD_READ_EN 0x4
3546 +#define MTK_NOR_DUAL_ADDR_EN 0x2
3547 +#define MTK_NOR_DUAL_READ_EN 0x1
3548 +#define MTK_NOR_DUAL_DISABLE 0x0
3549 +#define MTK_NOR_FAST_READ 0x1
3550 +
3551 +#define SFLASH_WRBUF_SIZE 128
3552 +
3553 +/* Can shift up to 48 bits (6 bytes) of TX/RX */
3554 +#define MTK_NOR_MAX_RX_TX_SHIFT 6
3555 +/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
3556 +#define MTK_NOR_MAX_SHIFT 7
3557 +
3558 +/* Helpers for accessing the program data / shift data registers */
3559 +#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
3560 +#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
3561 +
3562 +struct mt8173_nor {
3563 + struct spi_nor nor;
3564 + struct device *dev;
3565 + void __iomem *base; /* nor flash base address */
3566 + struct clk *spi_clk;
3567 + struct clk *nor_clk;
3568 +};
3569 +
3570 +static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
3571 +{
3572 + struct spi_nor *nor = &mt8173_nor->nor;
3573 +
3574 + switch (nor->flash_read) {
3575 + case SPI_NOR_FAST:
3576 + writeb(nor->read_opcode, mt8173_nor->base +
3577 + MTK_NOR_PRGDATA3_REG);
3578 + writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
3579 + MTK_NOR_CFG1_REG);
3580 + break;
3581 + case SPI_NOR_DUAL:
3582 + writeb(nor->read_opcode, mt8173_nor->base +
3583 + MTK_NOR_PRGDATA3_REG);
3584 + writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
3585 + MTK_NOR_DUAL_REG);
3586 + break;
3587 + case SPI_NOR_QUAD:
3588 + writeb(nor->read_opcode, mt8173_nor->base +
3589 + MTK_NOR_PRGDATA4_REG);
3590 + writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
3591 + MTK_NOR_DUAL_REG);
3592 + break;
3593 + default:
3594 + writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
3595 + MTK_NOR_DUAL_REG);
3596 + break;
3597 + }
3598 +}
3599 +
3600 +static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
3601 +{
3602 + int reg;
3603 + u8 val = cmdval & 0x1f;
3604 +
3605 + writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
3606 + return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
3607 + !(reg & val), 100, 10000);
3608 +}
3609 +
3610 +static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
3611 + u8 *tx, int txlen, u8 *rx, int rxlen)
3612 +{
3613 + int len = 1 + txlen + rxlen;
3614 + int i, ret, idx;
3615 +
3616 + if (len > MTK_NOR_MAX_SHIFT)
3617 + return -EINVAL;
3618 +
3619 + writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
3620 +
3621 + /* start at PRGDATA5, go down to PRGDATA0 */
3622 + idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
3623 +
3624 + /* opcode */
3625 + writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3626 + idx--;
3627 +
3628 + /* program TX data */
3629 + for (i = 0; i < txlen; i++, idx--)
3630 + writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3631 +
3632 + /* clear out rest of TX registers */
3633 + while (idx >= 0) {
3634 + writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3635 + idx--;
3636 + }
3637 +
3638 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
3639 + if (ret)
3640 + return ret;
3641 +
3642 + /* restart at first RX byte */
3643 + idx = rxlen - 1;
3644 +
3645 + /* read out RX data */
3646 + for (i = 0; i < rxlen; i++, idx--)
3647 + rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
3648 +
3649 + return 0;
3650 +}
3651 +
3652 +/* Do a WRSR (Write Status Register) command */
3653 +static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
3654 +{
3655 + writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
3656 + writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
3657 + return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
3658 +}
3659 +
3660 +static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
3661 +{
3662 + u8 reg;
3663 +
3664 + /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer
3665 + * 0: pre-fetch buffer use for read
3666 + * 1: pre-fetch buffer use for page program
3667 + */
3668 + writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
3669 + return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
3670 + 0x01 == (reg & 0x01), 100, 10000);
3671 +}
3672 +
3673 +static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
3674 +{
3675 + u8 reg;
3676 +
3677 + writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
3678 + return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
3679 + MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
3680 + 10000);
3681 +}
3682 +
3683 +static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
3684 +{
3685 + int i;
3686 +
3687 + for (i = 0; i < 3; i++) {
3688 + writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
3689 + addr >>= 8;
3690 + }
3691 + /* Last register is non-contiguous */
3692 + writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
3693 +}
3694 +
3695 +static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
3696 + size_t *retlen, u_char *buffer)
3697 +{
3698 + int i, ret;
3699 + int addr = (int)from;
3700 + u8 *buf = (u8 *)buffer;
3701 + struct mt8173_nor *mt8173_nor = nor->priv;
3702 +
3703 + /* set mode for fast read mode ,dual mode or quad mode */
3704 + mt8173_nor_set_read_mode(mt8173_nor);
3705 + mt8173_nor_set_addr(mt8173_nor, addr);
3706 +
3707 + for (i = 0; i < length; i++, (*retlen)++) {
3708 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
3709 + if (ret < 0)
3710 + return ret;
3711 + buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
3712 + }
3713 + return 0;
3714 +}
3715 +
3716 +static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
3717 + int addr, int length, u8 *data)
3718 +{
3719 + int i, ret;
3720 +
3721 + mt8173_nor_set_addr(mt8173_nor, addr);
3722 +
3723 + for (i = 0; i < length; i++) {
3724 + writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
3725 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
3726 + if (ret < 0)
3727 + return ret;
3728 + }
3729 + return 0;
3730 +}
3731 +
3732 +static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
3733 + const u8 *buf)
3734 +{
3735 + int i, bufidx, data;
3736 +
3737 + mt8173_nor_set_addr(mt8173_nor, addr);
3738 +
3739 + bufidx = 0;
3740 + for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
3741 + data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
3742 + buf[bufidx + 1]<<8 | buf[bufidx];
3743 + bufidx += 4;
3744 + writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
3745 + }
3746 + return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
3747 +}
3748 +
3749 +static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
3750 + size_t *retlen, const u_char *buf)
3751 +{
3752 + int ret;
3753 + struct mt8173_nor *mt8173_nor = nor->priv;
3754 +
3755 + ret = mt8173_nor_write_buffer_enable(mt8173_nor);
3756 + if (ret < 0)
3757 + dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
3758 +
3759 + while (len >= SFLASH_WRBUF_SIZE) {
3760 + ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
3761 + if (ret < 0)
3762 + dev_err(mt8173_nor->dev, "write buffer failed!\n");
3763 + len -= SFLASH_WRBUF_SIZE;
3764 + to += SFLASH_WRBUF_SIZE;
3765 + buf += SFLASH_WRBUF_SIZE;
3766 + (*retlen) += SFLASH_WRBUF_SIZE;
3767 + }
3768 + ret = mt8173_nor_write_buffer_disable(mt8173_nor);
3769 + if (ret < 0)
3770 + dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
3771 +
3772 + if (len) {
3773 + ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
3774 + (u8 *)buf);
3775 + if (ret < 0)
3776 + dev_err(mt8173_nor->dev, "write single byte failed!\n");
3777 + (*retlen) += len;
3778 + }
3779 +}
3780 +
3781 +static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
3782 +{
3783 + int ret;
3784 + struct mt8173_nor *mt8173_nor = nor->priv;
3785 +
3786 + switch (opcode) {
3787 + case SPINOR_OP_RDSR:
3788 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
3789 + if (ret < 0)
3790 + return ret;
3791 + if (len == 1)
3792 + *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
3793 + else
3794 + dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
3795 + break;
3796 + default:
3797 + ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
3798 + break;
3799 + }
3800 + return ret;
3801 +}
3802 +
3803 +static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
3804 + int len)
3805 +{
3806 + int ret;
3807 + struct mt8173_nor *mt8173_nor = nor->priv;
3808 +
3809 + switch (opcode) {
3810 + case SPINOR_OP_WRSR:
3811 + /* We only handle 1 byte */
3812 + ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
3813 + break;
3814 + default:
3815 + ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
3816 + if (ret)
3817 + dev_warn(mt8173_nor->dev, "write reg failure!\n");
3818 + break;
3819 + }
3820 + return ret;
3821 +}
3822 +
3823 +static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
3824 + struct device_node *flash_node)
3825 +{
3826 + int ret;
3827 + struct spi_nor *nor;
3828 +
3829 + /* initialize controller to accept commands */
3830 + writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
3831 +
3832 + nor = &mt8173_nor->nor;
3833 + nor->dev = mt8173_nor->dev;
3834 + nor->priv = mt8173_nor;
3835 + spi_nor_set_flash_node(nor, flash_node);
3836 +
3837 + /* fill the hooks to spi nor */
3838 + nor->read = mt8173_nor_read;
3839 + nor->read_reg = mt8173_nor_read_reg;
3840 + nor->write = mt8173_nor_write;
3841 + nor->write_reg = mt8173_nor_write_reg;
3842 + nor->mtd.name = "mtk_nor";
3843 + /* initialized with NULL */
3844 + ret = spi_nor_scan(nor, NULL, SPI_NOR_DUAL);
3845 + if (ret)
3846 + return ret;
3847 +
3848 + return mtd_device_register(&nor->mtd, NULL, 0);
3849 +}
3850 +
3851 +static int mtk_nor_drv_probe(struct platform_device *pdev)
3852 +{
3853 + struct device_node *flash_np;
3854 + struct resource *res;
3855 + int ret;
3856 + struct mt8173_nor *mt8173_nor;
3857 +
3858 + if (!pdev->dev.of_node) {
3859 + dev_err(&pdev->dev, "No DT found\n");
3860 + return -EINVAL;
3861 + }
3862 +
3863 + mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
3864 + if (!mt8173_nor)
3865 + return -ENOMEM;
3866 + platform_set_drvdata(pdev, mt8173_nor);
3867 +
3868 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3869 + mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
3870 + if (IS_ERR(mt8173_nor->base))
3871 + return PTR_ERR(mt8173_nor->base);
3872 +
3873 + mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
3874 + if (IS_ERR(mt8173_nor->spi_clk))
3875 + return PTR_ERR(mt8173_nor->spi_clk);
3876 +
3877 + mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
3878 + if (IS_ERR(mt8173_nor->nor_clk))
3879 + return PTR_ERR(mt8173_nor->nor_clk);
3880 +
3881 + mt8173_nor->dev = &pdev->dev;
3882 + ret = clk_prepare_enable(mt8173_nor->spi_clk);
3883 + if (ret)
3884 + return ret;
3885 +
3886 + ret = clk_prepare_enable(mt8173_nor->nor_clk);
3887 + if (ret) {
3888 + clk_disable_unprepare(mt8173_nor->spi_clk);
3889 + return ret;
3890 + }
3891 + /* only support one attached flash */
3892 + flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
3893 + if (!flash_np) {
3894 + dev_err(&pdev->dev, "no SPI flash device to configure\n");
3895 + ret = -ENODEV;
3896 + goto nor_free;
3897 + }
3898 + ret = mtk_nor_init(mt8173_nor, flash_np);
3899 +
3900 +nor_free:
3901 + if (ret) {
3902 + clk_disable_unprepare(mt8173_nor->spi_clk);
3903 + clk_disable_unprepare(mt8173_nor->nor_clk);
3904 + }
3905 + return ret;
3906 +}
3907 +
3908 +static int mtk_nor_drv_remove(struct platform_device *pdev)
3909 +{
3910 + struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
3911 +
3912 + clk_disable_unprepare(mt8173_nor->spi_clk);
3913 + clk_disable_unprepare(mt8173_nor->nor_clk);
3914 + return 0;
3915 +}
3916 +
3917 +static const struct of_device_id mtk_nor_of_ids[] = {
3918 + { .compatible = "mediatek,mt8173-nor"},
3919 + { /* sentinel */ }
3920 +};
3921 +MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
3922 +
3923 +static struct platform_driver mtk_nor_driver = {
3924 + .probe = mtk_nor_drv_probe,
3925 + .remove = mtk_nor_drv_remove,
3926 + .driver = {
3927 + .name = "mtk-nor",
3928 + .of_match_table = mtk_nor_of_ids,
3929 + },
3930 +};
3931 +
3932 +module_platform_driver(mtk_nor_driver);
3933 +MODULE_LICENSE("GPL v2");
3934 +MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
3935 --- a/drivers/mtd/spi-nor/spi-nor.c
3936 +++ b/drivers/mtd/spi-nor/spi-nor.c
3937 @@ -38,6 +38,7 @@
3938 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
3939
3940 #define SPI_NOR_MAX_ID_LEN 6
3941 +#define SPI_NOR_MAX_ADDR_WIDTH 4
3942
3943 struct flash_info {
3944 char *name;
3945 @@ -60,14 +61,20 @@ struct flash_info {
3946 u16 addr_width;
3947
3948 u16 flags;
3949 -#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
3950 -#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
3951 -#define SST_WRITE 0x04 /* use SST byte programming */
3952 -#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
3953 -#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
3954 -#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
3955 -#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
3956 -#define USE_FSR 0x80 /* use flag status register */
3957 +#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
3958 +#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
3959 +#define SST_WRITE BIT(2) /* use SST byte programming */
3960 +#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
3961 +#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
3962 +#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
3963 +#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
3964 +#define USE_FSR BIT(7) /* use flag status register */
3965 +#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
3966 +#define SPI_NOR_HAS_TB BIT(9) /*
3967 + * Flash SR has Top/Bottom (TB) protect
3968 + * bit. Must be used with
3969 + * SPI_NOR_HAS_LOCK.
3970 + */
3971 };
3972
3973 #define JEDEC_MFR(info) ((info)->id[0])
3974 @@ -313,6 +320,29 @@ static void spi_nor_unlock_and_unprep(st
3975 }
3976
3977 /*
3978 + * Initiate the erasure of a single sector
3979 + */
3980 +static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
3981 +{
3982 + u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
3983 + int i;
3984 +
3985 + if (nor->erase)
3986 + return nor->erase(nor, addr);
3987 +
3988 + /*
3989 + * Default implementation, if driver doesn't have a specialized HW
3990 + * control
3991 + */
3992 + for (i = nor->addr_width - 1; i >= 0; i--) {
3993 + buf[i] = addr & 0xff;
3994 + addr >>= 8;
3995 + }
3996 +
3997 + return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
3998 +}
3999 +
4000 +/*
4001 * Erase an address range on the nor chip. The address range may extend
4002 * one or more erase sectors. Return an error is there is a problem erasing.
4003 */
4004 @@ -371,10 +401,9 @@ static int spi_nor_erase(struct mtd_info
4005 while (len) {
4006 write_enable(nor);
4007
4008 - if (nor->erase(nor, addr)) {
4009 - ret = -EIO;
4010 + ret = spi_nor_erase_sector(nor, addr);
4011 + if (ret)
4012 goto erase_err;
4013 - }
4014
4015 addr += mtd->erasesize;
4016 len -= mtd->erasesize;
4017 @@ -387,17 +416,13 @@ static int spi_nor_erase(struct mtd_info
4018
4019 write_disable(nor);
4020
4021 +erase_err:
4022 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
4023
4024 - instr->state = MTD_ERASE_DONE;
4025 + instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
4026 mtd_erase_callback(instr);
4027
4028 return ret;
4029 -
4030 -erase_err:
4031 - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
4032 - instr->state = MTD_ERASE_FAILED;
4033 - return ret;
4034 }
4035
4036 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
4037 @@ -415,32 +440,58 @@ static void stm_get_locked_range(struct
4038 } else {
4039 pow = ((sr & mask) ^ mask) >> shift;
4040 *len = mtd->size >> pow;
4041 - *ofs = mtd->size - *len;
4042 + if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
4043 + *ofs = 0;
4044 + else
4045 + *ofs = mtd->size - *len;
4046 }
4047 }
4048
4049 /*
4050 - * Return 1 if the entire region is locked, 0 otherwise
4051 + * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
4052 + * @locked is false); 0 otherwise
4053 */
4054 -static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4055 - u8 sr)
4056 +static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4057 + u8 sr, bool locked)
4058 {
4059 loff_t lock_offs;
4060 uint64_t lock_len;
4061
4062 + if (!len)
4063 + return 1;
4064 +
4065 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
4066
4067 - return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
4068 + if (locked)
4069 + /* Requested range is a sub-range of locked range */
4070 + return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
4071 + else
4072 + /* Requested range does not overlap with locked range */
4073 + return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
4074 +}
4075 +
4076 +static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4077 + u8 sr)
4078 +{
4079 + return stm_check_lock_status_sr(nor, ofs, len, sr, true);
4080 +}
4081 +
4082 +static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4083 + u8 sr)
4084 +{
4085 + return stm_check_lock_status_sr(nor, ofs, len, sr, false);
4086 }
4087
4088 /*
4089 * Lock a region of the flash. Compatible with ST Micro and similar flash.
4090 - * Supports only the block protection bits BP{0,1,2} in the status register
4091 + * Supports the block protection bits BP{0,1,2} in the status register
4092 * (SR). Does not support these features found in newer SR bitfields:
4093 - * - TB: top/bottom protect - only handle TB=0 (top protect)
4094 * - SEC: sector/block protect - only handle SEC=0 (block protect)
4095 * - CMP: complement protect - only support CMP=0 (range is not complemented)
4096 *
4097 + * Support for the following is provided conditionally for some flash:
4098 + * - TB: top/bottom protect
4099 + *
4100 * Sample table portion for 8MB flash (Winbond w25q64fw):
4101 *
4102 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
4103 @@ -453,26 +504,55 @@ static int stm_is_locked_sr(struct spi_n
4104 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
4105 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
4106 * X | X | 1 | 1 | 1 | 8 MB | ALL
4107 + * ------|-------|-------|-------|-------|---------------|-------------------
4108 + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
4109 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
4110 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
4111 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
4112 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
4113 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
4114 *
4115 * Returns negative on errors, 0 on success.
4116 */
4117 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
4118 {
4119 struct mtd_info *mtd = &nor->mtd;
4120 - u8 status_old, status_new;
4121 + int status_old, status_new;
4122 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
4123 u8 shift = ffs(mask) - 1, pow, val;
4124 + loff_t lock_len;
4125 + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
4126 + bool use_top;
4127 + int ret;
4128
4129 status_old = read_sr(nor);
4130 + if (status_old < 0)
4131 + return status_old;
4132
4133 - /* SPI NOR always locks to the end */
4134 - if (ofs + len != mtd->size) {
4135 - /* Does combined region extend to end? */
4136 - if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
4137 - status_old))
4138 - return -EINVAL;
4139 - len = mtd->size - ofs;
4140 - }
4141 + /* If nothing in our range is unlocked, we don't need to do anything */
4142 + if (stm_is_locked_sr(nor, ofs, len, status_old))
4143 + return 0;
4144 +
4145 + /* If anything below us is unlocked, we can't use 'bottom' protection */
4146 + if (!stm_is_locked_sr(nor, 0, ofs, status_old))
4147 + can_be_bottom = false;
4148 +
4149 + /* If anything above us is unlocked, we can't use 'top' protection */
4150 + if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
4151 + status_old))
4152 + can_be_top = false;
4153 +
4154 + if (!can_be_bottom && !can_be_top)
4155 + return -EINVAL;
4156 +
4157 + /* Prefer top, if both are valid */
4158 + use_top = can_be_top;
4159 +
4160 + /* lock_len: length of region that should end up locked */
4161 + if (use_top)
4162 + lock_len = mtd->size - ofs;
4163 + else
4164 + lock_len = ofs + len;
4165
4166 /*
4167 * Need smallest pow such that:
4168 @@ -483,7 +563,7 @@ static int stm_lock(struct spi_nor *nor,
4169 *
4170 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
4171 */
4172 - pow = ilog2(mtd->size) - ilog2(len);
4173 + pow = ilog2(mtd->size) - ilog2(lock_len);
4174 val = mask - (pow << shift);
4175 if (val & ~mask)
4176 return -EINVAL;
4177 @@ -491,14 +571,27 @@ static int stm_lock(struct spi_nor *nor,
4178 if (!(val & mask))
4179 return -EINVAL;
4180
4181 - status_new = (status_old & ~mask) | val;
4182 + status_new = (status_old & ~mask & ~SR_TB) | val;
4183 +
4184 + /* Disallow further writes if WP pin is asserted */
4185 + status_new |= SR_SRWD;
4186 +
4187 + if (!use_top)
4188 + status_new |= SR_TB;
4189 +
4190 + /* Don't bother if they're the same */
4191 + if (status_new == status_old)
4192 + return 0;
4193
4194 /* Only modify protection if it will not unlock other areas */
4195 - if ((status_new & mask) <= (status_old & mask))
4196 + if ((status_new & mask) < (status_old & mask))
4197 return -EINVAL;
4198
4199 write_enable(nor);
4200 - return write_sr(nor, status_new);
4201 + ret = write_sr(nor, status_new);
4202 + if (ret)
4203 + return ret;
4204 + return spi_nor_wait_till_ready(nor);
4205 }
4206
4207 /*
4208 @@ -509,17 +602,43 @@ static int stm_lock(struct spi_nor *nor,
4209 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
4210 {
4211 struct mtd_info *mtd = &nor->mtd;
4212 - uint8_t status_old, status_new;
4213 + int status_old, status_new;
4214 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
4215 u8 shift = ffs(mask) - 1, pow, val;
4216 + loff_t lock_len;
4217 + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
4218 + bool use_top;
4219 + int ret;
4220
4221 status_old = read_sr(nor);
4222 + if (status_old < 0)
4223 + return status_old;
4224 +
4225 + /* If nothing in our range is locked, we don't need to do anything */
4226 + if (stm_is_unlocked_sr(nor, ofs, len, status_old))
4227 + return 0;
4228 +
4229 + /* If anything below us is locked, we can't use 'top' protection */
4230 + if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
4231 + can_be_top = false;
4232 +
4233 + /* If anything above us is locked, we can't use 'bottom' protection */
4234 + if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
4235 + status_old))
4236 + can_be_bottom = false;
4237
4238 - /* Cannot unlock; would unlock larger region than requested */
4239 - if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
4240 - status_old))
4241 + if (!can_be_bottom && !can_be_top)
4242 return -EINVAL;
4243
4244 + /* Prefer top, if both are valid */
4245 + use_top = can_be_top;
4246 +
4247 + /* lock_len: length of region that should remain locked */
4248 + if (use_top)
4249 + lock_len = mtd->size - (ofs + len);
4250 + else
4251 + lock_len = ofs;
4252 +
4253 /*
4254 * Need largest pow such that:
4255 *
4256 @@ -529,8 +648,8 @@ static int stm_unlock(struct spi_nor *no
4257 *
4258 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
4259 */
4260 - pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
4261 - if (ofs + len == mtd->size) {
4262 + pow = ilog2(mtd->size) - order_base_2(lock_len);
4263 + if (lock_len == 0) {
4264 val = 0; /* fully unlocked */
4265 } else {
4266 val = mask - (pow << shift);
4267 @@ -539,14 +658,28 @@ static int stm_unlock(struct spi_nor *no
4268 return -EINVAL;
4269 }
4270
4271 - status_new = (status_old & ~mask) | val;
4272 + status_new = (status_old & ~mask & ~SR_TB) | val;
4273 +
4274 + /* Don't protect status register if we're fully unlocked */
4275 + if (lock_len == mtd->size)
4276 + status_new &= ~SR_SRWD;
4277 +
4278 + if (!use_top)
4279 + status_new |= SR_TB;
4280 +
4281 + /* Don't bother if they're the same */
4282 + if (status_new == status_old)
4283 + return 0;
4284
4285 /* Only modify protection if it will not lock other areas */
4286 - if ((status_new & mask) >= (status_old & mask))
4287 + if ((status_new & mask) > (status_old & mask))
4288 return -EINVAL;
4289
4290 write_enable(nor);
4291 - return write_sr(nor, status_new);
4292 + ret = write_sr(nor, status_new);
4293 + if (ret)
4294 + return ret;
4295 + return spi_nor_wait_till_ready(nor);
4296 }
4297
4298 /*
4299 @@ -736,8 +869,8 @@ static const struct flash_info spi_nor_i
4300 { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
4301 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4302 { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4303 - { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
4304 - { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
4305 + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
4306 + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
4307 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
4308 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
4309 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
4310 @@ -771,6 +904,7 @@ static const struct flash_info spi_nor_i
4311 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4312 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4313 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
4314 + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4315 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
4316 { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
4317 { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
4318 @@ -834,11 +968,23 @@ static const struct flash_info spi_nor_i
4319 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
4320 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
4321 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
4322 - { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4323 + {
4324 + "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
4325 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4326 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4327 + },
4328 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
4329 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
4330 - { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4331 - { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4332 + {
4333 + "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
4334 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4335 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4336 + },
4337 + {
4338 + "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
4339 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4340 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4341 + },
4342 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
4343 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
4344 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
4345 @@ -861,7 +1007,7 @@ static const struct flash_info *spi_nor_
4346
4347 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
4348 if (tmp < 0) {
4349 - dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
4350 + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
4351 return ERR_PTR(tmp);
4352 }
4353
4354 @@ -872,7 +1018,7 @@ static const struct flash_info *spi_nor_
4355 return &spi_nor_ids[tmp];
4356 }
4357 }
4358 - dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n",
4359 + dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
4360 id[0], id[1], id[2]);
4361 return ERR_PTR(-ENODEV);
4362 }
4363 @@ -1018,6 +1164,8 @@ static int macronix_quad_enable(struct s
4364 int ret, val;
4365
4366 val = read_sr(nor);
4367 + if (val < 0)
4368 + return val;
4369 write_enable(nor);
4370
4371 write_sr(nor, val | SR_QUAD_EN_MX);
4372 @@ -1099,7 +1247,7 @@ static int set_quad_mode(struct spi_nor
4373 static int spi_nor_check(struct spi_nor *nor)
4374 {
4375 if (!nor->dev || !nor->read || !nor->write ||
4376 - !nor->read_reg || !nor->write_reg || !nor->erase) {
4377 + !nor->read_reg || !nor->write_reg) {
4378 pr_err("spi-nor: please fill all the necessary fields!\n");
4379 return -EINVAL;
4380 }
4381 @@ -1112,7 +1260,7 @@ int spi_nor_scan(struct spi_nor *nor, co
4382 const struct flash_info *info = NULL;
4383 struct device *dev = nor->dev;
4384 struct mtd_info *mtd = &nor->mtd;
4385 - struct device_node *np = nor->flash_node;
4386 + struct device_node *np = spi_nor_get_flash_node(nor);
4387 int ret;
4388 int i;
4389
4390 @@ -1162,9 +1310,11 @@ int spi_nor_scan(struct spi_nor *nor, co
4391 if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
4392 JEDEC_MFR(info) == SNOR_MFR_INTEL ||
4393 JEDEC_MFR(info) == SNOR_MFR_MACRONIX ||
4394 - JEDEC_MFR(info) == SNOR_MFR_SST) {
4395 + JEDEC_MFR(info) == SNOR_MFR_SST ||
4396 + info->flags & SPI_NOR_HAS_LOCK) {
4397 write_enable(nor);
4398 write_sr(nor, 0);
4399 + spi_nor_wait_till_ready(nor);
4400 }
4401
4402 if (!mtd->name)
4403 @@ -1178,7 +1328,8 @@ int spi_nor_scan(struct spi_nor *nor, co
4404 mtd->_read = spi_nor_read;
4405
4406 /* NOR protection support for STmicro/Micron chips and similar */
4407 - if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
4408 + if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
4409 + info->flags & SPI_NOR_HAS_LOCK) {
4410 nor->flash_lock = stm_lock;
4411 nor->flash_unlock = stm_unlock;
4412 nor->flash_is_locked = stm_is_locked;
4413 @@ -1198,6 +1349,8 @@ int spi_nor_scan(struct spi_nor *nor, co
4414
4415 if (info->flags & USE_FSR)
4416 nor->flags |= SNOR_F_USE_FSR;
4417 + if (info->flags & SPI_NOR_HAS_TB)
4418 + nor->flags |= SNOR_F_HAS_SR_TB;
4419
4420 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
4421 /* prefer "small sector" erase if possible */
4422 @@ -1300,6 +1453,12 @@ int spi_nor_scan(struct spi_nor *nor, co
4423 nor->addr_width = 3;
4424 }
4425
4426 + if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4427 + dev_err(dev, "address width is too large: %u\n",
4428 + nor->addr_width);
4429 + return -EINVAL;
4430 + }
4431 +
4432 nor->read_dummy = spi_nor_read_dummy_cycles(nor);
4433
4434 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
4435 --- a/drivers/mtd/tests/mtd_nandecctest.c
4436 +++ b/drivers/mtd/tests/mtd_nandecctest.c
4437 @@ -187,7 +187,7 @@ static int double_bit_error_detect(void
4438 __nand_calculate_ecc(error_data, size, calc_ecc);
4439 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
4440
4441 - return (ret == -1) ? 0 : -EINVAL;
4442 + return (ret == -EBADMSG) ? 0 : -EINVAL;
4443 }
4444
4445 static const struct nand_ecc_test nand_ecc_test[] = {
4446 --- a/drivers/mtd/tests/oobtest.c
4447 +++ b/drivers/mtd/tests/oobtest.c
4448 @@ -215,19 +215,19 @@ static int verify_eraseblock(int ebnum)
4449 pr_info("ignoring error as within bitflip_limit\n");
4450 }
4451
4452 - if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
4453 + if (use_offset != 0 || use_len < mtd->oobavail) {
4454 int k;
4455
4456 ops.mode = MTD_OPS_AUTO_OOB;
4457 ops.len = 0;
4458 ops.retlen = 0;
4459 - ops.ooblen = mtd->ecclayout->oobavail;
4460 + ops.ooblen = mtd->oobavail;
4461 ops.oobretlen = 0;
4462 ops.ooboffs = 0;
4463 ops.datbuf = NULL;
4464 ops.oobbuf = readbuf;
4465 err = mtd_read_oob(mtd, addr, &ops);
4466 - if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
4467 + if (err || ops.oobretlen != mtd->oobavail) {
4468 pr_err("error: readoob failed at %#llx\n",
4469 (long long)addr);
4470 errcnt += 1;
4471 @@ -244,7 +244,7 @@ static int verify_eraseblock(int ebnum)
4472 /* verify post-(use_offset + use_len) area for 0xff */
4473 k = use_offset + use_len;
4474 bitflips += memffshow(addr, k, readbuf + k,
4475 - mtd->ecclayout->oobavail - k);
4476 + mtd->oobavail - k);
4477
4478 if (bitflips > bitflip_limit) {
4479 pr_err("error: verify failed at %#llx\n",
4480 @@ -269,8 +269,8 @@ static int verify_eraseblock_in_one_go(i
4481 struct mtd_oob_ops ops;
4482 int err = 0;
4483 loff_t addr = (loff_t)ebnum * mtd->erasesize;
4484 - size_t len = mtd->ecclayout->oobavail * pgcnt;
4485 - size_t oobavail = mtd->ecclayout->oobavail;
4486 + size_t len = mtd->oobavail * pgcnt;
4487 + size_t oobavail = mtd->oobavail;
4488 size_t bitflips;
4489 int i;
4490
4491 @@ -394,8 +394,8 @@ static int __init mtd_oobtest_init(void)
4492 goto out;
4493
4494 use_offset = 0;
4495 - use_len = mtd->ecclayout->oobavail;
4496 - use_len_max = mtd->ecclayout->oobavail;
4497 + use_len = mtd->oobavail;
4498 + use_len_max = mtd->oobavail;
4499 vary_offset = 0;
4500
4501 /* First test: write all OOB, read it back and verify */
4502 @@ -460,8 +460,8 @@ static int __init mtd_oobtest_init(void)
4503
4504 /* Write all eraseblocks */
4505 use_offset = 0;
4506 - use_len = mtd->ecclayout->oobavail;
4507 - use_len_max = mtd->ecclayout->oobavail;
4508 + use_len = mtd->oobavail;
4509 + use_len_max = mtd->oobavail;
4510 vary_offset = 1;
4511 prandom_seed_state(&rnd_state, 5);
4512
4513 @@ -471,8 +471,8 @@ static int __init mtd_oobtest_init(void)
4514
4515 /* Check all eraseblocks */
4516 use_offset = 0;
4517 - use_len = mtd->ecclayout->oobavail;
4518 - use_len_max = mtd->ecclayout->oobavail;
4519 + use_len = mtd->oobavail;
4520 + use_len_max = mtd->oobavail;
4521 vary_offset = 1;
4522 prandom_seed_state(&rnd_state, 5);
4523 err = verify_all_eraseblocks();
4524 @@ -480,8 +480,8 @@ static int __init mtd_oobtest_init(void)
4525 goto out;
4526
4527 use_offset = 0;
4528 - use_len = mtd->ecclayout->oobavail;
4529 - use_len_max = mtd->ecclayout->oobavail;
4530 + use_len = mtd->oobavail;
4531 + use_len_max = mtd->oobavail;
4532 vary_offset = 0;
4533
4534 /* Fourth test: try to write off end of device */
4535 @@ -501,7 +501,7 @@ static int __init mtd_oobtest_init(void)
4536 ops.retlen = 0;
4537 ops.ooblen = 1;
4538 ops.oobretlen = 0;
4539 - ops.ooboffs = mtd->ecclayout->oobavail;
4540 + ops.ooboffs = mtd->oobavail;
4541 ops.datbuf = NULL;
4542 ops.oobbuf = writebuf;
4543 pr_info("attempting to start write past end of OOB\n");
4544 @@ -521,7 +521,7 @@ static int __init mtd_oobtest_init(void)
4545 ops.retlen = 0;
4546 ops.ooblen = 1;
4547 ops.oobretlen = 0;
4548 - ops.ooboffs = mtd->ecclayout->oobavail;
4549 + ops.ooboffs = mtd->oobavail;
4550 ops.datbuf = NULL;
4551 ops.oobbuf = readbuf;
4552 pr_info("attempting to start read past end of OOB\n");
4553 @@ -543,7 +543,7 @@ static int __init mtd_oobtest_init(void)
4554 ops.mode = MTD_OPS_AUTO_OOB;
4555 ops.len = 0;
4556 ops.retlen = 0;
4557 - ops.ooblen = mtd->ecclayout->oobavail + 1;
4558 + ops.ooblen = mtd->oobavail + 1;
4559 ops.oobretlen = 0;
4560 ops.ooboffs = 0;
4561 ops.datbuf = NULL;
4562 @@ -563,7 +563,7 @@ static int __init mtd_oobtest_init(void)
4563 ops.mode = MTD_OPS_AUTO_OOB;
4564 ops.len = 0;
4565 ops.retlen = 0;
4566 - ops.ooblen = mtd->ecclayout->oobavail + 1;
4567 + ops.ooblen = mtd->oobavail + 1;
4568 ops.oobretlen = 0;
4569 ops.ooboffs = 0;
4570 ops.datbuf = NULL;
4571 @@ -587,7 +587,7 @@ static int __init mtd_oobtest_init(void)
4572 ops.mode = MTD_OPS_AUTO_OOB;
4573 ops.len = 0;
4574 ops.retlen = 0;
4575 - ops.ooblen = mtd->ecclayout->oobavail;
4576 + ops.ooblen = mtd->oobavail;
4577 ops.oobretlen = 0;
4578 ops.ooboffs = 1;
4579 ops.datbuf = NULL;
4580 @@ -607,7 +607,7 @@ static int __init mtd_oobtest_init(void)
4581 ops.mode = MTD_OPS_AUTO_OOB;
4582 ops.len = 0;
4583 ops.retlen = 0;
4584 - ops.ooblen = mtd->ecclayout->oobavail;
4585 + ops.ooblen = mtd->oobavail;
4586 ops.oobretlen = 0;
4587 ops.ooboffs = 1;
4588 ops.datbuf = NULL;
4589 @@ -638,7 +638,7 @@ static int __init mtd_oobtest_init(void)
4590 for (i = 0; i < ebcnt - 1; ++i) {
4591 int cnt = 2;
4592 int pg;
4593 - size_t sz = mtd->ecclayout->oobavail;
4594 + size_t sz = mtd->oobavail;
4595 if (bbt[i] || bbt[i + 1])
4596 continue;
4597 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
4598 @@ -673,13 +673,12 @@ static int __init mtd_oobtest_init(void)
4599 for (i = 0; i < ebcnt - 1; ++i) {
4600 if (bbt[i] || bbt[i + 1])
4601 continue;
4602 - prandom_bytes_state(&rnd_state, writebuf,
4603 - mtd->ecclayout->oobavail * 2);
4604 + prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
4605 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
4606 ops.mode = MTD_OPS_AUTO_OOB;
4607 ops.len = 0;
4608 ops.retlen = 0;
4609 - ops.ooblen = mtd->ecclayout->oobavail * 2;
4610 + ops.ooblen = mtd->oobavail * 2;
4611 ops.oobretlen = 0;
4612 ops.ooboffs = 0;
4613 ops.datbuf = NULL;
4614 @@ -688,7 +687,7 @@ static int __init mtd_oobtest_init(void)
4615 if (err)
4616 goto out;
4617 if (memcmpshow(addr, readbuf, writebuf,
4618 - mtd->ecclayout->oobavail * 2)) {
4619 + mtd->oobavail * 2)) {
4620 pr_err("error: verify failed at %#llx\n",
4621 (long long)addr);
4622 errcnt += 1;
4623 --- a/drivers/mtd/tests/pagetest.c
4624 +++ b/drivers/mtd/tests/pagetest.c
4625 @@ -127,13 +127,12 @@ static int crosstest(void)
4626 unsigned char *pp1, *pp2, *pp3, *pp4;
4627
4628 pr_info("crosstest\n");
4629 - pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
4630 + pp1 = kzalloc(pgsize * 4, GFP_KERNEL);
4631 if (!pp1)
4632 return -ENOMEM;
4633 pp2 = pp1 + pgsize;
4634 pp3 = pp2 + pgsize;
4635 pp4 = pp3 + pgsize;
4636 - memset(pp1, 0, pgsize * 4);
4637
4638 addr0 = 0;
4639 for (i = 0; i < ebcnt && bbt[i]; ++i)
4640 --- a/include/linux/mtd/bbm.h
4641 +++ b/include/linux/mtd/bbm.h
4642 @@ -166,7 +166,6 @@ struct bbm_info {
4643 };
4644
4645 /* OneNAND BBT interface */
4646 -extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
4647 extern int onenand_default_bbt(struct mtd_info *mtd);
4648
4649 #endif /* __LINUX_MTD_BBM_H */
4650 --- a/include/linux/mtd/fsmc.h
4651 +++ b/include/linux/mtd/fsmc.h
4652 @@ -103,24 +103,6 @@
4653
4654 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
4655
4656 -/*
4657 - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
4658 - * and it has to be read consecutively and immediately after the 512
4659 - * byte data block for hardware to generate the error bit offsets
4660 - * Managing the ecc bytes in the following way is easier. This way is
4661 - * similar to oobfree structure maintained already in u-boot nand driver
4662 - */
4663 -#define MAX_ECCPLACE_ENTRIES 32
4664 -
4665 -struct fsmc_nand_eccplace {
4666 - uint8_t offset;
4667 - uint8_t length;
4668 -};
4669 -
4670 -struct fsmc_eccplace {
4671 - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
4672 -};
4673 -
4674 struct fsmc_nand_timings {
4675 uint8_t tclr;
4676 uint8_t tar;
4677 --- a/include/linux/mtd/inftl.h
4678 +++ b/include/linux/mtd/inftl.h
4679 @@ -44,7 +44,6 @@ struct INFTLrecord {
4680 unsigned int nb_blocks; /* number of physical blocks */
4681 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
4682 struct erase_info instr;
4683 - struct nand_ecclayout oobinfo;
4684 };
4685
4686 int INFTL_mount(struct INFTLrecord *s);
4687 --- a/include/linux/mtd/map.h
4688 +++ b/include/linux/mtd/map.h
4689 @@ -142,7 +142,9 @@
4690 #endif
4691
4692 #ifndef map_bankwidth
4693 +#ifdef CONFIG_MTD
4694 #warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
4695 +#endif
4696 static inline int map_bankwidth(void *map)
4697 {
4698 BUG();
4699 @@ -238,8 +240,11 @@ struct map_info {
4700 If there is no cache to care about this can be set to NULL. */
4701 void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
4702
4703 - /* set_vpp() must handle being reentered -- enable, enable, disable
4704 - must leave it enabled. */
4705 + /* This will be called with 1 as parameter when the first map user
4706 + * needs VPP, and called with 0 when the last user exits. The map
4707 + * core maintains a reference counter, and assumes that VPP is a
4708 + * global resource applying to all mapped flash chips on the system.
4709 + */
4710 void (*set_vpp)(struct map_info *, int);
4711
4712 unsigned long pfow_base;
4713 --- a/include/linux/mtd/mtd.h
4714 +++ b/include/linux/mtd/mtd.h
4715 @@ -100,17 +100,35 @@ struct mtd_oob_ops {
4716
4717 #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
4718 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
4719 +/**
4720 + * struct mtd_oob_region - oob region definition
4721 + * @offset: region offset
4722 + * @length: region length
4723 + *
4724 + * This structure describes a region of the OOB area, and is used
4725 + * to retrieve ECC or free bytes sections.
4726 + * Each section is defined by an offset within the OOB area and a
4727 + * length.
4728 + */
4729 +struct mtd_oob_region {
4730 + u32 offset;
4731 + u32 length;
4732 +};
4733 +
4734 /*
4735 - * Internal ECC layout control structure. For historical reasons, there is a
4736 - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
4737 - * for export to user-space via the ECCGETLAYOUT ioctl.
4738 - * nand_ecclayout should be expandable in the future simply by the above macros.
4739 + * struct mtd_ooblayout_ops - NAND OOB layout operations
4740 + * @ecc: function returning an ECC region in the OOB area.
4741 + * Should return -ERANGE if %section exceeds the total number of
4742 + * ECC sections.
4743 + * @free: function returning a free region in the OOB area.
4744 + * Should return -ERANGE if %section exceeds the total number of
4745 + * free sections.
4746 */
4747 -struct nand_ecclayout {
4748 - __u32 eccbytes;
4749 - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
4750 - __u32 oobavail;
4751 - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
4752 +struct mtd_ooblayout_ops {
4753 + int (*ecc)(struct mtd_info *mtd, int section,
4754 + struct mtd_oob_region *oobecc);
4755 + int (*free)(struct mtd_info *mtd, int section,
4756 + struct mtd_oob_region *oobfree);
4757 };
4758
4759 struct module; /* only needed for owner field in mtd_info */
4760 @@ -171,8 +189,8 @@ struct mtd_info {
4761 const char *name;
4762 int index;
4763
4764 - /* ECC layout structure pointer - read only! */
4765 - struct nand_ecclayout *ecclayout;
4766 + /* OOB layout description */
4767 + const struct mtd_ooblayout_ops *ooblayout;
4768
4769 /* the ecc step size. */
4770 unsigned int ecc_step_size;
4771 @@ -258,6 +276,46 @@ struct mtd_info {
4772 int usecount;
4773 };
4774
4775 +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
4776 + struct mtd_oob_region *oobecc);
4777 +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
4778 + int *section,
4779 + struct mtd_oob_region *oobregion);
4780 +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
4781 + const u8 *oobbuf, int start, int nbytes);
4782 +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
4783 + u8 *oobbuf, int start, int nbytes);
4784 +int mtd_ooblayout_free(struct mtd_info *mtd, int section,
4785 + struct mtd_oob_region *oobfree);
4786 +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
4787 + const u8 *oobbuf, int start, int nbytes);
4788 +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
4789 + u8 *oobbuf, int start, int nbytes);
4790 +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
4791 +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
4792 +
4793 +static inline void mtd_set_ooblayout(struct mtd_info *mtd,
4794 + const struct mtd_ooblayout_ops *ooblayout)
4795 +{
4796 + mtd->ooblayout = ooblayout;
4797 +}
4798 +
4799 +static inline void mtd_set_of_node(struct mtd_info *mtd,
4800 + struct device_node *np)
4801 +{
4802 + mtd->dev.of_node = np;
4803 +}
4804 +
4805 +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
4806 +{
4807 + return mtd->dev.of_node;
4808 +}
4809 +
4810 +static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
4811 +{
4812 + return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
4813 +}
4814 +
4815 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
4816 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
4817 void **virt, resource_size_t *phys);
4818 --- a/include/linux/mtd/nand.h
4819 +++ b/include/linux/mtd/nand.h
4820 @@ -119,6 +119,12 @@ typedef enum {
4821 NAND_ECC_SOFT_BCH,
4822 } nand_ecc_modes_t;
4823
4824 +enum nand_ecc_algo {
4825 + NAND_ECC_UNKNOWN,
4826 + NAND_ECC_HAMMING,
4827 + NAND_ECC_BCH,
4828 +};
4829 +
4830 /*
4831 * Constants for Hardware ECC
4832 */
4833 @@ -129,6 +135,14 @@ typedef enum {
4834 /* Enable Hardware ECC before syndrome is read back from flash */
4835 #define NAND_ECC_READSYN 2
4836
4837 +/*
4838 + * Enable generic NAND 'page erased' check. This check is only done when
4839 + * ecc.correct() returns -EBADMSG.
4840 + * Set this flag if your implementation does not fix bitflips in erased
4841 + * pages and you want to rely on the default implementation.
4842 + */
4843 +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
4844 +
4845 /* Bit mask for flags passed to do_nand_read_ecc */
4846 #define NAND_GET_DEVICE 0x80
4847
4848 @@ -160,6 +174,12 @@ typedef enum {
4849 /* Device supports subpage reads */
4850 #define NAND_SUBPAGE_READ 0x00001000
4851
4852 +/*
4853 + * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
4854 + * patterns.
4855 + */
4856 +#define NAND_NEED_SCRAMBLING 0x00002000
4857 +
4858 /* Options valid for Samsung large page devices */
4859 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
4860
4861 @@ -276,15 +296,15 @@ struct nand_onfi_params {
4862 __le16 t_r;
4863 __le16 t_ccs;
4864 __le16 src_sync_timing_mode;
4865 - __le16 src_ssync_features;
4866 + u8 src_ssync_features;
4867 __le16 clk_pin_capacitance_typ;
4868 __le16 io_pin_capacitance_typ;
4869 __le16 input_pin_capacitance_typ;
4870 u8 input_pin_capacitance_max;
4871 u8 driver_strength_support;
4872 __le16 t_int_r;
4873 - __le16 t_ald;
4874 - u8 reserved4[7];
4875 + __le16 t_adl;
4876 + u8 reserved4[8];
4877
4878 /* vendor */
4879 __le16 vendor_revision;
4880 @@ -407,7 +427,7 @@ struct nand_jedec_params {
4881 __le16 input_pin_capacitance_typ;
4882 __le16 clk_pin_capacitance_typ;
4883 u8 driver_strength_support;
4884 - __le16 t_ald;
4885 + __le16 t_adl;
4886 u8 reserved4[36];
4887
4888 /* ECC and endurance block */
4889 @@ -444,6 +464,7 @@ struct nand_hw_control {
4890 /**
4891 * struct nand_ecc_ctrl - Control structure for ECC
4892 * @mode: ECC mode
4893 + * @algo: ECC algorithm
4894 * @steps: number of ECC steps per page
4895 * @size: data bytes per ECC step
4896 * @bytes: ECC bytes per step
4897 @@ -451,12 +472,18 @@ struct nand_hw_control {
4898 * @total: total number of ECC bytes per page
4899 * @prepad: padding information for syndrome based ECC generators
4900 * @postpad: padding information for syndrome based ECC generators
4901 - * @layout: ECC layout control struct pointer
4902 + * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
4903 * @priv: pointer to private ECC control data
4904 * @hwctl: function to control hardware ECC generator. Must only
4905 * be provided if an hardware ECC is available
4906 * @calculate: function for ECC calculation or readback from ECC hardware
4907 - * @correct: function for ECC correction, matching to ECC generator (sw/hw)
4908 + * @correct: function for ECC correction, matching to ECC generator (sw/hw).
4909 + * Should return a positive number representing the number of
4910 + * corrected bitflips, -EBADMSG if the number of bitflips exceed
4911 + * ECC strength, or any other error code if the error is not
4912 + * directly related to correction.
4913 + * If -EBADMSG is returned the input buffers should be left
4914 + * untouched.
4915 * @read_page_raw: function to read a raw page without ECC. This function
4916 * should hide the specific layout used by the ECC
4917 * controller and always return contiguous in-band and
4918 @@ -487,6 +514,7 @@ struct nand_hw_control {
4919 */
4920 struct nand_ecc_ctrl {
4921 nand_ecc_modes_t mode;
4922 + enum nand_ecc_algo algo;
4923 int steps;
4924 int size;
4925 int bytes;
4926 @@ -494,7 +522,7 @@ struct nand_ecc_ctrl {
4927 int strength;
4928 int prepad;
4929 int postpad;
4930 - struct nand_ecclayout *layout;
4931 + unsigned int options;
4932 void *priv;
4933 void (*hwctl)(struct mtd_info *mtd, int mode);
4934 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
4935 @@ -540,11 +568,11 @@ struct nand_buffers {
4936
4937 /**
4938 * struct nand_chip - NAND Private Flash Chip Data
4939 + * @mtd: MTD device registered to the MTD framework
4940 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
4941 * flash device
4942 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
4943 * flash device.
4944 - * @flash_node: [BOARDSPECIFIC] device node describing this instance
4945 * @read_byte: [REPLACEABLE] read one byte from the chip
4946 * @read_word: [REPLACEABLE] read one word from the chip
4947 * @write_byte: [REPLACEABLE] write a single byte to the chip on the
4948 @@ -640,18 +668,17 @@ struct nand_buffers {
4949 */
4950
4951 struct nand_chip {
4952 + struct mtd_info mtd;
4953 void __iomem *IO_ADDR_R;
4954 void __iomem *IO_ADDR_W;
4955
4956 - struct device_node *flash_node;
4957 -
4958 uint8_t (*read_byte)(struct mtd_info *mtd);
4959 u16 (*read_word)(struct mtd_info *mtd);
4960 void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
4961 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
4962 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
4963 void (*select_chip)(struct mtd_info *mtd, int chip);
4964 - int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
4965 + int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
4966 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
4967 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
4968 int (*dev_ready)(struct mtd_info *mtd);
4969 @@ -719,6 +746,40 @@ struct nand_chip {
4970 void *priv;
4971 };
4972
4973 +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
4974 +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
4975 +
4976 +static inline void nand_set_flash_node(struct nand_chip *chip,
4977 + struct device_node *np)
4978 +{
4979 + mtd_set_of_node(&chip->mtd, np);
4980 +}
4981 +
4982 +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
4983 +{
4984 + return mtd_get_of_node(&chip->mtd);
4985 +}
4986 +
4987 +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
4988 +{
4989 + return container_of(mtd, struct nand_chip, mtd);
4990 +}
4991 +
4992 +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
4993 +{
4994 + return &chip->mtd;
4995 +}
4996 +
4997 +static inline void *nand_get_controller_data(struct nand_chip *chip)
4998 +{
4999 + return chip->priv;
5000 +}
5001 +
5002 +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
5003 +{
5004 + chip->priv = priv;
5005 +}
5006 +
5007 /*
5008 * NAND Flash Manufacturer ID Codes
5009 */
5010 @@ -850,7 +911,6 @@ extern int nand_do_read(struct mtd_info
5011 * @chip_delay: R/B delay value in us
5012 * @options: Option flags, e.g. 16bit buswidth
5013 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
5014 - * @ecclayout: ECC layout info structure
5015 * @part_probe_types: NULL-terminated array of probe types
5016 */
5017 struct platform_nand_chip {
5018 @@ -858,7 +918,6 @@ struct platform_nand_chip {
5019 int chip_offset;
5020 int nr_partitions;
5021 struct mtd_partition *partitions;
5022 - struct nand_ecclayout *ecclayout;
5023 int chip_delay;
5024 unsigned int options;
5025 unsigned int bbt_options;
5026 @@ -908,15 +967,6 @@ struct platform_nand_data {
5027 struct platform_nand_ctrl ctrl;
5028 };
5029
5030 -/* Some helpers to access the data structures */
5031 -static inline
5032 -struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
5033 -{
5034 - struct nand_chip *chip = mtd->priv;
5035 -
5036 - return chip->priv;
5037 -}
5038 -
5039 /* return the supported features. */
5040 static inline int onfi_feature(struct nand_chip *chip)
5041 {
5042 --- a/include/linux/mtd/nand_bch.h
5043 +++ b/include/linux/mtd/nand_bch.h
5044 @@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_inf
5045 /*
5046 * Initialize BCH encoder/decoder
5047 */
5048 -struct nand_bch_control *
5049 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
5050 - unsigned int eccbytes, struct nand_ecclayout **ecclayout);
5051 +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
5052 /*
5053 * Release BCH encoder/decoder resources
5054 */
5055 @@ -55,12 +53,10 @@ static inline int
5056 nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
5057 unsigned char *read_ecc, unsigned char *calc_ecc)
5058 {
5059 - return -1;
5060 + return -ENOTSUPP;
5061 }
5062
5063 -static inline struct nand_bch_control *
5064 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
5065 - unsigned int eccbytes, struct nand_ecclayout **ecclayout)
5066 +static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
5067 {
5068 return NULL;
5069 }
5070 --- a/include/linux/mtd/nftl.h
5071 +++ b/include/linux/mtd/nftl.h
5072 @@ -50,7 +50,6 @@ struct NFTLrecord {
5073 unsigned int nb_blocks; /* number of physical blocks */
5074 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
5075 struct erase_info instr;
5076 - struct nand_ecclayout oobinfo;
5077 };
5078
5079 int NFTL_mount(struct NFTLrecord *s);
5080 --- a/include/linux/mtd/onenand.h
5081 +++ b/include/linux/mtd/onenand.h
5082 @@ -80,7 +80,6 @@ struct onenand_bufferram {
5083 * @page_buf: [INTERN] page main data buffer
5084 * @oob_buf: [INTERN] page oob data buffer
5085 * @subpagesize: [INTERN] holds the subpagesize
5086 - * @ecclayout: [REPLACEABLE] the default ecc placement scheme
5087 * @bbm: [REPLACEABLE] pointer to Bad Block Management
5088 * @priv: [OPTIONAL] pointer to private chip date
5089 */
5090 @@ -134,7 +133,6 @@ struct onenand_chip {
5091 #endif
5092
5093 int subpagesize;
5094 - struct nand_ecclayout *ecclayout;
5095
5096 void *bbm;
5097
5098 --- a/include/linux/mtd/partitions.h
5099 +++ b/include/linux/mtd/partitions.h
5100 @@ -42,7 +42,6 @@ struct mtd_partition {
5101 uint64_t size; /* partition size */
5102 uint64_t offset; /* offset within the master MTD space */
5103 uint32_t mask_flags; /* master MTD flags to mask out for this partition */
5104 - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
5105 };
5106
5107 #define MTDPART_OFS_RETAIN (-3)
5108 @@ -56,11 +55,9 @@ struct device_node;
5109 /**
5110 * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
5111 * @origin: for RedBoot, start address of MTD device
5112 - * @of_node: for OF parsers, device node containing partitioning information
5113 */
5114 struct mtd_part_parser_data {
5115 unsigned long origin;
5116 - struct device_node *of_node;
5117 };
5118
5119
5120 @@ -78,14 +75,34 @@ struct mtd_part_parser {
5121 struct list_head list;
5122 struct module *owner;
5123 const char *name;
5124 - int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
5125 + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
5126 struct mtd_part_parser_data *);
5127 + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
5128 enum mtd_parser_type type;
5129 };
5130
5131 -extern void register_mtd_parser(struct mtd_part_parser *parser);
5132 +/* Container for passing around a set of parsed partitions */
5133 +struct mtd_partitions {
5134 + const struct mtd_partition *parts;
5135 + int nr_parts;
5136 + const struct mtd_part_parser *parser;
5137 +};
5138 +
5139 +extern int __register_mtd_parser(struct mtd_part_parser *parser,
5140 + struct module *owner);
5141 +#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
5142 +
5143 extern void deregister_mtd_parser(struct mtd_part_parser *parser);
5144
5145 +/*
5146 + * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
5147 + * do anything special in module init/exit. Each driver may only use this macro
5148 + * once, and calling it replaces module_init() and module_exit().
5149 + */
5150 +#define module_mtd_part_parser(__mtd_part_parser) \
5151 + module_driver(__mtd_part_parser, register_mtd_parser, \
5152 + deregister_mtd_parser)
5153 +
5154 int mtd_is_partition(const struct mtd_info *mtd);
5155 int mtd_add_partition(struct mtd_info *master, const char *name,
5156 long long offset, long long length);
5157 --- a/include/linux/mtd/sh_flctl.h
5158 +++ b/include/linux/mtd/sh_flctl.h
5159 @@ -143,11 +143,11 @@ enum flctl_ecc_res_t {
5160 struct dma_chan;
5161
5162 struct sh_flctl {
5163 - struct mtd_info mtd;
5164 struct nand_chip chip;
5165 struct platform_device *pdev;
5166 struct dev_pm_qos_request pm_qos;
5167 void __iomem *reg;
5168 + resource_size_t fifo;
5169
5170 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
5171 int read_bytes;
5172 @@ -186,7 +186,7 @@ struct sh_flctl_platform_data {
5173
5174 static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
5175 {
5176 - return container_of(mtdinfo, struct sh_flctl, mtd);
5177 + return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip);
5178 }
5179
5180 #endif /* __SH_FLCTL_H__ */
5181 --- a/include/linux/mtd/sharpsl.h
5182 +++ b/include/linux/mtd/sharpsl.h
5183 @@ -14,7 +14,7 @@
5184
5185 struct sharpsl_nand_platform_data {
5186 struct nand_bbt_descr *badblock_pattern;
5187 - struct nand_ecclayout *ecc_layout;
5188 + const struct mtd_ooblayout_ops *ecc_layout;
5189 struct mtd_partition *partitions;
5190 unsigned int nr_partitions;
5191 };
5192 --- a/include/uapi/mtd/mtd-abi.h
5193 +++ b/include/uapi/mtd/mtd-abi.h
5194 @@ -228,7 +228,7 @@ struct nand_oobfree {
5195 * complete set of ECC information. The ioctl truncates the larger internal
5196 * structure to retain binary compatibility with the static declaration of the
5197 * ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
5198 - * the user struct, not the MAX size of the internal struct nand_ecclayout.
5199 + * the user struct, not the MAX size of the internal OOB layout representation.
5200 */
5201 struct nand_ecclayout_user {
5202 __u32 eccbytes;
5203 --- a/fs/jffs2/wbuf.c
5204 +++ b/fs/jffs2/wbuf.c
5205 @@ -1153,7 +1153,7 @@ static struct jffs2_sb_info *work_to_sb(
5206 {
5207 struct delayed_work *dwork;
5208
5209 - dwork = container_of(work, struct delayed_work, work);
5210 + dwork = to_delayed_work(work);
5211 return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
5212 }
5213
5214 @@ -1183,22 +1183,20 @@ void jffs2_dirty_trigger(struct jffs2_sb
5215
5216 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
5217 {
5218 - struct nand_ecclayout *oinfo = c->mtd->ecclayout;
5219 -
5220 if (!c->mtd->oobsize)
5221 return 0;
5222
5223 /* Cleanmarker is out-of-band, so inline size zero */
5224 c->cleanmarker_size = 0;
5225
5226 - if (!oinfo || oinfo->oobavail == 0) {
5227 + if (c->mtd->oobavail == 0) {
5228 pr_err("inconsistent device description\n");
5229 return -EINVAL;
5230 }
5231
5232 jffs2_dbg(1, "using OOB on NAND\n");
5233
5234 - c->oobavail = oinfo->oobavail;
5235 + c->oobavail = c->mtd->oobavail;
5236
5237 /* Initialise write buffer */
5238 init_rwsem(&c->wbuf_sem);
5239 --- a/include/linux/mtd/spi-nor.h
5240 +++ b/include/linux/mtd/spi-nor.h
5241 @@ -85,6 +85,7 @@
5242 #define SR_BP0 BIT(2) /* Block protect 0 */
5243 #define SR_BP1 BIT(3) /* Block protect 1 */
5244 #define SR_BP2 BIT(4) /* Block protect 2 */
5245 +#define SR_TB BIT(5) /* Top/Bottom protect */
5246 #define SR_SRWD BIT(7) /* SR write protect */
5247
5248 #define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
5249 @@ -116,6 +117,7 @@ enum spi_nor_ops {
5250
5251 enum spi_nor_option_flags {
5252 SNOR_F_USE_FSR = BIT(0),
5253 + SNOR_F_HAS_SR_TB = BIT(1),
5254 };
5255
5256 /**
5257 @@ -123,7 +125,6 @@ enum spi_nor_option_flags {
5258 * @mtd: point to a mtd_info structure
5259 * @lock: the lock for the read/write/erase/lock/unlock operations
5260 * @dev: point to a spi device, or a spi nor controller device.
5261 - * @flash_node: point to a device node describing this flash instance.
5262 * @page_size: the page size of the SPI NOR
5263 * @addr_width: number of address bytes
5264 * @erase_opcode: the opcode for erasing a sector
5265 @@ -143,7 +144,8 @@ enum spi_nor_option_flags {
5266 * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
5267 * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
5268 * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
5269 - * at the offset @offs
5270 + * at the offset @offs; if not provided by the driver,
5271 + * spi-nor will send the erase opcode via write_reg()
5272 * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
5273 * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
5274 * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
5275 @@ -154,7 +156,6 @@ struct spi_nor {
5276 struct mtd_info mtd;
5277 struct mutex lock;
5278 struct device *dev;
5279 - struct device_node *flash_node;
5280 u32 page_size;
5281 u8 addr_width;
5282 u8 erase_opcode;
5283 @@ -184,6 +185,17 @@ struct spi_nor {
5284 void *priv;
5285 };
5286
5287 +static inline void spi_nor_set_flash_node(struct spi_nor *nor,
5288 + struct device_node *np)
5289 +{
5290 + mtd_set_of_node(&nor->mtd, np);
5291 +}
5292 +
5293 +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
5294 +{
5295 + return mtd_get_of_node(&nor->mtd);
5296 +}
5297 +
5298 /**
5299 * spi_nor_scan() - scan the SPI NOR
5300 * @nor: the spi_nor structure