kernel: update kernel 4.4 to version 4.4.12
[openwrt/openwrt.git] / target / linux / mediatek / patches-4.4 / 0072-mtd-backport-v4.7-0day-patches-from-Boris.patch
1 From a369af5149e6eb442b22ce89b564dd7a76e03638 Mon Sep 17 00:00:00 2001
2 From: John Crispin <blogic@openwrt.org>
3 Date: Tue, 26 Apr 2016 19:05:01 +0200
4 Subject: [PATCH 072/102] mtd: backport v4.7-0day patches from Boris
5
6 Signed-off-by: John Crispin <blogic@openwrt.org>
7 ---
8 drivers/mtd/Kconfig | 4 +-
9 drivers/mtd/cmdlinepart.c | 3 +-
10 drivers/mtd/devices/m25p80.c | 44 +--
11 drivers/mtd/maps/physmap_of.c | 6 +-
12 drivers/mtd/mtdchar.c | 123 ++++++--
13 drivers/mtd/mtdconcat.c | 2 +-
14 drivers/mtd/mtdcore.c | 428 ++++++++++++++++++++++++--
15 drivers/mtd/mtdcore.h | 7 +-
16 drivers/mtd/mtdpart.c | 161 ++++++----
17 drivers/mtd/mtdswap.c | 24 +-
18 drivers/mtd/nand/Kconfig | 21 +-
19 drivers/mtd/nand/Makefile | 2 +
20 drivers/mtd/nand/nand_base.c | 571 +++++++++++++++++++----------------
21 drivers/mtd/nand/nand_bbt.c | 34 +--
22 drivers/mtd/nand/nand_bch.c | 52 ++--
23 drivers/mtd/nand/nand_ecc.c | 6 +-
24 drivers/mtd/nand/nand_ids.c | 4 +-
25 drivers/mtd/nand/nandsim.c | 43 +--
26 drivers/mtd/ofpart.c | 53 ++--
27 drivers/mtd/spi-nor/Kconfig | 10 +-
28 drivers/mtd/spi-nor/Makefile | 1 +
29 drivers/mtd/spi-nor/mtk-quadspi.c | 485 +++++++++++++++++++++++++++++
30 drivers/mtd/spi-nor/spi-nor.c | 321 +++++++++++++-------
31 drivers/mtd/tests/mtd_nandecctest.c | 2 +-
32 drivers/mtd/tests/oobtest.c | 49 ++-
33 drivers/mtd/tests/pagetest.c | 3 +-
34 drivers/mtd/ubi/cdev.c | 4 +-
35 drivers/mtd/ubi/misc.c | 49 +++
36 drivers/mtd/ubi/ubi.h | 16 +-
37 drivers/mtd/ubi/upd.c | 2 +-
38 drivers/mtd/ubi/wl.c | 21 +-
39 include/linux/mtd/bbm.h | 1 -
40 include/linux/mtd/fsmc.h | 18 --
41 include/linux/mtd/inftl.h | 1 -
42 include/linux/mtd/map.h | 9 +-
43 include/linux/mtd/mtd.h | 80 ++++-
44 include/linux/mtd/nand.h | 94 ++++--
45 include/linux/mtd/nand_bch.h | 10 +-
46 include/linux/mtd/nftl.h | 1 -
47 include/linux/mtd/onenand.h | 2 -
48 include/linux/mtd/partitions.h | 27 +-
49 include/linux/mtd/sh_flctl.h | 4 +-
50 include/linux/mtd/sharpsl.h | 2 +-
51 include/linux/mtd/spi-nor.h | 23 +-
52 include/uapi/mtd/mtd-abi.h | 2 +-
53 45 files changed, 2077 insertions(+), 748 deletions(-)
54 create mode 100644 drivers/mtd/spi-nor/mtk-quadspi.c
55
56 --- a/drivers/mtd/Kconfig
57 +++ b/drivers/mtd/Kconfig
58 @@ -131,7 +131,7 @@ config MTD_CMDLINE_PARTS
59
60 config MTD_AFS_PARTS
61 tristate "ARM Firmware Suite partition parsing"
62 - depends on ARM
63 + depends on (ARM || ARM64)
64 ---help---
65 The ARM Firmware Suite allows the user to divide flash devices into
66 multiple 'images'. Each such image has a header containing its name
67 @@ -161,7 +161,7 @@ config MTD_AR7_PARTS
68
69 config MTD_BCM63XX_PARTS
70 tristate "BCM63XX CFE partitioning support"
71 - depends on BCM63XX
72 + depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
73 select CRC32
74 help
75 This provides partions parsing for BCM63xx devices with CFE
76 --- a/drivers/mtd/cmdlinepart.c
77 +++ b/drivers/mtd/cmdlinepart.c
78 @@ -304,7 +304,7 @@ static int mtdpart_setup_real(char *s)
79 * the first one in the chain if a NULL mtd_id is passed in.
80 */
81 static int parse_cmdline_partitions(struct mtd_info *master,
82 - struct mtd_partition **pparts,
83 + const struct mtd_partition **pparts,
84 struct mtd_part_parser_data *data)
85 {
86 unsigned long long offset;
87 @@ -382,7 +382,6 @@ static int __init mtdpart_setup(char *s)
88 __setup("mtdparts=", mtdpart_setup);
89
90 static struct mtd_part_parser cmdline_parser = {
91 - .owner = THIS_MODULE,
92 .parse_fn = parse_cmdline_partitions,
93 .name = "cmdlinepart",
94 };
95 --- a/drivers/mtd/devices/m25p80.c
96 +++ b/drivers/mtd/devices/m25p80.c
97 @@ -174,22 +174,6 @@ static int m25p80_read(struct spi_nor *n
98 return 0;
99 }
100
101 -static int m25p80_erase(struct spi_nor *nor, loff_t offset)
102 -{
103 - struct m25p *flash = nor->priv;
104 -
105 - dev_dbg(nor->dev, "%dKiB at 0x%08x\n",
106 - flash->spi_nor.mtd.erasesize / 1024, (u32)offset);
107 -
108 - /* Set up command buffer. */
109 - flash->command[0] = nor->erase_opcode;
110 - m25p_addr2cmd(nor, offset, flash->command);
111 -
112 - spi_write(flash->spi, flash->command, m25p_cmdsz(nor));
113 -
114 - return 0;
115 -}
116 -
117 /*
118 * board specific setup should have ensured the SPI clock used here
119 * matches what the READ command supports, at least until this driver
120 @@ -197,12 +181,11 @@ static int m25p80_erase(struct spi_nor *
121 */
122 static int m25p_probe(struct spi_device *spi)
123 {
124 - struct mtd_part_parser_data ppdata;
125 struct flash_platform_data *data;
126 struct m25p *flash;
127 struct spi_nor *nor;
128 enum read_mode mode = SPI_NOR_NORMAL;
129 - char *flash_name = NULL;
130 + char *flash_name;
131 int ret;
132
133 data = dev_get_platdata(&spi->dev);
134 @@ -216,12 +199,11 @@ static int m25p_probe(struct spi_device
135 /* install the hooks */
136 nor->read = m25p80_read;
137 nor->write = m25p80_write;
138 - nor->erase = m25p80_erase;
139 nor->write_reg = m25p80_write_reg;
140 nor->read_reg = m25p80_read_reg;
141
142 nor->dev = &spi->dev;
143 - nor->flash_node = spi->dev.of_node;
144 + spi_nor_set_flash_node(nor, spi->dev.of_node);
145 nor->priv = flash;
146
147 spi_set_drvdata(spi, flash);
148 @@ -242,6 +224,8 @@ static int m25p_probe(struct spi_device
149 */
150 if (data && data->type)
151 flash_name = data->type;
152 + else if (!strcmp(spi->modalias, "spi-nor"))
153 + flash_name = NULL; /* auto-detect */
154 else
155 flash_name = spi->modalias;
156
157 @@ -249,11 +233,8 @@ static int m25p_probe(struct spi_device
158 if (ret)
159 return ret;
160
161 - ppdata.of_node = spi->dev.of_node;
162 -
163 - return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
164 - data ? data->parts : NULL,
165 - data ? data->nr_parts : 0);
166 + return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
167 + data ? data->nr_parts : 0);
168 }
169
170
171 @@ -279,14 +260,21 @@ static int m25p_remove(struct spi_device
172 */
173 static const struct spi_device_id m25p_ids[] = {
174 /*
175 + * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
176 + * hack around the fact that the SPI core does not provide uevent
177 + * matching for .of_match_table
178 + */
179 + {"spi-nor"},
180 +
181 + /*
182 * Entries not used in DTs that should be safe to drop after replacing
183 - * them with "nor-jedec" in platform data.
184 + * them with "spi-nor" in platform data.
185 */
186 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
187
188 /*
189 - * Entries that were used in DTs without "nor-jedec" fallback and should
190 - * be kept for backward compatibility.
191 + * Entries that were used in DTs without "jedec,spi-nor" fallback and
192 + * should be kept for backward compatibility.
193 */
194 {"at25df321a"}, {"at25df641"}, {"at26df081a"},
195 {"mr25h256"},
196 --- a/drivers/mtd/maps/physmap_of.c
197 +++ b/drivers/mtd/maps/physmap_of.c
198 @@ -128,7 +128,6 @@ static int of_flash_probe(struct platfor
199 int reg_tuple_size;
200 struct mtd_info **mtd_list = NULL;
201 resource_size_t res_size;
202 - struct mtd_part_parser_data ppdata;
203 bool map_indirect;
204 const char *mtd_name = NULL;
205
206 @@ -272,8 +271,9 @@ static int of_flash_probe(struct platfor
207 if (err)
208 goto err_out;
209
210 - ppdata.of_node = dp;
211 - mtd_device_parse_register(info->cmtd, part_probe_types_def, &ppdata,
212 + info->cmtd->dev.parent = &dev->dev;
213 + mtd_set_of_node(info->cmtd, dp);
214 + mtd_device_parse_register(info->cmtd, part_probe_types_def, NULL,
215 NULL, 0);
216
217 kfree(mtd_list);
218 --- a/drivers/mtd/mtdchar.c
219 +++ b/drivers/mtd/mtdchar.c
220 @@ -465,38 +465,111 @@ static int mtdchar_readoob(struct file *
221 }
222
223 /*
224 - * Copies (and truncates, if necessary) data from the larger struct,
225 - * nand_ecclayout, to the smaller, deprecated layout struct,
226 - * nand_ecclayout_user. This is necessary only to support the deprecated
227 - * API ioctl ECCGETLAYOUT while allowing all new functionality to use
228 - * nand_ecclayout flexibly (i.e. the struct may change size in new
229 - * releases without requiring major rewrites).
230 + * Copies (and truncates, if necessary) OOB layout information to the
231 + * deprecated layout struct, nand_ecclayout_user. This is necessary only to
232 + * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
233 + * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
234 + * can describe any kind of OOB layout with almost zero overhead from a
235 + * memory usage point of view).
236 */
237 -static int shrink_ecclayout(const struct nand_ecclayout *from,
238 - struct nand_ecclayout_user *to)
239 +static int shrink_ecclayout(struct mtd_info *mtd,
240 + struct nand_ecclayout_user *to)
241 {
242 - int i;
243 + struct mtd_oob_region oobregion;
244 + int i, section = 0, ret;
245
246 - if (!from || !to)
247 + if (!mtd || !to)
248 return -EINVAL;
249
250 memset(to, 0, sizeof(*to));
251
252 - to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
253 - for (i = 0; i < to->eccbytes; i++)
254 - to->eccpos[i] = from->eccpos[i];
255 + to->eccbytes = 0;
256 + for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
257 + u32 eccpos;
258 +
259 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
260 + if (ret < 0) {
261 + if (ret != -ERANGE)
262 + return ret;
263 +
264 + break;
265 + }
266 +
267 + eccpos = oobregion.offset;
268 + for (; i < MTD_MAX_ECCPOS_ENTRIES &&
269 + eccpos < oobregion.offset + oobregion.length; i++) {
270 + to->eccpos[i] = eccpos++;
271 + to->eccbytes++;
272 + }
273 + }
274
275 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
276 - if (from->oobfree[i].length == 0 &&
277 - from->oobfree[i].offset == 0)
278 + ret = mtd_ooblayout_free(mtd, i, &oobregion);
279 + if (ret < 0) {
280 + if (ret != -ERANGE)
281 + return ret;
282 +
283 break;
284 - to->oobavail += from->oobfree[i].length;
285 - to->oobfree[i] = from->oobfree[i];
286 + }
287 +
288 + to->oobfree[i].offset = oobregion.offset;
289 + to->oobfree[i].length = oobregion.length;
290 + to->oobavail += to->oobfree[i].length;
291 }
292
293 return 0;
294 }
295
296 +static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
297 +{
298 + struct mtd_oob_region oobregion;
299 + int i, section = 0, ret;
300 +
301 + if (!mtd || !to)
302 + return -EINVAL;
303 +
304 + memset(to, 0, sizeof(*to));
305 +
306 + to->eccbytes = 0;
307 + for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
308 + u32 eccpos;
309 +
310 + ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
311 + if (ret < 0) {
312 + if (ret != -ERANGE)
313 + return ret;
314 +
315 + break;
316 + }
317 +
318 + if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
319 + return -EINVAL;
320 +
321 + eccpos = oobregion.offset;
322 + for (; eccpos < oobregion.offset + oobregion.length; i++) {
323 + to->eccpos[i] = eccpos++;
324 + to->eccbytes++;
325 + }
326 + }
327 +
328 + for (i = 0; i < 8; i++) {
329 + ret = mtd_ooblayout_free(mtd, i, &oobregion);
330 + if (ret < 0) {
331 + if (ret != -ERANGE)
332 + return ret;
333 +
334 + break;
335 + }
336 +
337 + to->oobfree[i][0] = oobregion.offset;
338 + to->oobfree[i][1] = oobregion.length;
339 + }
340 +
341 + to->useecc = MTD_NANDECC_AUTOPLACE;
342 +
343 + return 0;
344 +}
345 +
346 static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
347 struct blkpg_ioctl_arg *arg)
348 {
349 @@ -815,16 +888,12 @@ static int mtdchar_ioctl(struct file *fi
350 {
351 struct nand_oobinfo oi;
352
353 - if (!mtd->ecclayout)
354 + if (!mtd->ooblayout)
355 return -EOPNOTSUPP;
356 - if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
357 - return -EINVAL;
358
359 - oi.useecc = MTD_NANDECC_AUTOPLACE;
360 - memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
361 - memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
362 - sizeof(oi.oobfree));
363 - oi.eccbytes = mtd->ecclayout->eccbytes;
364 + ret = get_oobinfo(mtd, &oi);
365 + if (ret)
366 + return ret;
367
368 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
369 return -EFAULT;
370 @@ -913,14 +982,14 @@ static int mtdchar_ioctl(struct file *fi
371 {
372 struct nand_ecclayout_user *usrlay;
373
374 - if (!mtd->ecclayout)
375 + if (!mtd->ooblayout)
376 return -EOPNOTSUPP;
377
378 usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
379 if (!usrlay)
380 return -ENOMEM;
381
382 - shrink_ecclayout(mtd->ecclayout, usrlay);
383 + shrink_ecclayout(mtd, usrlay);
384
385 if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
386 ret = -EFAULT;
387 --- a/drivers/mtd/mtdconcat.c
388 +++ b/drivers/mtd/mtdconcat.c
389 @@ -777,7 +777,7 @@ struct mtd_info *mtd_concat_create(struc
390
391 }
392
393 - concat->mtd.ecclayout = subdev[0]->ecclayout;
394 + mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
395
396 concat->num_subdev = num_devs;
397 concat->mtd.name = name;
398 --- a/drivers/mtd/mtdcore.c
399 +++ b/drivers/mtd/mtdcore.c
400 @@ -32,6 +32,7 @@
401 #include <linux/err.h>
402 #include <linux/ioctl.h>
403 #include <linux/init.h>
404 +#include <linux/of.h>
405 #include <linux/proc_fs.h>
406 #include <linux/idr.h>
407 #include <linux/backing-dev.h>
408 @@ -446,6 +447,7 @@ int add_mtd_device(struct mtd_info *mtd)
409 mtd->dev.devt = MTD_DEVT(i);
410 dev_set_name(&mtd->dev, "mtd%d", i);
411 dev_set_drvdata(&mtd->dev, mtd);
412 + of_node_get(mtd_get_of_node(mtd));
413 error = device_register(&mtd->dev);
414 if (error)
415 goto fail_added;
416 @@ -477,6 +479,7 @@ int add_mtd_device(struct mtd_info *mtd)
417 return 0;
418
419 fail_added:
420 + of_node_put(mtd_get_of_node(mtd));
421 idr_remove(&mtd_idr, i);
422 fail_locked:
423 mutex_unlock(&mtd_table_mutex);
424 @@ -518,6 +521,7 @@ int del_mtd_device(struct mtd_info *mtd)
425 device_unregister(&mtd->dev);
426
427 idr_remove(&mtd_idr, mtd->index);
428 + of_node_put(mtd_get_of_node(mtd));
429
430 module_put(THIS_MODULE);
431 ret = 0;
432 @@ -529,9 +533,10 @@ out_error:
433 }
434
435 static int mtd_add_device_partitions(struct mtd_info *mtd,
436 - struct mtd_partition *real_parts,
437 - int nbparts)
438 + struct mtd_partitions *parts)
439 {
440 + const struct mtd_partition *real_parts = parts->parts;
441 + int nbparts = parts->nr_parts;
442 int ret;
443
444 if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
445 @@ -600,29 +605,29 @@ int mtd_device_parse_register(struct mtd
446 const struct mtd_partition *parts,
447 int nr_parts)
448 {
449 + struct mtd_partitions parsed;
450 int ret;
451 - struct mtd_partition *real_parts = NULL;
452
453 mtd_set_dev_defaults(mtd);
454
455 - ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
456 - if (ret <= 0 && nr_parts && parts) {
457 - real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
458 - GFP_KERNEL);
459 - if (!real_parts)
460 - ret = -ENOMEM;
461 - else
462 - ret = nr_parts;
463 - }
464 - /* Didn't come up with either parsed OR fallback partitions */
465 - if (ret < 0) {
466 - pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
467 + memset(&parsed, 0, sizeof(parsed));
468 +
469 + ret = parse_mtd_partitions(mtd, types, &parsed, parser_data);
470 + if ((ret < 0 || parsed.nr_parts == 0) && parts && nr_parts) {
471 + /* Fall back to driver-provided partitions */
472 + parsed = (struct mtd_partitions){
473 + .parts = parts,
474 + .nr_parts = nr_parts,
475 + };
476 + } else if (ret < 0) {
477 + /* Didn't come up with parsed OR fallback partitions */
478 + pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
479 ret);
480 /* Don't abort on errors; we can still use unpartitioned MTD */
481 - ret = 0;
482 + memset(&parsed, 0, sizeof(parsed));
483 }
484
485 - ret = mtd_add_device_partitions(mtd, real_parts, ret);
486 + ret = mtd_add_device_partitions(mtd, &parsed);
487 if (ret)
488 goto out;
489
490 @@ -642,7 +647,8 @@ int mtd_device_parse_register(struct mtd
491 }
492
493 out:
494 - kfree(real_parts);
495 + /* Cleanup any parsed partitions */
496 + mtd_part_parser_cleanup(&parsed);
497 return ret;
498 }
499 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
500 @@ -767,7 +773,6 @@ out:
501 }
502 EXPORT_SYMBOL_GPL(get_mtd_device);
503
504 -
505 int __get_mtd_device(struct mtd_info *mtd)
506 {
507 int err;
508 @@ -1001,6 +1006,366 @@ int mtd_read_oob(struct mtd_info *mtd, l
509 }
510 EXPORT_SYMBOL_GPL(mtd_read_oob);
511
512 +/**
513 + * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
514 + * @mtd: MTD device structure
515 + * @section: ECC section. Depending on the layout you may have all the ECC
516 + * bytes stored in a single contiguous section, or one section
517 + * per ECC chunk (and sometimes several sections for a single
518 + * ECC chunk)
519 + * @oobecc: OOB region struct filled with the appropriate ECC position
520 + * information
521 + *
522 + * This function returns ECC section information in the OOB area. If you want
523 + * to get all the ECC bytes information, then you should call
524 + * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
525 + *
526 + * Returns zero on success, a negative error code otherwise.
527 + */
528 +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
529 + struct mtd_oob_region *oobecc)
530 +{
531 + memset(oobecc, 0, sizeof(*oobecc));
532 +
533 + if (!mtd || section < 0)
534 + return -EINVAL;
535 +
536 + if (!mtd->ooblayout || !mtd->ooblayout->ecc)
537 + return -ENOTSUPP;
538 +
539 + return mtd->ooblayout->ecc(mtd, section, oobecc);
540 +}
541 +EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
542 +
543 +/**
544 + * mtd_ooblayout_free - Get the OOB region definition of a specific free
545 + * section
546 + * @mtd: MTD device structure
547 + * @section: Free section you are interested in. Depending on the layout
548 + * you may have all the free bytes stored in a single contiguous
549 + * section, or one section per ECC chunk plus an extra section
550 + * for the remaining bytes (or other funky layout).
551 + * @oobfree: OOB region struct filled with the appropriate free position
552 + * information
553 + *
554 + * This function returns free bytes position in the OOB area. If you want
555 + * to get all the free bytes information, then you should call
556 + * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
557 + *
558 + * Returns zero on success, a negative error code otherwise.
559 + */
560 +int mtd_ooblayout_free(struct mtd_info *mtd, int section,
561 + struct mtd_oob_region *oobfree)
562 +{
563 + memset(oobfree, 0, sizeof(*oobfree));
564 +
565 + if (!mtd || section < 0)
566 + return -EINVAL;
567 +
568 + if (!mtd->ooblayout || !mtd->ooblayout->free)
569 + return -ENOTSUPP;
570 +
571 + return mtd->ooblayout->free(mtd, section, oobfree);
572 +}
573 +EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
574 +
575 +/**
576 + * mtd_ooblayout_find_region - Find the region attached to a specific byte
577 + * @mtd: mtd info structure
578 + * @byte: the byte we are searching for
579 + * @sectionp: pointer where the section id will be stored
580 + * @oobregion: used to retrieve the ECC position
581 + * @iter: iterator function. Should be either mtd_ooblayout_free or
582 + * mtd_ooblayout_ecc depending on the region type you're searching for
583 + *
584 + * This function returns the section id and oobregion information of a
585 + * specific byte. For example, say you want to know where the 4th ECC byte is
586 + * stored, you'll use:
587 + *
588 + * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
589 + *
590 + * Returns zero on success, a negative error code otherwise.
591 + */
592 +static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
593 + int *sectionp, struct mtd_oob_region *oobregion,
594 + int (*iter)(struct mtd_info *,
595 + int section,
596 + struct mtd_oob_region *oobregion))
597 +{
598 + int pos = 0, ret, section = 0;
599 +
600 + memset(oobregion, 0, sizeof(*oobregion));
601 +
602 + while (1) {
603 + ret = iter(mtd, section, oobregion);
604 + if (ret)
605 + return ret;
606 +
607 + if (pos + oobregion->length > byte)
608 + break;
609 +
610 + pos += oobregion->length;
611 + section++;
612 + }
613 +
614 + /*
615 + * Adjust region info to make it start at the beginning at the
616 + * 'start' ECC byte.
617 + */
618 + oobregion->offset += byte - pos;
619 + oobregion->length -= byte - pos;
620 + *sectionp = section;
621 +
622 + return 0;
623 +}
624 +
625 +/**
626 + * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
627 + * ECC byte
628 + * @mtd: mtd info structure
629 + * @eccbyte: the byte we are searching for
630 + * @sectionp: pointer where the section id will be stored
631 + * @oobregion: OOB region information
632 + *
633 + * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
634 + * byte.
635 + *
636 + * Returns zero on success, a negative error code otherwise.
637 + */
638 +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
639 + int *section,
640 + struct mtd_oob_region *oobregion)
641 +{
642 + return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
643 + mtd_ooblayout_ecc);
644 +}
645 +EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
646 +
647 +/**
648 + * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
649 + * @mtd: mtd info structure
650 + * @buf: destination buffer to store OOB bytes
651 + * @oobbuf: OOB buffer
652 + * @start: first byte to retrieve
653 + * @nbytes: number of bytes to retrieve
654 + * @iter: section iterator
655 + *
656 + * Extract bytes attached to a specific category (ECC or free)
657 + * from the OOB buffer and copy them into buf.
658 + *
659 + * Returns zero on success, a negative error code otherwise.
660 + */
661 +static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
662 + const u8 *oobbuf, int start, int nbytes,
663 + int (*iter)(struct mtd_info *,
664 + int section,
665 + struct mtd_oob_region *oobregion))
666 +{
667 + struct mtd_oob_region oobregion = { };
668 + int section = 0, ret;
669 +
670 + ret = mtd_ooblayout_find_region(mtd, start, &section,
671 + &oobregion, iter);
672 +
673 + while (!ret) {
674 + int cnt;
675 +
676 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
677 + memcpy(buf, oobbuf + oobregion.offset, cnt);
678 + buf += cnt;
679 + nbytes -= cnt;
680 +
681 + if (!nbytes)
682 + break;
683 +
684 + ret = iter(mtd, ++section, &oobregion);
685 + }
686 +
687 + return ret;
688 +}
689 +
690 +/**
691 + * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
692 + * @mtd: mtd info structure
693 + * @buf: source buffer to get OOB bytes from
694 + * @oobbuf: OOB buffer
695 + * @start: first OOB byte to set
696 + * @nbytes: number of OOB bytes to set
697 + * @iter: section iterator
698 + *
699 + * Fill the OOB buffer with data provided in buf. The category (ECC or free)
700 + * is selected by passing the appropriate iterator.
701 + *
702 + * Returns zero on success, a negative error code otherwise.
703 + */
704 +static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
705 + u8 *oobbuf, int start, int nbytes,
706 + int (*iter)(struct mtd_info *,
707 + int section,
708 + struct mtd_oob_region *oobregion))
709 +{
710 + struct mtd_oob_region oobregion = { };
711 + int section = 0, ret;
712 +
713 + ret = mtd_ooblayout_find_region(mtd, start, &section,
714 + &oobregion, iter);
715 +
716 + while (!ret) {
717 + int cnt;
718 +
719 + cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
720 + memcpy(oobbuf + oobregion.offset, buf, cnt);
721 + buf += cnt;
722 + nbytes -= cnt;
723 +
724 + if (!nbytes)
725 + break;
726 +
727 + ret = iter(mtd, ++section, &oobregion);
728 + }
729 +
730 + return ret;
731 +}
732 +
733 +/**
734 + * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
735 + * @mtd: mtd info structure
736 + * @iter: category iterator
737 + *
738 + * Count the number of bytes in a given category.
739 + *
740 + * Returns a positive value on success, a negative error code otherwise.
741 + */
742 +static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
743 + int (*iter)(struct mtd_info *,
744 + int section,
745 + struct mtd_oob_region *oobregion))
746 +{
747 + struct mtd_oob_region oobregion = { };
748 + int section = 0, ret, nbytes = 0;
749 +
750 + while (1) {
751 + ret = iter(mtd, section++, &oobregion);
752 + if (ret) {
753 + if (ret == -ERANGE)
754 + ret = nbytes;
755 + break;
756 + }
757 +
758 + nbytes += oobregion.length;
759 + }
760 +
761 + return ret;
762 +}
763 +
764 +/**
765 + * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
766 + * @mtd: mtd info structure
767 + * @eccbuf: destination buffer to store ECC bytes
768 + * @oobbuf: OOB buffer
769 + * @start: first ECC byte to retrieve
770 + * @nbytes: number of ECC bytes to retrieve
771 + *
772 + * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
773 + *
774 + * Returns zero on success, a negative error code otherwise.
775 + */
776 +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
777 + const u8 *oobbuf, int start, int nbytes)
778 +{
779 + return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
780 + mtd_ooblayout_ecc);
781 +}
782 +EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
783 +
784 +/**
785 + * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
786 + * @mtd: mtd info structure
787 + * @eccbuf: source buffer to get ECC bytes from
788 + * @oobbuf: OOB buffer
789 + * @start: first ECC byte to set
790 + * @nbytes: number of ECC bytes to set
791 + *
792 + * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
793 + *
794 + * Returns zero on success, a negative error code otherwise.
795 + */
796 +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
797 + u8 *oobbuf, int start, int nbytes)
798 +{
799 + return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
800 + mtd_ooblayout_ecc);
801 +}
802 +EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
803 +
804 +/**
805 + * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
806 + * @mtd: mtd info structure
807 + * @databuf: destination buffer to store data bytes
808 + * @oobbuf: OOB buffer
809 + * @start: first data byte to retrieve
810 + * @nbytes: number of data bytes to retrieve
811 + *
812 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
813 + *
814 + * Returns zero on success, a negative error code otherwise.
815 + */
816 +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
817 + const u8 *oobbuf, int start, int nbytes)
818 +{
819 + return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
820 + mtd_ooblayout_free);
821 +}
822 +EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
823 +
824 +/**
825 + * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
826 + * @mtd: mtd info structure
827 + * @databuf: source buffer to get data bytes from
828 + * @oobbuf: OOB buffer
829 + * @start: first data byte to set
830 + * @nbytes: number of data bytes to set
831 + *
832 + * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
833 + *
834 + * Returns zero on success, a negative error code otherwise.
835 + */
836 +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
837 + u8 *oobbuf, int start, int nbytes)
838 +{
839 + return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
840 + mtd_ooblayout_free);
841 +}
842 +EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
843 +
844 +/**
845 + * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
846 + * @mtd: mtd info structure
847 + *
848 + * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
849 + *
850 + * Returns the number of free bytes on success, a negative error code otherwise.
851 + */
852 +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
853 +{
854 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
855 +}
856 +EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
857 +
858 +/**
859 + * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
860 + * @mtd: mtd info structure
861 + *
862 + * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
863 + *
864 + * Returns the number of ECC bytes on success, a negative error code otherwise.
865 + */
866 +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
867 +{
868 + return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
869 +}
870 +EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
871 +
872 /*
873 * Method to access the protection register area, present in some flash
874 * devices. The user data is one time programmable but the factory data is read
875 --- a/drivers/mtd/mtdcore.h
876 +++ b/drivers/mtd/mtdcore.h
877 @@ -10,10 +10,15 @@ int add_mtd_device(struct mtd_info *mtd)
878 int del_mtd_device(struct mtd_info *mtd);
879 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
880 int del_mtd_partitions(struct mtd_info *);
881 +
882 +struct mtd_partitions;
883 +
884 int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
885 - struct mtd_partition **pparts,
886 + struct mtd_partitions *pparts,
887 struct mtd_part_parser_data *data);
888
889 +void mtd_part_parser_cleanup(struct mtd_partitions *parts);
890 +
891 int __init init_mtdchar(void);
892 void __exit cleanup_mtdchar(void);
893
894 --- a/drivers/mtd/mtdpart.c
895 +++ b/drivers/mtd/mtdpart.c
896 @@ -55,9 +55,12 @@ static void mtd_partition_split(struct m
897
898 /*
899 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
900 - * the pointer to that structure with this macro.
901 + * the pointer to that structure.
902 */
903 -#define PART(x) ((struct mtd_part *)(x))
904 +static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
905 +{
906 + return container_of(mtd, struct mtd_part, mtd);
907 +}
908
909
910 /*
911 @@ -68,7 +71,7 @@ static void mtd_partition_split(struct m
912 static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
913 size_t *retlen, u_char *buf)
914 {
915 - struct mtd_part *part = PART(mtd);
916 + struct mtd_part *part = mtd_to_part(mtd);
917 struct mtd_ecc_stats stats;
918 int res;
919
920 @@ -87,7 +90,7 @@ static int part_read(struct mtd_info *mt
921 static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
922 size_t *retlen, void **virt, resource_size_t *phys)
923 {
924 - struct mtd_part *part = PART(mtd);
925 + struct mtd_part *part = mtd_to_part(mtd);
926
927 return part->master->_point(part->master, from + part->offset, len,
928 retlen, virt, phys);
929 @@ -95,7 +98,7 @@ static int part_point(struct mtd_info *m
930
931 static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
932 {
933 - struct mtd_part *part = PART(mtd);
934 + struct mtd_part *part = mtd_to_part(mtd);
935
936 return part->master->_unpoint(part->master, from + part->offset, len);
937 }
938 @@ -105,7 +108,7 @@ static unsigned long part_get_unmapped_a
939 unsigned long offset,
940 unsigned long flags)
941 {
942 - struct mtd_part *part = PART(mtd);
943 + struct mtd_part *part = mtd_to_part(mtd);
944
945 offset += part->offset;
946 return part->master->_get_unmapped_area(part->master, len, offset,
947 @@ -115,7 +118,7 @@ static unsigned long part_get_unmapped_a
948 static int part_read_oob(struct mtd_info *mtd, loff_t from,
949 struct mtd_oob_ops *ops)
950 {
951 - struct mtd_part *part = PART(mtd);
952 + struct mtd_part *part = mtd_to_part(mtd);
953 int res;
954
955 if (from >= mtd->size)
956 @@ -130,10 +133,7 @@ static int part_read_oob(struct mtd_info
957 if (ops->oobbuf) {
958 size_t len, pages;
959
960 - if (ops->mode == MTD_OPS_AUTO_OOB)
961 - len = mtd->oobavail;
962 - else
963 - len = mtd->oobsize;
964 + len = mtd_oobavail(mtd, ops);
965 pages = mtd_div_by_ws(mtd->size, mtd);
966 pages -= mtd_div_by_ws(from, mtd);
967 if (ops->ooboffs + ops->ooblen > pages * len)
968 @@ -153,7 +153,7 @@ static int part_read_oob(struct mtd_info
969 static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
970 size_t len, size_t *retlen, u_char *buf)
971 {
972 - struct mtd_part *part = PART(mtd);
973 + struct mtd_part *part = mtd_to_part(mtd);
974 return part->master->_read_user_prot_reg(part->master, from, len,
975 retlen, buf);
976 }
977 @@ -161,7 +161,7 @@ static int part_read_user_prot_reg(struc
978 static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
979 size_t *retlen, struct otp_info *buf)
980 {
981 - struct mtd_part *part = PART(mtd);
982 + struct mtd_part *part = mtd_to_part(mtd);
983 return part->master->_get_user_prot_info(part->master, len, retlen,
984 buf);
985 }
986 @@ -169,7 +169,7 @@ static int part_get_user_prot_info(struc
987 static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
988 size_t len, size_t *retlen, u_char *buf)
989 {
990 - struct mtd_part *part = PART(mtd);
991 + struct mtd_part *part = mtd_to_part(mtd);
992 return part->master->_read_fact_prot_reg(part->master, from, len,
993 retlen, buf);
994 }
995 @@ -177,7 +177,7 @@ static int part_read_fact_prot_reg(struc
996 static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
997 size_t *retlen, struct otp_info *buf)
998 {
999 - struct mtd_part *part = PART(mtd);
1000 + struct mtd_part *part = mtd_to_part(mtd);
1001 return part->master->_get_fact_prot_info(part->master, len, retlen,
1002 buf);
1003 }
1004 @@ -185,7 +185,7 @@ static int part_get_fact_prot_info(struc
1005 static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
1006 size_t *retlen, const u_char *buf)
1007 {
1008 - struct mtd_part *part = PART(mtd);
1009 + struct mtd_part *part = mtd_to_part(mtd);
1010 return part->master->_write(part->master, to + part->offset, len,
1011 retlen, buf);
1012 }
1013 @@ -193,7 +193,7 @@ static int part_write(struct mtd_info *m
1014 static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1015 size_t *retlen, const u_char *buf)
1016 {
1017 - struct mtd_part *part = PART(mtd);
1018 + struct mtd_part *part = mtd_to_part(mtd);
1019 return part->master->_panic_write(part->master, to + part->offset, len,
1020 retlen, buf);
1021 }
1022 @@ -201,7 +201,7 @@ static int part_panic_write(struct mtd_i
1023 static int part_write_oob(struct mtd_info *mtd, loff_t to,
1024 struct mtd_oob_ops *ops)
1025 {
1026 - struct mtd_part *part = PART(mtd);
1027 + struct mtd_part *part = mtd_to_part(mtd);
1028
1029 if (to >= mtd->size)
1030 return -EINVAL;
1031 @@ -213,7 +213,7 @@ static int part_write_oob(struct mtd_inf
1032 static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1033 size_t len, size_t *retlen, u_char *buf)
1034 {
1035 - struct mtd_part *part = PART(mtd);
1036 + struct mtd_part *part = mtd_to_part(mtd);
1037 return part->master->_write_user_prot_reg(part->master, from, len,
1038 retlen, buf);
1039 }
1040 @@ -221,21 +221,21 @@ static int part_write_user_prot_reg(stru
1041 static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1042 size_t len)
1043 {
1044 - struct mtd_part *part = PART(mtd);
1045 + struct mtd_part *part = mtd_to_part(mtd);
1046 return part->master->_lock_user_prot_reg(part->master, from, len);
1047 }
1048
1049 static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
1050 unsigned long count, loff_t to, size_t *retlen)
1051 {
1052 - struct mtd_part *part = PART(mtd);
1053 + struct mtd_part *part = mtd_to_part(mtd);
1054 return part->master->_writev(part->master, vecs, count,
1055 to + part->offset, retlen);
1056 }
1057
1058 static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
1059 {
1060 - struct mtd_part *part = PART(mtd);
1061 + struct mtd_part *part = mtd_to_part(mtd);
1062 int ret;
1063
1064
1065 @@ -299,7 +299,7 @@ static int part_erase(struct mtd_info *m
1066 void mtd_erase_callback(struct erase_info *instr)
1067 {
1068 if (instr->mtd->_erase == part_erase) {
1069 - struct mtd_part *part = PART(instr->mtd);
1070 + struct mtd_part *part = mtd_to_part(instr->mtd);
1071 size_t wrlen = 0;
1072
1073 if (instr->mtd->flags & MTD_ERASE_PARTIAL) {
1074 @@ -330,13 +330,13 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback);
1075
1076 static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1077 {
1078 - struct mtd_part *part = PART(mtd);
1079 + struct mtd_part *part = mtd_to_part(mtd);
1080 return part->master->_lock(part->master, ofs + part->offset, len);
1081 }
1082
1083 static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1084 {
1085 - struct mtd_part *part = PART(mtd);
1086 + struct mtd_part *part = mtd_to_part(mtd);
1087
1088 ofs += part->offset;
1089 if (mtd->flags & MTD_ERASE_PARTIAL) {
1090 @@ -349,45 +349,45 @@ static int part_unlock(struct mtd_info *
1091
1092 static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1093 {
1094 - struct mtd_part *part = PART(mtd);
1095 + struct mtd_part *part = mtd_to_part(mtd);
1096 return part->master->_is_locked(part->master, ofs + part->offset, len);
1097 }
1098
1099 static void part_sync(struct mtd_info *mtd)
1100 {
1101 - struct mtd_part *part = PART(mtd);
1102 + struct mtd_part *part = mtd_to_part(mtd);
1103 part->master->_sync(part->master);
1104 }
1105
1106 static int part_suspend(struct mtd_info *mtd)
1107 {
1108 - struct mtd_part *part = PART(mtd);
1109 + struct mtd_part *part = mtd_to_part(mtd);
1110 return part->master->_suspend(part->master);
1111 }
1112
1113 static void part_resume(struct mtd_info *mtd)
1114 {
1115 - struct mtd_part *part = PART(mtd);
1116 + struct mtd_part *part = mtd_to_part(mtd);
1117 part->master->_resume(part->master);
1118 }
1119
1120 static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1121 {
1122 - struct mtd_part *part = PART(mtd);
1123 + struct mtd_part *part = mtd_to_part(mtd);
1124 ofs += part->offset;
1125 return part->master->_block_isreserved(part->master, ofs);
1126 }
1127
1128 static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
1129 {
1130 - struct mtd_part *part = PART(mtd);
1131 + struct mtd_part *part = mtd_to_part(mtd);
1132 ofs += part->offset;
1133 return part->master->_block_isbad(part->master, ofs);
1134 }
1135
1136 static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
1137 {
1138 - struct mtd_part *part = PART(mtd);
1139 + struct mtd_part *part = mtd_to_part(mtd);
1140 int res;
1141
1142 ofs += part->offset;
1143 @@ -397,6 +397,27 @@ static int part_block_markbad(struct mtd
1144 return res;
1145 }
1146
1147 +static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
1148 + struct mtd_oob_region *oobregion)
1149 +{
1150 + struct mtd_part *part = mtd_to_part(mtd);
1151 +
1152 + return mtd_ooblayout_ecc(part->master, section, oobregion);
1153 +}
1154 +
1155 +static int part_ooblayout_free(struct mtd_info *mtd, int section,
1156 + struct mtd_oob_region *oobregion)
1157 +{
1158 + struct mtd_part *part = mtd_to_part(mtd);
1159 +
1160 + return mtd_ooblayout_free(part->master, section, oobregion);
1161 +}
1162 +
1163 +static const struct mtd_ooblayout_ops part_ooblayout_ops = {
1164 + .ecc = part_ooblayout_ecc,
1165 + .free = part_ooblayout_free,
1166 +};
1167 +
1168 static inline void free_partition(struct mtd_part *p)
1169 {
1170 kfree(p->mtd.name);
1171 @@ -614,7 +635,7 @@ static struct mtd_part *allocate_partiti
1172 slave->mtd.erasesize = slave->mtd.size;
1173 }
1174
1175 - slave->mtd.ecclayout = master->ecclayout;
1176 + mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
1177 slave->mtd.ecc_step_size = master->ecc_step_size;
1178 slave->mtd.ecc_strength = master->ecc_strength;
1179 slave->mtd.bitflip_threshold = master->bitflip_threshold;
1180 @@ -639,7 +660,7 @@ static ssize_t mtd_partition_offset_show
1181 struct device_attribute *attr, char *buf)
1182 {
1183 struct mtd_info *mtd = dev_get_drvdata(dev);
1184 - struct mtd_part *part = PART(mtd);
1185 + struct mtd_part *part = mtd_to_part(mtd);
1186 return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
1187 }
1188
1189 @@ -677,11 +698,10 @@ int mtd_add_partition(struct mtd_info *m
1190 if (length <= 0)
1191 return -EINVAL;
1192
1193 + memset(&part, 0, sizeof(part));
1194 part.name = name;
1195 part.size = length;
1196 part.offset = offset;
1197 - part.mask_flags = 0;
1198 - part.ecclayout = NULL;
1199
1200 new = allocate_partition(master, &part, -1, offset);
1201 if (IS_ERR(new))
1202 @@ -845,7 +865,7 @@ int add_mtd_partitions(struct mtd_info *
1203 static DEFINE_SPINLOCK(part_parser_lock);
1204 static LIST_HEAD(part_parsers);
1205
1206 -static struct mtd_part_parser *get_partition_parser(const char *name)
1207 +static struct mtd_part_parser *mtd_part_parser_get(const char *name)
1208 {
1209 struct mtd_part_parser *p, *ret = NULL;
1210
1211 @@ -862,7 +882,20 @@ static struct mtd_part_parser *get_parti
1212 return ret;
1213 }
1214
1215 -#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
1216 +static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
1217 +{
1218 + module_put(p->owner);
1219 +}
1220 +
1221 +/*
1222 + * Many partition parsers just expected the core to kfree() all their data in
1223 + * one chunk. Do that by default.
1224 + */
1225 +static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
1226 + int nr_parts)
1227 +{
1228 + kfree(pparts);
1229 +}
1230
1231 static struct mtd_part_parser *
1232 get_partition_parser_by_type(enum mtd_parser_type type,
1233 @@ -874,7 +907,7 @@ get_partition_parser_by_type(enum mtd_pa
1234
1235 p = list_prepare_entry(start, &part_parsers, list);
1236 if (start)
1237 - put_partition_parser(start);
1238 + mtd_part_parser_put(start);
1239
1240 list_for_each_entry_continue(p, &part_parsers, list) {
1241 if (p->type == type && try_module_get(p->owner)) {
1242 @@ -888,13 +921,19 @@ get_partition_parser_by_type(enum mtd_pa
1243 return ret;
1244 }
1245
1246 -void register_mtd_parser(struct mtd_part_parser *p)
1247 -{
1248 +int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
1249 + {
1250 + p->owner = owner;
1251 +
1252 + if (!p->cleanup)
1253 + p->cleanup = &mtd_part_parser_cleanup_default;
1254 +
1255 spin_lock(&part_parser_lock);
1256 list_add(&p->list, &part_parsers);
1257 spin_unlock(&part_parser_lock);
1258 + return 0;
1259 }
1260 -EXPORT_SYMBOL_GPL(register_mtd_parser);
1261 +EXPORT_SYMBOL_GPL(__register_mtd_parser);
1262
1263 void deregister_mtd_parser(struct mtd_part_parser *p)
1264 {
1265 @@ -954,7 +993,7 @@ static const char * const default_mtd_pa
1266 * parse_mtd_partitions - parse MTD partitions
1267 * @master: the master partition (describes whole MTD device)
1268 * @types: names of partition parsers to try or %NULL
1269 - * @pparts: array of partitions found is returned here
1270 + * @pparts: info about partitions found is returned here
1271 * @data: MTD partition parser-specific data
1272 *
1273 * This function tries to find partition on MTD device @master. It uses MTD
1274 @@ -966,45 +1005,42 @@ static const char * const default_mtd_pa
1275 *
1276 * This function may return:
1277 * o a negative error code in case of failure
1278 - * o zero if no partitions were found
1279 - * o a positive number of found partitions, in which case on exit @pparts will
1280 - * point to an array containing this number of &struct mtd_info objects.
1281 + * o zero otherwise, and @pparts will describe the partitions, number of
1282 + * partitions, and the parser which parsed them. Caller must release
1283 + * resources with mtd_part_parser_cleanup() when finished with the returned
1284 + * data.
1285 */
1286 int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
1287 - struct mtd_partition **pparts,
1288 + struct mtd_partitions *pparts,
1289 struct mtd_part_parser_data *data)
1290 {
1291 struct mtd_part_parser *parser;
1292 int ret, err = 0;
1293 const char *const *types_of = NULL;
1294
1295 - if (data && data->of_node) {
1296 - types_of = of_get_probes(data->of_node);
1297 - if (types_of != NULL)
1298 - types = types_of;
1299 - }
1300 -
1301 if (!types)
1302 types = default_mtd_part_types;
1303
1304 for ( ; *types; types++) {
1305 pr_debug("%s: parsing partitions %s\n", master->name, *types);
1306 - parser = get_partition_parser(*types);
1307 + parser = mtd_part_parser_get(*types);
1308 if (!parser && !request_module("%s", *types))
1309 - parser = get_partition_parser(*types);
1310 + parser = mtd_part_parser_get(*types);
1311 pr_debug("%s: got parser %s\n", master->name,
1312 parser ? parser->name : NULL);
1313 if (!parser)
1314 continue;
1315 - ret = (*parser->parse_fn)(master, pparts, data);
1316 + ret = (*parser->parse_fn)(master, &pparts->parts, data);
1317 pr_debug("%s: parser %s: %i\n",
1318 master->name, parser->name, ret);
1319 - put_partition_parser(parser);
1320 if (ret > 0) {
1321 printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
1322 ret, parser->name, master->name);
1323 - return ret;
1324 + pparts->nr_parts = ret;
1325 + pparts->parser = parser;
1326 + return 0;
1327 }
1328 + mtd_part_parser_put(parser);
1329 /*
1330 * Stash the first error we see; only report it if no parser
1331 * succeeds
1332 @@ -1034,7 +1070,7 @@ int parse_mtd_partitions_by_type(struct
1333 ret = (*parser->parse_fn)(master, pparts, data);
1334
1335 if (ret > 0) {
1336 - put_partition_parser(parser);
1337 + mtd_part_parser_put(parser);
1338 printk(KERN_NOTICE
1339 "%d %s partitions found on MTD device %s\n",
1340 ret, parser->name, master->name);
1341 @@ -1048,6 +1084,22 @@ int parse_mtd_partitions_by_type(struct
1342 }
1343 EXPORT_SYMBOL_GPL(parse_mtd_partitions_by_type);
1344
1345 +void mtd_part_parser_cleanup(struct mtd_partitions *parts)
1346 +{
1347 + const struct mtd_part_parser *parser;
1348 +
1349 + if (!parts)
1350 + return;
1351 +
1352 + parser = parts->parser;
1353 + if (parser) {
1354 + if (parser->cleanup)
1355 + parser->cleanup(parts->parts, parts->nr_parts);
1356 +
1357 + mtd_part_parser_put(parser);
1358 + }
1359 +}
1360 +
1361 int mtd_is_partition(const struct mtd_info *mtd)
1362 {
1363 struct mtd_part *part;
1364 @@ -1070,7 +1122,7 @@ struct mtd_info *mtdpart_get_master(cons
1365 if (!mtd_is_partition(mtd))
1366 return (struct mtd_info *)mtd;
1367
1368 - return PART(mtd)->master;
1369 + return mtd_to_part(mtd)->master;
1370 }
1371 EXPORT_SYMBOL_GPL(mtdpart_get_master);
1372
1373 @@ -1079,7 +1131,7 @@ uint64_t mtdpart_get_offset(const struct
1374 if (!mtd_is_partition(mtd))
1375 return 0;
1376
1377 - return PART(mtd)->offset;
1378 + return mtd_to_part(mtd)->offset;
1379 }
1380 EXPORT_SYMBOL_GPL(mtdpart_get_offset);
1381
1382 @@ -1089,6 +1141,6 @@ uint64_t mtd_get_device_size(const struc
1383 if (!mtd_is_partition(mtd))
1384 return mtd->size;
1385
1386 - return PART(mtd)->master->size;
1387 + return mtd_to_part(mtd)->master->size;
1388 }
1389 EXPORT_SYMBOL_GPL(mtd_get_device_size);
1390 --- a/drivers/mtd/mtdswap.c
1391 +++ b/drivers/mtd/mtdswap.c
1392 @@ -346,7 +346,7 @@ static int mtdswap_read_markers(struct m
1393 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
1394 return MTDSWAP_SCANNED_BAD;
1395
1396 - ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
1397 + ops.ooblen = 2 * d->mtd->oobavail;
1398 ops.oobbuf = d->oob_buf;
1399 ops.ooboffs = 0;
1400 ops.datbuf = NULL;
1401 @@ -359,7 +359,7 @@ static int mtdswap_read_markers(struct m
1402
1403 data = (struct mtdswap_oobdata *)d->oob_buf;
1404 data2 = (struct mtdswap_oobdata *)
1405 - (d->oob_buf + d->mtd->ecclayout->oobavail);
1406 + (d->oob_buf + d->mtd->oobavail);
1407
1408 if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
1409 eb->erase_count = le32_to_cpu(data->count);
1410 @@ -933,7 +933,7 @@ static unsigned int mtdswap_eblk_passes(
1411
1412 ops.mode = MTD_OPS_AUTO_OOB;
1413 ops.len = mtd->writesize;
1414 - ops.ooblen = mtd->ecclayout->oobavail;
1415 + ops.ooblen = mtd->oobavail;
1416 ops.ooboffs = 0;
1417 ops.datbuf = d->page_buf;
1418 ops.oobbuf = d->oob_buf;
1419 @@ -945,7 +945,7 @@ static unsigned int mtdswap_eblk_passes(
1420 for (i = 0; i < mtd_pages; i++) {
1421 patt = mtdswap_test_patt(test + i);
1422 memset(d->page_buf, patt, mtd->writesize);
1423 - memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
1424 + memset(d->oob_buf, patt, mtd->oobavail);
1425 ret = mtd_write_oob(mtd, pos, &ops);
1426 if (ret)
1427 goto error;
1428 @@ -964,7 +964,7 @@ static unsigned int mtdswap_eblk_passes(
1429 if (p1[j] != patt)
1430 goto error;
1431
1432 - for (j = 0; j < mtd->ecclayout->oobavail; j++)
1433 + for (j = 0; j < mtd->oobavail; j++)
1434 if (p2[j] != (unsigned char)patt)
1435 goto error;
1436
1437 @@ -1387,7 +1387,7 @@ static int mtdswap_init(struct mtdswap_d
1438 if (!d->page_buf)
1439 goto page_buf_fail;
1440
1441 - d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
1442 + d->oob_buf = kmalloc(2 * mtd->oobavail, GFP_KERNEL);
1443 if (!d->oob_buf)
1444 goto oob_buf_fail;
1445
1446 @@ -1417,7 +1417,6 @@ static void mtdswap_add_mtd(struct mtd_b
1447 unsigned long part;
1448 unsigned int eblocks, eavailable, bad_blocks, spare_cnt;
1449 uint64_t swap_size, use_size, size_limit;
1450 - struct nand_ecclayout *oinfo;
1451 int ret;
1452
1453 parts = &partitions[0];
1454 @@ -1447,17 +1446,10 @@ static void mtdswap_add_mtd(struct mtd_b
1455 return;
1456 }
1457
1458 - oinfo = mtd->ecclayout;
1459 - if (!oinfo) {
1460 - printk(KERN_ERR "%s: mtd%d does not have OOB\n",
1461 - MTDSWAP_PREFIX, mtd->index);
1462 - return;
1463 - }
1464 -
1465 - if (!mtd->oobsize || oinfo->oobavail < MTDSWAP_OOBSIZE) {
1466 + if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
1467 printk(KERN_ERR "%s: Not enough free bytes in OOB, "
1468 "%d available, %zu needed.\n",
1469 - MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
1470 + MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);
1471 return;
1472 }
1473
1474 --- a/drivers/mtd/nand/Kconfig
1475 +++ b/drivers/mtd/nand/Kconfig
1476 @@ -55,7 +55,7 @@ config MTD_NAND_DENALI_PCI
1477 config MTD_NAND_DENALI_DT
1478 tristate "Support Denali NAND controller as a DT device"
1479 select MTD_NAND_DENALI
1480 - depends on HAS_DMA && HAVE_CLK
1481 + depends on HAS_DMA && HAVE_CLK && OF
1482 help
1483 Enable the driver for NAND flash on platforms using a Denali NAND
1484 controller as a DT device.
1485 @@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
1486 config MTD_NAND_GPIO
1487 tristate "GPIO assisted NAND Flash driver"
1488 depends on GPIOLIB || COMPILE_TEST
1489 + depends on HAS_IOMEM
1490 help
1491 This enables a NAND flash driver where control signals are
1492 connected to GPIO pins, and commands and data are communicated
1493 @@ -310,6 +311,7 @@ config MTD_NAND_CAFE
1494 config MTD_NAND_CS553X
1495 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
1496 depends on X86_32
1497 + depends on !UML && HAS_IOMEM
1498 help
1499 The CS553x companion chips for the AMD Geode processor
1500 include NAND flash controllers with built-in hardware ECC
1501 @@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
1502 config MTD_NAND_VF610_NFC
1503 tristate "Support for Freescale NFC for VF610/MPC5125"
1504 depends on (SOC_VF610 || COMPILE_TEST)
1505 + depends on HAS_IOMEM
1506 help
1507 Enables support for NAND Flash Controller on some Freescale
1508 processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
1509 @@ -480,7 +483,7 @@ config MTD_NAND_MXC
1510
1511 config MTD_NAND_SH_FLCTL
1512 tristate "Support for NAND on Renesas SuperH FLCTL"
1513 - depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
1514 + depends on SUPERH || COMPILE_TEST
1515 depends on HAS_IOMEM
1516 depends on HAS_DMA
1517 help
1518 @@ -519,6 +522,13 @@ config MTD_NAND_JZ4740
1519 help
1520 Enables support for NAND Flash on JZ4740 SoC based boards.
1521
1522 +config MTD_NAND_JZ4780
1523 + tristate "Support for NAND on JZ4780 SoC"
1524 + depends on MACH_JZ4780 && JZ4780_NEMC
1525 + help
1526 + Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
1527 + based boards, using the BCH controller for hardware error correction.
1528 +
1529 config MTD_NAND_FSMC
1530 tristate "Support for NAND on ST Micros FSMC"
1531 depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
1532 @@ -546,4 +556,11 @@ config MTD_NAND_HISI504
1533 help
1534 Enables support for NAND controller on Hisilicon SoC Hip04.
1535
1536 +config MTD_NAND_QCOM
1537 + tristate "Support for NAND on QCOM SoCs"
1538 + depends on ARCH_QCOM
1539 + help
1540 + Enables support for NAND flash chips on SoCs containing the EBI2 NAND
1541 + controller. This controller is found on IPQ806x SoC.
1542 +
1543 endif # MTD_NAND
1544 --- a/drivers/mtd/nand/Makefile
1545 +++ b/drivers/mtd/nand/Makefile
1546 @@ -49,11 +49,13 @@ obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mp
1547 obj-$(CONFIG_MTD_NAND_VF610_NFC) += vf610_nfc.o
1548 obj-$(CONFIG_MTD_NAND_RICOH) += r852.o
1549 obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o
1550 +obj-$(CONFIG_MTD_NAND_JZ4780) += jz4780_nand.o jz4780_bch.o
1551 obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
1552 obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
1553 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
1554 obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
1555 obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
1556 obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
1557 +obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
1558
1559 nand-objs := nand_base.o nand_bbt.o nand_timings.o
1560 --- a/drivers/mtd/nand/nand_base.c
1561 +++ b/drivers/mtd/nand/nand_base.c
1562 @@ -48,50 +48,6 @@
1563 #include <linux/mtd/partitions.h>
1564 #include <linux/of_mtd.h>
1565
1566 -/* Define default oob placement schemes for large and small page devices */
1567 -static struct nand_ecclayout nand_oob_8 = {
1568 - .eccbytes = 3,
1569 - .eccpos = {0, 1, 2},
1570 - .oobfree = {
1571 - {.offset = 3,
1572 - .length = 2},
1573 - {.offset = 6,
1574 - .length = 2} }
1575 -};
1576 -
1577 -static struct nand_ecclayout nand_oob_16 = {
1578 - .eccbytes = 6,
1579 - .eccpos = {0, 1, 2, 3, 6, 7},
1580 - .oobfree = {
1581 - {.offset = 8,
1582 - . length = 8} }
1583 -};
1584 -
1585 -static struct nand_ecclayout nand_oob_64 = {
1586 - .eccbytes = 24,
1587 - .eccpos = {
1588 - 40, 41, 42, 43, 44, 45, 46, 47,
1589 - 48, 49, 50, 51, 52, 53, 54, 55,
1590 - 56, 57, 58, 59, 60, 61, 62, 63},
1591 - .oobfree = {
1592 - {.offset = 2,
1593 - .length = 38} }
1594 -};
1595 -
1596 -static struct nand_ecclayout nand_oob_128 = {
1597 - .eccbytes = 48,
1598 - .eccpos = {
1599 - 80, 81, 82, 83, 84, 85, 86, 87,
1600 - 88, 89, 90, 91, 92, 93, 94, 95,
1601 - 96, 97, 98, 99, 100, 101, 102, 103,
1602 - 104, 105, 106, 107, 108, 109, 110, 111,
1603 - 112, 113, 114, 115, 116, 117, 118, 119,
1604 - 120, 121, 122, 123, 124, 125, 126, 127},
1605 - .oobfree = {
1606 - {.offset = 2,
1607 - .length = 78} }
1608 -};
1609 -
1610 static int nand_get_device(struct mtd_info *mtd, int new_state);
1611
1612 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
1613 @@ -103,10 +59,96 @@ static int nand_do_write_oob(struct mtd_
1614 */
1615 DEFINE_LED_TRIGGER(nand_led_trigger);
1616
1617 +/* Define default oob placement schemes for large and small page devices */
1618 +static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
1619 + struct mtd_oob_region *oobregion)
1620 +{
1621 + struct nand_chip *chip = mtd_to_nand(mtd);
1622 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1623 +
1624 + if (section > 1)
1625 + return -ERANGE;
1626 +
1627 + if (!section) {
1628 + oobregion->offset = 0;
1629 + oobregion->length = 4;
1630 + } else {
1631 + oobregion->offset = 6;
1632 + oobregion->length = ecc->total - 4;
1633 + }
1634 +
1635 + return 0;
1636 +}
1637 +
1638 +static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
1639 + struct mtd_oob_region *oobregion)
1640 +{
1641 + if (section > 1)
1642 + return -ERANGE;
1643 +
1644 + if (mtd->oobsize == 16) {
1645 + if (section)
1646 + return -ERANGE;
1647 +
1648 + oobregion->length = 8;
1649 + oobregion->offset = 8;
1650 + } else {
1651 + oobregion->length = 2;
1652 + if (!section)
1653 + oobregion->offset = 3;
1654 + else
1655 + oobregion->offset = 6;
1656 + }
1657 +
1658 + return 0;
1659 +}
1660 +
1661 +const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
1662 + .ecc = nand_ooblayout_ecc_sp,
1663 + .free = nand_ooblayout_free_sp,
1664 +};
1665 +EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
1666 +
1667 +static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
1668 + struct mtd_oob_region *oobregion)
1669 +{
1670 + struct nand_chip *chip = mtd_to_nand(mtd);
1671 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1672 +
1673 + if (section)
1674 + return -ERANGE;
1675 +
1676 + oobregion->length = ecc->total;
1677 + oobregion->offset = mtd->oobsize - oobregion->length;
1678 +
1679 + return 0;
1680 +}
1681 +
1682 +static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
1683 + struct mtd_oob_region *oobregion)
1684 +{
1685 + struct nand_chip *chip = mtd_to_nand(mtd);
1686 + struct nand_ecc_ctrl *ecc = &chip->ecc;
1687 +
1688 + if (section)
1689 + return -ERANGE;
1690 +
1691 + oobregion->length = mtd->oobsize - ecc->total - 2;
1692 + oobregion->offset = 2;
1693 +
1694 + return 0;
1695 +}
1696 +
1697 +const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
1698 + .ecc = nand_ooblayout_ecc_lp,
1699 + .free = nand_ooblayout_free_lp,
1700 +};
1701 +EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
1702 +
1703 static int check_offs_len(struct mtd_info *mtd,
1704 loff_t ofs, uint64_t len)
1705 {
1706 - struct nand_chip *chip = mtd->priv;
1707 + struct nand_chip *chip = mtd_to_nand(mtd);
1708 int ret = 0;
1709
1710 /* Start address must align on block boundary */
1711 @@ -132,7 +174,7 @@ static int check_offs_len(struct mtd_inf
1712 */
1713 static void nand_release_device(struct mtd_info *mtd)
1714 {
1715 - struct nand_chip *chip = mtd->priv;
1716 + struct nand_chip *chip = mtd_to_nand(mtd);
1717
1718 /* Release the controller and the chip */
1719 spin_lock(&chip->controller->lock);
1720 @@ -150,7 +192,7 @@ static void nand_release_device(struct m
1721 */
1722 static uint8_t nand_read_byte(struct mtd_info *mtd)
1723 {
1724 - struct nand_chip *chip = mtd->priv;
1725 + struct nand_chip *chip = mtd_to_nand(mtd);
1726 return readb(chip->IO_ADDR_R);
1727 }
1728
1729 @@ -163,7 +205,7 @@ static uint8_t nand_read_byte(struct mtd
1730 */
1731 static uint8_t nand_read_byte16(struct mtd_info *mtd)
1732 {
1733 - struct nand_chip *chip = mtd->priv;
1734 + struct nand_chip *chip = mtd_to_nand(mtd);
1735 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
1736 }
1737
1738 @@ -175,7 +217,7 @@ static uint8_t nand_read_byte16(struct m
1739 */
1740 static u16 nand_read_word(struct mtd_info *mtd)
1741 {
1742 - struct nand_chip *chip = mtd->priv;
1743 + struct nand_chip *chip = mtd_to_nand(mtd);
1744 return readw(chip->IO_ADDR_R);
1745 }
1746
1747 @@ -188,7 +230,7 @@ static u16 nand_read_word(struct mtd_inf
1748 */
1749 static void nand_select_chip(struct mtd_info *mtd, int chipnr)
1750 {
1751 - struct nand_chip *chip = mtd->priv;
1752 + struct nand_chip *chip = mtd_to_nand(mtd);
1753
1754 switch (chipnr) {
1755 case -1:
1756 @@ -211,7 +253,7 @@ static void nand_select_chip(struct mtd_
1757 */
1758 static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
1759 {
1760 - struct nand_chip *chip = mtd->priv;
1761 + struct nand_chip *chip = mtd_to_nand(mtd);
1762
1763 chip->write_buf(mtd, &byte, 1);
1764 }
1765 @@ -225,7 +267,7 @@ static void nand_write_byte(struct mtd_i
1766 */
1767 static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
1768 {
1769 - struct nand_chip *chip = mtd->priv;
1770 + struct nand_chip *chip = mtd_to_nand(mtd);
1771 uint16_t word = byte;
1772
1773 /*
1774 @@ -257,7 +299,7 @@ static void nand_write_byte16(struct mtd
1775 */
1776 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
1777 {
1778 - struct nand_chip *chip = mtd->priv;
1779 + struct nand_chip *chip = mtd_to_nand(mtd);
1780
1781 iowrite8_rep(chip->IO_ADDR_W, buf, len);
1782 }
1783 @@ -272,7 +314,7 @@ static void nand_write_buf(struct mtd_in
1784 */
1785 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1786 {
1787 - struct nand_chip *chip = mtd->priv;
1788 + struct nand_chip *chip = mtd_to_nand(mtd);
1789
1790 ioread8_rep(chip->IO_ADDR_R, buf, len);
1791 }
1792 @@ -287,7 +329,7 @@ static void nand_read_buf(struct mtd_inf
1793 */
1794 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
1795 {
1796 - struct nand_chip *chip = mtd->priv;
1797 + struct nand_chip *chip = mtd_to_nand(mtd);
1798 u16 *p = (u16 *) buf;
1799
1800 iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
1801 @@ -303,7 +345,7 @@ static void nand_write_buf16(struct mtd_
1802 */
1803 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
1804 {
1805 - struct nand_chip *chip = mtd->priv;
1806 + struct nand_chip *chip = mtd_to_nand(mtd);
1807 u16 *p = (u16 *) buf;
1808
1809 ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
1810 @@ -313,14 +355,13 @@ static void nand_read_buf16(struct mtd_i
1811 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
1812 * @mtd: MTD device structure
1813 * @ofs: offset from device start
1814 - * @getchip: 0, if the chip is already selected
1815 *
1816 * Check, if the block is bad.
1817 */
1818 -static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
1819 +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
1820 {
1821 - int page, chipnr, res = 0, i = 0;
1822 - struct nand_chip *chip = mtd->priv;
1823 + int page, res = 0, i = 0;
1824 + struct nand_chip *chip = mtd_to_nand(mtd);
1825 u16 bad;
1826
1827 if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
1828 @@ -328,15 +369,6 @@ static int nand_block_bad(struct mtd_inf
1829
1830 page = (int)(ofs >> chip->page_shift) & chip->pagemask;
1831
1832 - if (getchip) {
1833 - chipnr = (int)(ofs >> chip->chip_shift);
1834 -
1835 - nand_get_device(mtd, FL_READING);
1836 -
1837 - /* Select the NAND device */
1838 - chip->select_chip(mtd, chipnr);
1839 - }
1840 -
1841 do {
1842 if (chip->options & NAND_BUSWIDTH_16) {
1843 chip->cmdfunc(mtd, NAND_CMD_READOOB,
1844 @@ -361,11 +393,6 @@ static int nand_block_bad(struct mtd_inf
1845 i++;
1846 } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
1847
1848 - if (getchip) {
1849 - chip->select_chip(mtd, -1);
1850 - nand_release_device(mtd);
1851 - }
1852 -
1853 return res;
1854 }
1855
1856 @@ -380,7 +407,7 @@ static int nand_block_bad(struct mtd_inf
1857 */
1858 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1859 {
1860 - struct nand_chip *chip = mtd->priv;
1861 + struct nand_chip *chip = mtd_to_nand(mtd);
1862 struct mtd_oob_ops ops;
1863 uint8_t buf[2] = { 0, 0 };
1864 int ret = 0, res, i = 0;
1865 @@ -430,7 +457,7 @@ static int nand_default_block_markbad(st
1866 */
1867 static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
1868 {
1869 - struct nand_chip *chip = mtd->priv;
1870 + struct nand_chip *chip = mtd_to_nand(mtd);
1871 int res, ret = 0;
1872
1873 if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
1874 @@ -471,7 +498,7 @@ static int nand_block_markbad_lowlevel(s
1875 */
1876 static int nand_check_wp(struct mtd_info *mtd)
1877 {
1878 - struct nand_chip *chip = mtd->priv;
1879 + struct nand_chip *chip = mtd_to_nand(mtd);
1880
1881 /* Broken xD cards report WP despite being writable */
1882 if (chip->options & NAND_BROKEN_XD)
1883 @@ -491,7 +518,7 @@ static int nand_check_wp(struct mtd_info
1884 */
1885 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1886 {
1887 - struct nand_chip *chip = mtd->priv;
1888 + struct nand_chip *chip = mtd_to_nand(mtd);
1889
1890 if (!chip->bbt)
1891 return 0;
1892 @@ -503,19 +530,17 @@ static int nand_block_isreserved(struct
1893 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
1894 * @mtd: MTD device structure
1895 * @ofs: offset from device start
1896 - * @getchip: 0, if the chip is already selected
1897 * @allowbbt: 1, if its allowed to access the bbt area
1898 *
1899 * Check, if the block is bad. Either by reading the bad block table or
1900 * calling of the scan function.
1901 */
1902 -static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
1903 - int allowbbt)
1904 +static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
1905 {
1906 - struct nand_chip *chip = mtd->priv;
1907 + struct nand_chip *chip = mtd_to_nand(mtd);
1908
1909 if (!chip->bbt)
1910 - return chip->block_bad(mtd, ofs, getchip);
1911 + return chip->block_bad(mtd, ofs);
1912
1913 /* Return info from the table */
1914 return nand_isbad_bbt(mtd, ofs, allowbbt);
1915 @@ -531,7 +556,7 @@ static int nand_block_checkbad(struct mt
1916 */
1917 static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
1918 {
1919 - struct nand_chip *chip = mtd->priv;
1920 + struct nand_chip *chip = mtd_to_nand(mtd);
1921 int i;
1922
1923 /* Wait for the device to get ready */
1924 @@ -551,7 +576,7 @@ static void panic_nand_wait_ready(struct
1925 */
1926 void nand_wait_ready(struct mtd_info *mtd)
1927 {
1928 - struct nand_chip *chip = mtd->priv;
1929 + struct nand_chip *chip = mtd_to_nand(mtd);
1930 unsigned long timeo = 400;
1931
1932 if (in_interrupt() || oops_in_progress)
1933 @@ -566,8 +591,8 @@ void nand_wait_ready(struct mtd_info *mt
1934 cond_resched();
1935 } while (time_before(jiffies, timeo));
1936
1937 - pr_warn_ratelimited(
1938 - "timeout while waiting for chip to become ready\n");
1939 + if (!chip->dev_ready(mtd))
1940 + pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
1941 out:
1942 led_trigger_event(nand_led_trigger, LED_OFF);
1943 }
1944 @@ -582,7 +607,7 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
1945 */
1946 static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
1947 {
1948 - register struct nand_chip *chip = mtd->priv;
1949 + register struct nand_chip *chip = mtd_to_nand(mtd);
1950
1951 timeo = jiffies + msecs_to_jiffies(timeo);
1952 do {
1953 @@ -605,7 +630,7 @@ static void nand_wait_status_ready(struc
1954 static void nand_command(struct mtd_info *mtd, unsigned int command,
1955 int column, int page_addr)
1956 {
1957 - register struct nand_chip *chip = mtd->priv;
1958 + register struct nand_chip *chip = mtd_to_nand(mtd);
1959 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
1960
1961 /* Write out the command to the device */
1962 @@ -708,7 +733,7 @@ static void nand_command(struct mtd_info
1963 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
1964 int column, int page_addr)
1965 {
1966 - register struct nand_chip *chip = mtd->priv;
1967 + register struct nand_chip *chip = mtd_to_nand(mtd);
1968
1969 /* Emulate NAND_CMD_READOOB */
1970 if (command == NAND_CMD_READOOB) {
1971 @@ -832,7 +857,7 @@ static void panic_nand_get_device(struct
1972 static int
1973 nand_get_device(struct mtd_info *mtd, int new_state)
1974 {
1975 - struct nand_chip *chip = mtd->priv;
1976 + struct nand_chip *chip = mtd_to_nand(mtd);
1977 spinlock_t *lock = &chip->controller->lock;
1978 wait_queue_head_t *wq = &chip->controller->wq;
1979 DECLARE_WAITQUEUE(wait, current);
1980 @@ -952,7 +977,7 @@ static int __nand_unlock(struct mtd_info
1981 {
1982 int ret = 0;
1983 int status, page;
1984 - struct nand_chip *chip = mtd->priv;
1985 + struct nand_chip *chip = mtd_to_nand(mtd);
1986
1987 /* Submit address of first page to unlock */
1988 page = ofs >> chip->page_shift;
1989 @@ -987,7 +1012,7 @@ int nand_unlock(struct mtd_info *mtd, lo
1990 {
1991 int ret = 0;
1992 int chipnr;
1993 - struct nand_chip *chip = mtd->priv;
1994 + struct nand_chip *chip = mtd_to_nand(mtd);
1995
1996 pr_debug("%s: start = 0x%012llx, len = %llu\n",
1997 __func__, (unsigned long long)ofs, len);
1998 @@ -1050,7 +1075,7 @@ int nand_lock(struct mtd_info *mtd, loff
1999 {
2000 int ret = 0;
2001 int chipnr, status, page;
2002 - struct nand_chip *chip = mtd->priv;
2003 + struct nand_chip *chip = mtd_to_nand(mtd);
2004
2005 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2006 __func__, (unsigned long long)ofs, len);
2007 @@ -1309,13 +1334,12 @@ static int nand_read_page_raw_syndrome(s
2008 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2009 uint8_t *buf, int oob_required, int page)
2010 {
2011 - int i, eccsize = chip->ecc.size;
2012 + int i, eccsize = chip->ecc.size, ret;
2013 int eccbytes = chip->ecc.bytes;
2014 int eccsteps = chip->ecc.steps;
2015 uint8_t *p = buf;
2016 uint8_t *ecc_calc = chip->buffers->ecccalc;
2017 uint8_t *ecc_code = chip->buffers->ecccode;
2018 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2019 unsigned int max_bitflips = 0;
2020
2021 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
2022 @@ -1323,8 +1347,10 @@ static int nand_read_page_swecc(struct m
2023 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2024 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2025
2026 - for (i = 0; i < chip->ecc.total; i++)
2027 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2028 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2029 + chip->ecc.total);
2030 + if (ret)
2031 + return ret;
2032
2033 eccsteps = chip->ecc.steps;
2034 p = buf;
2035 @@ -1356,14 +1382,14 @@ static int nand_read_subpage(struct mtd_
2036 uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
2037 int page)
2038 {
2039 - int start_step, end_step, num_steps;
2040 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2041 + int start_step, end_step, num_steps, ret;
2042 uint8_t *p;
2043 int data_col_addr, i, gaps = 0;
2044 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
2045 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
2046 - int index;
2047 + int index, section = 0;
2048 unsigned int max_bitflips = 0;
2049 + struct mtd_oob_region oobregion = { };
2050
2051 /* Column address within the page aligned to ECC size (256bytes) */
2052 start_step = data_offs / chip->ecc.size;
2053 @@ -1391,12 +1417,13 @@ static int nand_read_subpage(struct mtd_
2054 * The performance is faster if we position offsets according to
2055 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
2056 */
2057 - for (i = 0; i < eccfrag_len - 1; i++) {
2058 - if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
2059 - gaps = 1;
2060 - break;
2061 - }
2062 - }
2063 + ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
2064 + if (ret)
2065 + return ret;
2066 +
2067 + if (oobregion.length < eccfrag_len)
2068 + gaps = 1;
2069 +
2070 if (gaps) {
2071 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
2072 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2073 @@ -1405,20 +1432,23 @@ static int nand_read_subpage(struct mtd_
2074 * Send the command to read the particular ECC bytes take care
2075 * about buswidth alignment in read_buf.
2076 */
2077 - aligned_pos = eccpos[index] & ~(busw - 1);
2078 + aligned_pos = oobregion.offset & ~(busw - 1);
2079 aligned_len = eccfrag_len;
2080 - if (eccpos[index] & (busw - 1))
2081 + if (oobregion.offset & (busw - 1))
2082 aligned_len++;
2083 - if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
2084 + if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
2085 + (busw - 1))
2086 aligned_len++;
2087
2088 chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
2089 - mtd->writesize + aligned_pos, -1);
2090 + mtd->writesize + aligned_pos, -1);
2091 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
2092 }
2093
2094 - for (i = 0; i < eccfrag_len; i++)
2095 - chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
2096 + ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
2097 + chip->oob_poi, index, eccfrag_len);
2098 + if (ret)
2099 + return ret;
2100
2101 p = bufpoi + data_col_addr;
2102 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
2103 @@ -1426,6 +1456,16 @@ static int nand_read_subpage(struct mtd_
2104
2105 stat = chip->ecc.correct(mtd, p,
2106 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
2107 + if (stat == -EBADMSG &&
2108 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2109 + /* check for empty pages with bitflips */
2110 + stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2111 + &chip->buffers->ecccode[i],
2112 + chip->ecc.bytes,
2113 + NULL, 0,
2114 + chip->ecc.strength);
2115 + }
2116 +
2117 if (stat < 0) {
2118 mtd->ecc_stats.failed++;
2119 } else {
2120 @@ -1449,13 +1489,12 @@ static int nand_read_subpage(struct mtd_
2121 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2122 uint8_t *buf, int oob_required, int page)
2123 {
2124 - int i, eccsize = chip->ecc.size;
2125 + int i, eccsize = chip->ecc.size, ret;
2126 int eccbytes = chip->ecc.bytes;
2127 int eccsteps = chip->ecc.steps;
2128 uint8_t *p = buf;
2129 uint8_t *ecc_calc = chip->buffers->ecccalc;
2130 uint8_t *ecc_code = chip->buffers->ecccode;
2131 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2132 unsigned int max_bitflips = 0;
2133
2134 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2135 @@ -1465,8 +1504,10 @@ static int nand_read_page_hwecc(struct m
2136 }
2137 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2138
2139 - for (i = 0; i < chip->ecc.total; i++)
2140 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2141 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2142 + chip->ecc.total);
2143 + if (ret)
2144 + return ret;
2145
2146 eccsteps = chip->ecc.steps;
2147 p = buf;
2148 @@ -1475,6 +1516,15 @@ static int nand_read_page_hwecc(struct m
2149 int stat;
2150
2151 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
2152 + if (stat == -EBADMSG &&
2153 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2154 + /* check for empty pages with bitflips */
2155 + stat = nand_check_erased_ecc_chunk(p, eccsize,
2156 + &ecc_code[i], eccbytes,
2157 + NULL, 0,
2158 + chip->ecc.strength);
2159 + }
2160 +
2161 if (stat < 0) {
2162 mtd->ecc_stats.failed++;
2163 } else {
2164 @@ -1502,12 +1552,11 @@ static int nand_read_page_hwecc(struct m
2165 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
2166 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
2167 {
2168 - int i, eccsize = chip->ecc.size;
2169 + int i, eccsize = chip->ecc.size, ret;
2170 int eccbytes = chip->ecc.bytes;
2171 int eccsteps = chip->ecc.steps;
2172 uint8_t *p = buf;
2173 uint8_t *ecc_code = chip->buffers->ecccode;
2174 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2175 uint8_t *ecc_calc = chip->buffers->ecccalc;
2176 unsigned int max_bitflips = 0;
2177
2178 @@ -1516,8 +1565,10 @@ static int nand_read_page_hwecc_oob_firs
2179 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
2180 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
2181
2182 - for (i = 0; i < chip->ecc.total; i++)
2183 - ecc_code[i] = chip->oob_poi[eccpos[i]];
2184 + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2185 + chip->ecc.total);
2186 + if (ret)
2187 + return ret;
2188
2189 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2190 int stat;
2191 @@ -1527,6 +1578,15 @@ static int nand_read_page_hwecc_oob_firs
2192 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2193
2194 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
2195 + if (stat == -EBADMSG &&
2196 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2197 + /* check for empty pages with bitflips */
2198 + stat = nand_check_erased_ecc_chunk(p, eccsize,
2199 + &ecc_code[i], eccbytes,
2200 + NULL, 0,
2201 + chip->ecc.strength);
2202 + }
2203 +
2204 if (stat < 0) {
2205 mtd->ecc_stats.failed++;
2206 } else {
2207 @@ -1554,6 +1614,7 @@ static int nand_read_page_syndrome(struc
2208 int i, eccsize = chip->ecc.size;
2209 int eccbytes = chip->ecc.bytes;
2210 int eccsteps = chip->ecc.steps;
2211 + int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
2212 uint8_t *p = buf;
2213 uint8_t *oob = chip->oob_poi;
2214 unsigned int max_bitflips = 0;
2215 @@ -1573,19 +1634,29 @@ static int nand_read_page_syndrome(struc
2216 chip->read_buf(mtd, oob, eccbytes);
2217 stat = chip->ecc.correct(mtd, p, oob, NULL);
2218
2219 - if (stat < 0) {
2220 - mtd->ecc_stats.failed++;
2221 - } else {
2222 - mtd->ecc_stats.corrected += stat;
2223 - max_bitflips = max_t(unsigned int, max_bitflips, stat);
2224 - }
2225 -
2226 oob += eccbytes;
2227
2228 if (chip->ecc.postpad) {
2229 chip->read_buf(mtd, oob, chip->ecc.postpad);
2230 oob += chip->ecc.postpad;
2231 }
2232 +
2233 + if (stat == -EBADMSG &&
2234 + (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2235 + /* check for empty pages with bitflips */
2236 + stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2237 + oob - eccpadbytes,
2238 + eccpadbytes,
2239 + NULL, 0,
2240 + chip->ecc.strength);
2241 + }
2242 +
2243 + if (stat < 0) {
2244 + mtd->ecc_stats.failed++;
2245 + } else {
2246 + mtd->ecc_stats.corrected += stat;
2247 + max_bitflips = max_t(unsigned int, max_bitflips, stat);
2248 + }
2249 }
2250
2251 /* Calculate remaining oob bytes */
2252 @@ -1598,14 +1669,17 @@ static int nand_read_page_syndrome(struc
2253
2254 /**
2255 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
2256 - * @chip: nand chip structure
2257 + * @mtd: mtd info structure
2258 * @oob: oob destination address
2259 * @ops: oob ops structure
2260 * @len: size of oob to transfer
2261 */
2262 -static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
2263 +static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
2264 struct mtd_oob_ops *ops, size_t len)
2265 {
2266 + struct nand_chip *chip = mtd_to_nand(mtd);
2267 + int ret;
2268 +
2269 switch (ops->mode) {
2270
2271 case MTD_OPS_PLACE_OOB:
2272 @@ -1613,31 +1687,12 @@ static uint8_t *nand_transfer_oob(struct
2273 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
2274 return oob + len;
2275
2276 - case MTD_OPS_AUTO_OOB: {
2277 - struct nand_oobfree *free = chip->ecc.layout->oobfree;
2278 - uint32_t boffs = 0, roffs = ops->ooboffs;
2279 - size_t bytes = 0;
2280 -
2281 - for (; free->length && len; free++, len -= bytes) {
2282 - /* Read request not from offset 0? */
2283 - if (unlikely(roffs)) {
2284 - if (roffs >= free->length) {
2285 - roffs -= free->length;
2286 - continue;
2287 - }
2288 - boffs = free->offset + roffs;
2289 - bytes = min_t(size_t, len,
2290 - (free->length - roffs));
2291 - roffs = 0;
2292 - } else {
2293 - bytes = min_t(size_t, len, free->length);
2294 - boffs = free->offset;
2295 - }
2296 - memcpy(oob, chip->oob_poi + boffs, bytes);
2297 - oob += bytes;
2298 - }
2299 - return oob;
2300 - }
2301 + case MTD_OPS_AUTO_OOB:
2302 + ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
2303 + ops->ooboffs, len);
2304 + BUG_ON(ret);
2305 + return oob + len;
2306 +
2307 default:
2308 BUG();
2309 }
2310 @@ -1655,7 +1710,7 @@ static uint8_t *nand_transfer_oob(struct
2311 */
2312 static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
2313 {
2314 - struct nand_chip *chip = mtd->priv;
2315 + struct nand_chip *chip = mtd_to_nand(mtd);
2316
2317 pr_debug("setting READ RETRY mode %d\n", retry_mode);
2318
2319 @@ -1680,12 +1735,11 @@ static int nand_do_read_ops(struct mtd_i
2320 struct mtd_oob_ops *ops)
2321 {
2322 int chipnr, page, realpage, col, bytes, aligned, oob_required;
2323 - struct nand_chip *chip = mtd->priv;
2324 + struct nand_chip *chip = mtd_to_nand(mtd);
2325 int ret = 0;
2326 uint32_t readlen = ops->len;
2327 uint32_t oobreadlen = ops->ooblen;
2328 - uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
2329 - mtd->oobavail : mtd->oobsize;
2330 + uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2331
2332 uint8_t *bufpoi, *oob, *buf;
2333 int use_bufpoi;
2334 @@ -1772,7 +1826,7 @@ read_retry:
2335 int toread = min(oobreadlen, max_oobsize);
2336
2337 if (toread) {
2338 - oob = nand_transfer_oob(chip,
2339 + oob = nand_transfer_oob(mtd,
2340 oob, ops, toread);
2341 oobreadlen -= toread;
2342 }
2343 @@ -2024,7 +2078,7 @@ static int nand_do_read_oob(struct mtd_i
2344 struct mtd_oob_ops *ops)
2345 {
2346 int page, realpage, chipnr;
2347 - struct nand_chip *chip = mtd->priv;
2348 + struct nand_chip *chip = mtd_to_nand(mtd);
2349 struct mtd_ecc_stats stats;
2350 int readlen = ops->ooblen;
2351 int len;
2352 @@ -2036,10 +2090,7 @@ static int nand_do_read_oob(struct mtd_i
2353
2354 stats = mtd->ecc_stats;
2355
2356 - if (ops->mode == MTD_OPS_AUTO_OOB)
2357 - len = chip->ecc.layout->oobavail;
2358 - else
2359 - len = mtd->oobsize;
2360 + len = mtd_oobavail(mtd, ops);
2361
2362 if (unlikely(ops->ooboffs >= len)) {
2363 pr_debug("%s: attempt to start read outside oob\n",
2364 @@ -2073,7 +2124,7 @@ static int nand_do_read_oob(struct mtd_i
2365 break;
2366
2367 len = min(len, readlen);
2368 - buf = nand_transfer_oob(chip, buf, ops, len);
2369 + buf = nand_transfer_oob(mtd, buf, ops, len);
2370
2371 if (chip->options & NAND_NEED_READRDY) {
2372 /* Apply delay or wait for ready/busy pin */
2373 @@ -2232,19 +2283,20 @@ static int nand_write_page_swecc(struct
2374 const uint8_t *buf, int oob_required,
2375 int page)
2376 {
2377 - int i, eccsize = chip->ecc.size;
2378 + int i, eccsize = chip->ecc.size, ret;
2379 int eccbytes = chip->ecc.bytes;
2380 int eccsteps = chip->ecc.steps;
2381 uint8_t *ecc_calc = chip->buffers->ecccalc;
2382 const uint8_t *p = buf;
2383 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2384
2385 /* Software ECC calculation */
2386 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2387 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2388
2389 - for (i = 0; i < chip->ecc.total; i++)
2390 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2391 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2392 + chip->ecc.total);
2393 + if (ret)
2394 + return ret;
2395
2396 return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2397 }
2398 @@ -2261,12 +2313,11 @@ static int nand_write_page_hwecc(struct
2399 const uint8_t *buf, int oob_required,
2400 int page)
2401 {
2402 - int i, eccsize = chip->ecc.size;
2403 + int i, eccsize = chip->ecc.size, ret;
2404 int eccbytes = chip->ecc.bytes;
2405 int eccsteps = chip->ecc.steps;
2406 uint8_t *ecc_calc = chip->buffers->ecccalc;
2407 const uint8_t *p = buf;
2408 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2409
2410 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2411 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2412 @@ -2274,8 +2325,10 @@ static int nand_write_page_hwecc(struct
2413 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2414 }
2415
2416 - for (i = 0; i < chip->ecc.total; i++)
2417 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2418 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2419 + chip->ecc.total);
2420 + if (ret)
2421 + return ret;
2422
2423 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2424
2425 @@ -2303,11 +2356,10 @@ static int nand_write_subpage_hwecc(stru
2426 int ecc_size = chip->ecc.size;
2427 int ecc_bytes = chip->ecc.bytes;
2428 int ecc_steps = chip->ecc.steps;
2429 - uint32_t *eccpos = chip->ecc.layout->eccpos;
2430 uint32_t start_step = offset / ecc_size;
2431 uint32_t end_step = (offset + data_len - 1) / ecc_size;
2432 int oob_bytes = mtd->oobsize / ecc_steps;
2433 - int step, i;
2434 + int step, ret;
2435
2436 for (step = 0; step < ecc_steps; step++) {
2437 /* configure controller for WRITE access */
2438 @@ -2335,8 +2387,10 @@ static int nand_write_subpage_hwecc(stru
2439 /* copy calculated ECC for whole page to chip->buffer->oob */
2440 /* this include masked-value(0xFF) for unwritten subpages */
2441 ecc_calc = chip->buffers->ecccalc;
2442 - for (i = 0; i < chip->ecc.total; i++)
2443 - chip->oob_poi[eccpos[i]] = ecc_calc[i];
2444 + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
2445 + chip->ecc.total);
2446 + if (ret)
2447 + return ret;
2448
2449 /* write OOB buffer to NAND device */
2450 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
2451 @@ -2472,7 +2526,8 @@ static int nand_write_page(struct mtd_in
2452 static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2453 struct mtd_oob_ops *ops)
2454 {
2455 - struct nand_chip *chip = mtd->priv;
2456 + struct nand_chip *chip = mtd_to_nand(mtd);
2457 + int ret;
2458
2459 /*
2460 * Initialise to all 0xFF, to avoid the possibility of left over OOB
2461 @@ -2487,31 +2542,12 @@ static uint8_t *nand_fill_oob(struct mtd
2462 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
2463 return oob + len;
2464
2465 - case MTD_OPS_AUTO_OOB: {
2466 - struct nand_oobfree *free = chip->ecc.layout->oobfree;
2467 - uint32_t boffs = 0, woffs = ops->ooboffs;
2468 - size_t bytes = 0;
2469 -
2470 - for (; free->length && len; free++, len -= bytes) {
2471 - /* Write request not from offset 0? */
2472 - if (unlikely(woffs)) {
2473 - if (woffs >= free->length) {
2474 - woffs -= free->length;
2475 - continue;
2476 - }
2477 - boffs = free->offset + woffs;
2478 - bytes = min_t(size_t, len,
2479 - (free->length - woffs));
2480 - woffs = 0;
2481 - } else {
2482 - bytes = min_t(size_t, len, free->length);
2483 - boffs = free->offset;
2484 - }
2485 - memcpy(chip->oob_poi + boffs, oob, bytes);
2486 - oob += bytes;
2487 - }
2488 - return oob;
2489 - }
2490 + case MTD_OPS_AUTO_OOB:
2491 + ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
2492 + ops->ooboffs, len);
2493 + BUG_ON(ret);
2494 + return oob + len;
2495 +
2496 default:
2497 BUG();
2498 }
2499 @@ -2532,12 +2568,11 @@ static int nand_do_write_ops(struct mtd_
2500 struct mtd_oob_ops *ops)
2501 {
2502 int chipnr, realpage, page, blockmask, column;
2503 - struct nand_chip *chip = mtd->priv;
2504 + struct nand_chip *chip = mtd_to_nand(mtd);
2505 uint32_t writelen = ops->len;
2506
2507 uint32_t oobwritelen = ops->ooblen;
2508 - uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
2509 - mtd->oobavail : mtd->oobsize;
2510 + uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
2511
2512 uint8_t *oob = ops->oobbuf;
2513 uint8_t *buf = ops->datbuf;
2514 @@ -2662,7 +2697,7 @@ err_out:
2515 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2516 size_t *retlen, const uint8_t *buf)
2517 {
2518 - struct nand_chip *chip = mtd->priv;
2519 + struct nand_chip *chip = mtd_to_nand(mtd);
2520 struct mtd_oob_ops ops;
2521 int ret;
2522
2523 @@ -2722,15 +2757,12 @@ static int nand_do_write_oob(struct mtd_
2524 struct mtd_oob_ops *ops)
2525 {
2526 int chipnr, page, status, len;
2527 - struct nand_chip *chip = mtd->priv;
2528 + struct nand_chip *chip = mtd_to_nand(mtd);
2529
2530 pr_debug("%s: to = 0x%08x, len = %i\n",
2531 __func__, (unsigned int)to, (int)ops->ooblen);
2532
2533 - if (ops->mode == MTD_OPS_AUTO_OOB)
2534 - len = chip->ecc.layout->oobavail;
2535 - else
2536 - len = mtd->oobsize;
2537 + len = mtd_oobavail(mtd, ops);
2538
2539 /* Do not allow write past end of page */
2540 if ((ops->ooboffs + ops->ooblen) > len) {
2541 @@ -2847,7 +2879,7 @@ out:
2542 */
2543 static int single_erase(struct mtd_info *mtd, int page)
2544 {
2545 - struct nand_chip *chip = mtd->priv;
2546 + struct nand_chip *chip = mtd_to_nand(mtd);
2547 /* Send commands to erase a block */
2548 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2549 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2550 @@ -2879,7 +2911,7 @@ int nand_erase_nand(struct mtd_info *mtd
2551 int allowbbt)
2552 {
2553 int page, status, pages_per_block, ret, chipnr;
2554 - struct nand_chip *chip = mtd->priv;
2555 + struct nand_chip *chip = mtd_to_nand(mtd);
2556 loff_t len;
2557
2558 pr_debug("%s: start = 0x%012llx, len = %llu\n",
2559 @@ -2918,7 +2950,7 @@ int nand_erase_nand(struct mtd_info *mtd
2560 while (len) {
2561 /* Check if we have a bad block, we do not erase bad blocks! */
2562 if (nand_block_checkbad(mtd, ((loff_t) page) <<
2563 - chip->page_shift, 0, allowbbt)) {
2564 + chip->page_shift, allowbbt)) {
2565 pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
2566 __func__, page);
2567 instr->state = MTD_ERASE_FAILED;
2568 @@ -3005,7 +3037,20 @@ static void nand_sync(struct mtd_info *m
2569 */
2570 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2571 {
2572 - return nand_block_checkbad(mtd, offs, 1, 0);
2573 + struct nand_chip *chip = mtd_to_nand(mtd);
2574 + int chipnr = (int)(offs >> chip->chip_shift);
2575 + int ret;
2576 +
2577 + /* Select the NAND device */
2578 + nand_get_device(mtd, FL_READING);
2579 + chip->select_chip(mtd, chipnr);
2580 +
2581 + ret = nand_block_checkbad(mtd, offs, 0);
2582 +
2583 + chip->select_chip(mtd, -1);
2584 + nand_release_device(mtd);
2585 +
2586 + return ret;
2587 }
2588
2589 /**
2590 @@ -3094,7 +3139,7 @@ static int nand_suspend(struct mtd_info
2591 */
2592 static void nand_resume(struct mtd_info *mtd)
2593 {
2594 - struct nand_chip *chip = mtd->priv;
2595 + struct nand_chip *chip = mtd_to_nand(mtd);
2596
2597 if (chip->state == FL_PM_SUSPENDED)
2598 nand_release_device(mtd);
2599 @@ -3266,7 +3311,7 @@ ext_out:
2600
2601 static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
2602 {
2603 - struct nand_chip *chip = mtd->priv;
2604 + struct nand_chip *chip = mtd_to_nand(mtd);
2605 uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
2606
2607 return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
2608 @@ -3937,10 +3982,13 @@ ident_done:
2609 return type;
2610 }
2611
2612 -static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
2613 - struct device_node *dn)
2614 +static int nand_dt_init(struct nand_chip *chip)
2615 {
2616 - int ecc_mode, ecc_strength, ecc_step;
2617 + struct device_node *dn = nand_get_flash_node(chip);
2618 + int ecc_mode, ecc_algo, ecc_strength, ecc_step;
2619 +
2620 + if (!dn)
2621 + return 0;
2622
2623 if (of_get_nand_bus_width(dn) == 16)
2624 chip->options |= NAND_BUSWIDTH_16;
2625 @@ -3949,6 +3997,7 @@ static int nand_dt_init(struct mtd_info
2626 chip->bbt_options |= NAND_BBT_USE_FLASH;
2627
2628 ecc_mode = of_get_nand_ecc_mode(dn);
2629 + ecc_algo = of_get_nand_ecc_algo(dn);
2630 ecc_strength = of_get_nand_ecc_strength(dn);
2631 ecc_step = of_get_nand_ecc_step_size(dn);
2632
2633 @@ -3961,6 +4010,9 @@ static int nand_dt_init(struct mtd_info
2634 if (ecc_mode >= 0)
2635 chip->ecc.mode = ecc_mode;
2636
2637 + if (ecc_algo >= 0)
2638 + chip->ecc.algo = ecc_algo;
2639 +
2640 if (ecc_strength >= 0)
2641 chip->ecc.strength = ecc_strength;
2642
2643 @@ -3984,15 +4036,16 @@ int nand_scan_ident(struct mtd_info *mtd
2644 struct nand_flash_dev *table)
2645 {
2646 int i, nand_maf_id, nand_dev_id;
2647 - struct nand_chip *chip = mtd->priv;
2648 + struct nand_chip *chip = mtd_to_nand(mtd);
2649 struct nand_flash_dev *type;
2650 int ret;
2651
2652 - if (chip->flash_node) {
2653 - ret = nand_dt_init(mtd, chip, chip->flash_node);
2654 - if (ret)
2655 - return ret;
2656 - }
2657 + ret = nand_dt_init(chip);
2658 + if (ret)
2659 + return ret;
2660 +
2661 + if (!mtd->name && mtd->dev.parent)
2662 + mtd->name = dev_name(mtd->dev.parent);
2663
2664 if (!mtd->name && mtd->dev.parent)
2665 mtd->name = dev_name(mtd->dev.parent);
2666 @@ -4055,7 +4108,7 @@ EXPORT_SYMBOL(nand_scan_ident);
2667 */
2668 static bool nand_ecc_strength_good(struct mtd_info *mtd)
2669 {
2670 - struct nand_chip *chip = mtd->priv;
2671 + struct nand_chip *chip = mtd_to_nand(mtd);
2672 struct nand_ecc_ctrl *ecc = &chip->ecc;
2673 int corr, ds_corr;
2674
2675 @@ -4083,10 +4136,10 @@ static bool nand_ecc_strength_good(struc
2676 */
2677 int nand_scan_tail(struct mtd_info *mtd)
2678 {
2679 - int i;
2680 - struct nand_chip *chip = mtd->priv;
2681 + struct nand_chip *chip = mtd_to_nand(mtd);
2682 struct nand_ecc_ctrl *ecc = &chip->ecc;
2683 struct nand_buffers *nbuf;
2684 + int ret;
2685
2686 /* New bad blocks should be marked in OOB, flash-based BBT, or both */
2687 BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
2688 @@ -4113,19 +4166,15 @@ int nand_scan_tail(struct mtd_info *mtd)
2689 /*
2690 * If no default placement scheme is given, select an appropriate one.
2691 */
2692 - if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
2693 + if (!mtd->ooblayout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
2694 switch (mtd->oobsize) {
2695 case 8:
2696 - ecc->layout = &nand_oob_8;
2697 - break;
2698 case 16:
2699 - ecc->layout = &nand_oob_16;
2700 + mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
2701 break;
2702 case 64:
2703 - ecc->layout = &nand_oob_64;
2704 - break;
2705 case 128:
2706 - ecc->layout = &nand_oob_128;
2707 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
2708 break;
2709 default:
2710 pr_warn("No oob scheme defined for oobsize %d\n",
2711 @@ -4168,7 +4217,7 @@ int nand_scan_tail(struct mtd_info *mtd)
2712 ecc->write_oob = nand_write_oob_std;
2713 if (!ecc->read_subpage)
2714 ecc->read_subpage = nand_read_subpage;
2715 - if (!ecc->write_subpage)
2716 + if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
2717 ecc->write_subpage = nand_write_subpage_hwecc;
2718
2719 case NAND_ECC_HW_SYNDROME:
2720 @@ -4246,10 +4295,8 @@ int nand_scan_tail(struct mtd_info *mtd)
2721 }
2722
2723 /* See nand_bch_init() for details. */
2724 - ecc->bytes = DIV_ROUND_UP(
2725 - ecc->strength * fls(8 * ecc->size), 8);
2726 - ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes,
2727 - &ecc->layout);
2728 + ecc->bytes = 0;
2729 + ecc->priv = nand_bch_init(mtd);
2730 if (!ecc->priv) {
2731 pr_warn("BCH ECC initialization failed!\n");
2732 BUG();
2733 @@ -4280,20 +4327,9 @@ int nand_scan_tail(struct mtd_info *mtd)
2734 if (!ecc->write_oob_raw)
2735 ecc->write_oob_raw = ecc->write_oob;
2736
2737 - /*
2738 - * The number of bytes available for a client to place data into
2739 - * the out of band area.
2740 - */
2741 - ecc->layout->oobavail = 0;
2742 - for (i = 0; ecc->layout->oobfree[i].length
2743 - && i < ARRAY_SIZE(ecc->layout->oobfree); i++)
2744 - ecc->layout->oobavail += ecc->layout->oobfree[i].length;
2745 - mtd->oobavail = ecc->layout->oobavail;
2746 -
2747 - /* ECC sanity check: warn if it's too weak */
2748 - if (!nand_ecc_strength_good(mtd))
2749 - pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
2750 - mtd->name);
2751 + /* propagate ecc info to mtd_info */
2752 + mtd->ecc_strength = ecc->strength;
2753 + mtd->ecc_step_size = ecc->size;
2754
2755 /*
2756 * Set the number of read / write steps for one page depending on ECC
2757 @@ -4306,6 +4342,21 @@ int nand_scan_tail(struct mtd_info *mtd)
2758 }
2759 ecc->total = ecc->steps * ecc->bytes;
2760
2761 + /*
2762 + * The number of bytes available for a client to place data into
2763 + * the out of band area.
2764 + */
2765 + ret = mtd_ooblayout_count_freebytes(mtd);
2766 + if (ret < 0)
2767 + ret = 0;
2768 +
2769 + mtd->oobavail = ret;
2770 +
2771 + /* ECC sanity check: warn if it's too weak */
2772 + if (!nand_ecc_strength_good(mtd))
2773 + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
2774 + mtd->name);
2775 +
2776 /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
2777 if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
2778 switch (ecc->steps) {
2779 @@ -4362,10 +4413,6 @@ int nand_scan_tail(struct mtd_info *mtd)
2780 mtd->_block_markbad = nand_block_markbad;
2781 mtd->writebufsize = mtd->writesize;
2782
2783 - /* propagate ecc info to mtd_info */
2784 - mtd->ecclayout = ecc->layout;
2785 - mtd->ecc_strength = ecc->strength;
2786 - mtd->ecc_step_size = ecc->size;
2787 /*
2788 * Initialize bitflip_threshold to its default prior scan_bbt() call.
2789 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
2790 @@ -4421,7 +4468,7 @@ EXPORT_SYMBOL(nand_scan);
2791 */
2792 void nand_release(struct mtd_info *mtd)
2793 {
2794 - struct nand_chip *chip = mtd->priv;
2795 + struct nand_chip *chip = mtd_to_nand(mtd);
2796
2797 if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
2798 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
2799 --- a/drivers/mtd/nand/nand_bbt.c
2800 +++ b/drivers/mtd/nand/nand_bbt.c
2801 @@ -172,7 +172,7 @@ static int read_bbt(struct mtd_info *mtd
2802 struct nand_bbt_descr *td, int offs)
2803 {
2804 int res, ret = 0, i, j, act = 0;
2805 - struct nand_chip *this = mtd->priv;
2806 + struct nand_chip *this = mtd_to_nand(mtd);
2807 size_t retlen, len, totlen;
2808 loff_t from;
2809 int bits = td->options & NAND_BBT_NRBITS_MSK;
2810 @@ -263,7 +263,7 @@ static int read_bbt(struct mtd_info *mtd
2811 */
2812 static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
2813 {
2814 - struct nand_chip *this = mtd->priv;
2815 + struct nand_chip *this = mtd_to_nand(mtd);
2816 int res = 0, i;
2817
2818 if (td->options & NAND_BBT_PERCHIP) {
2819 @@ -388,7 +388,7 @@ static u32 bbt_get_ver_offs(struct mtd_i
2820 static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
2821 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
2822 {
2823 - struct nand_chip *this = mtd->priv;
2824 + struct nand_chip *this = mtd_to_nand(mtd);
2825
2826 /* Read the primary version, if available */
2827 if (td->options & NAND_BBT_VERSION) {
2828 @@ -454,7 +454,7 @@ static int scan_block_fast(struct mtd_in
2829 static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
2830 struct nand_bbt_descr *bd, int chip)
2831 {
2832 - struct nand_chip *this = mtd->priv;
2833 + struct nand_chip *this = mtd_to_nand(mtd);
2834 int i, numblocks, numpages;
2835 int startblock;
2836 loff_t from;
2837 @@ -523,7 +523,7 @@ static int create_bbt(struct mtd_info *m
2838 */
2839 static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
2840 {
2841 - struct nand_chip *this = mtd->priv;
2842 + struct nand_chip *this = mtd_to_nand(mtd);
2843 int i, chips;
2844 int startblock, block, dir;
2845 int scanlen = mtd->writesize + mtd->oobsize;
2846 @@ -618,7 +618,7 @@ static int write_bbt(struct mtd_info *mt
2847 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
2848 int chipsel)
2849 {
2850 - struct nand_chip *this = mtd->priv;
2851 + struct nand_chip *this = mtd_to_nand(mtd);
2852 struct erase_info einfo;
2853 int i, res, chip = 0;
2854 int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
2855 @@ -819,7 +819,7 @@ static int write_bbt(struct mtd_info *mt
2856 */
2857 static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2858 {
2859 - struct nand_chip *this = mtd->priv;
2860 + struct nand_chip *this = mtd_to_nand(mtd);
2861
2862 return create_bbt(mtd, this->buffers->databuf, bd, -1);
2863 }
2864 @@ -838,7 +838,7 @@ static inline int nand_memory_bbt(struct
2865 static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
2866 {
2867 int i, chips, writeops, create, chipsel, res, res2;
2868 - struct nand_chip *this = mtd->priv;
2869 + struct nand_chip *this = mtd_to_nand(mtd);
2870 struct nand_bbt_descr *td = this->bbt_td;
2871 struct nand_bbt_descr *md = this->bbt_md;
2872 struct nand_bbt_descr *rd, *rd2;
2873 @@ -962,7 +962,7 @@ static int check_create(struct mtd_info
2874 */
2875 static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
2876 {
2877 - struct nand_chip *this = mtd->priv;
2878 + struct nand_chip *this = mtd_to_nand(mtd);
2879 int i, j, chips, block, nrblocks, update;
2880 uint8_t oldval;
2881
2882 @@ -1022,7 +1022,7 @@ static void mark_bbt_region(struct mtd_i
2883 */
2884 static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2885 {
2886 - struct nand_chip *this = mtd->priv;
2887 + struct nand_chip *this = mtd_to_nand(mtd);
2888 u32 pattern_len;
2889 u32 bits;
2890 u32 table_size;
2891 @@ -1074,7 +1074,7 @@ static void verify_bbt_descr(struct mtd_
2892 */
2893 static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
2894 {
2895 - struct nand_chip *this = mtd->priv;
2896 + struct nand_chip *this = mtd_to_nand(mtd);
2897 int len, res;
2898 uint8_t *buf;
2899 struct nand_bbt_descr *td = this->bbt_td;
2900 @@ -1147,7 +1147,7 @@ err:
2901 */
2902 static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
2903 {
2904 - struct nand_chip *this = mtd->priv;
2905 + struct nand_chip *this = mtd_to_nand(mtd);
2906 int len, res = 0;
2907 int chip, chipsel;
2908 uint8_t *buf;
2909 @@ -1281,7 +1281,7 @@ static int nand_create_badblock_pattern(
2910 */
2911 int nand_default_bbt(struct mtd_info *mtd)
2912 {
2913 - struct nand_chip *this = mtd->priv;
2914 + struct nand_chip *this = mtd_to_nand(mtd);
2915 int ret;
2916
2917 /* Is a flash based bad block table requested? */
2918 @@ -1317,7 +1317,7 @@ int nand_default_bbt(struct mtd_info *mt
2919 */
2920 int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
2921 {
2922 - struct nand_chip *this = mtd->priv;
2923 + struct nand_chip *this = mtd_to_nand(mtd);
2924 int block;
2925
2926 block = (int)(offs >> this->bbt_erase_shift);
2927 @@ -1332,7 +1332,7 @@ int nand_isreserved_bbt(struct mtd_info
2928 */
2929 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
2930 {
2931 - struct nand_chip *this = mtd->priv;
2932 + struct nand_chip *this = mtd_to_nand(mtd);
2933 int block, res;
2934
2935 block = (int)(offs >> this->bbt_erase_shift);
2936 @@ -1359,7 +1359,7 @@ int nand_isbad_bbt(struct mtd_info *mtd,
2937 */
2938 int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
2939 {
2940 - struct nand_chip *this = mtd->priv;
2941 + struct nand_chip *this = mtd_to_nand(mtd);
2942 int block, ret = 0;
2943
2944 block = (int)(offs >> this->bbt_erase_shift);
2945 @@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mt
2946
2947 return ret;
2948 }
2949 -
2950 -EXPORT_SYMBOL(nand_scan_bbt);
2951 --- a/drivers/mtd/nand/nand_bch.c
2952 +++ b/drivers/mtd/nand/nand_bch.c
2953 @@ -32,13 +32,11 @@
2954 /**
2955 * struct nand_bch_control - private NAND BCH control structure
2956 * @bch: BCH control structure
2957 - * @ecclayout: private ecc layout for this BCH configuration
2958 * @errloc: error location array
2959 * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
2960 */
2961 struct nand_bch_control {
2962 struct bch_control *bch;
2963 - struct nand_ecclayout ecclayout;
2964 unsigned int *errloc;
2965 unsigned char *eccmask;
2966 };
2967 @@ -52,7 +50,7 @@ struct nand_bch_control {
2968 int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
2969 unsigned char *code)
2970 {
2971 - const struct nand_chip *chip = mtd->priv;
2972 + const struct nand_chip *chip = mtd_to_nand(mtd);
2973 struct nand_bch_control *nbc = chip->ecc.priv;
2974 unsigned int i;
2975
2976 @@ -79,7 +77,7 @@ EXPORT_SYMBOL(nand_bch_calculate_ecc);
2977 int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
2978 unsigned char *read_ecc, unsigned char *calc_ecc)
2979 {
2980 - const struct nand_chip *chip = mtd->priv;
2981 + const struct nand_chip *chip = mtd_to_nand(mtd);
2982 struct nand_bch_control *nbc = chip->ecc.priv;
2983 unsigned int *errloc = nbc->errloc;
2984 int i, count;
2985 @@ -98,7 +96,7 @@ int nand_bch_correct_data(struct mtd_inf
2986 }
2987 } else if (count < 0) {
2988 printk(KERN_ERR "ecc unrecoverable error\n");
2989 - count = -1;
2990 + count = -EBADMSG;
2991 }
2992 return count;
2993 }
2994 @@ -107,9 +105,6 @@ EXPORT_SYMBOL(nand_bch_correct_data);
2995 /**
2996 * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
2997 * @mtd: MTD block structure
2998 - * @eccsize: ecc block size in bytes
2999 - * @eccbytes: ecc length in bytes
3000 - * @ecclayout: output default layout
3001 *
3002 * Returns:
3003 * a pointer to a new NAND BCH control structure, or NULL upon failure
3004 @@ -123,14 +118,20 @@ EXPORT_SYMBOL(nand_bch_correct_data);
3005 * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
3006 * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
3007 */
3008 -struct nand_bch_control *
3009 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
3010 - struct nand_ecclayout **ecclayout)
3011 +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
3012 {
3013 + struct nand_chip *nand = mtd_to_nand(mtd);
3014 unsigned int m, t, eccsteps, i;
3015 - struct nand_ecclayout *layout;
3016 struct nand_bch_control *nbc = NULL;
3017 unsigned char *erased_page;
3018 + unsigned int eccsize = nand->ecc.size;
3019 + unsigned int eccbytes = nand->ecc.bytes;
3020 + unsigned int eccstrength = nand->ecc.strength;
3021 +
3022 + if (!eccbytes && eccstrength) {
3023 + eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
3024 + nand->ecc.bytes = eccbytes;
3025 + }
3026
3027 if (!eccsize || !eccbytes) {
3028 printk(KERN_WARNING "ecc parameters not supplied\n");
3029 @@ -158,7 +159,7 @@ nand_bch_init(struct mtd_info *mtd, unsi
3030 eccsteps = mtd->writesize/eccsize;
3031
3032 /* if no ecc placement scheme was provided, build one */
3033 - if (!*ecclayout) {
3034 + if (!mtd->ooblayout) {
3035
3036 /* handle large page devices only */
3037 if (mtd->oobsize < 64) {
3038 @@ -167,24 +168,7 @@ nand_bch_init(struct mtd_info *mtd, unsi
3039 goto fail;
3040 }
3041
3042 - layout = &nbc->ecclayout;
3043 - layout->eccbytes = eccsteps*eccbytes;
3044 -
3045 - /* reserve 2 bytes for bad block marker */
3046 - if (layout->eccbytes+2 > mtd->oobsize) {
3047 - printk(KERN_WARNING "no suitable oob scheme available "
3048 - "for oobsize %d eccbytes %u\n", mtd->oobsize,
3049 - eccbytes);
3050 - goto fail;
3051 - }
3052 - /* put ecc bytes at oob tail */
3053 - for (i = 0; i < layout->eccbytes; i++)
3054 - layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
3055 -
3056 - layout->oobfree[0].offset = 2;
3057 - layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
3058 -
3059 - *ecclayout = layout;
3060 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
3061 }
3062
3063 /* sanity checks */
3064 @@ -192,7 +176,8 @@ nand_bch_init(struct mtd_info *mtd, unsi
3065 printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
3066 goto fail;
3067 }
3068 - if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
3069 +
3070 + if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
3071 printk(KERN_WARNING "invalid ecc layout\n");
3072 goto fail;
3073 }
3074 @@ -216,6 +201,9 @@ nand_bch_init(struct mtd_info *mtd, unsi
3075 for (i = 0; i < eccbytes; i++)
3076 nbc->eccmask[i] ^= 0xff;
3077
3078 + if (!eccstrength)
3079 + nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
3080 +
3081 return nbc;
3082 fail:
3083 nand_bch_free(nbc);
3084 --- a/drivers/mtd/nand/nand_ecc.c
3085 +++ b/drivers/mtd/nand/nand_ecc.c
3086 @@ -424,7 +424,7 @@ int nand_calculate_ecc(struct mtd_info *
3087 unsigned char *code)
3088 {
3089 __nand_calculate_ecc(buf,
3090 - ((struct nand_chip *)mtd->priv)->ecc.size, code);
3091 + mtd_to_nand(mtd)->ecc.size, code);
3092
3093 return 0;
3094 }
3095 @@ -524,7 +524,7 @@ int nand_correct_data(struct mtd_info *m
3096 unsigned char *read_ecc, unsigned char *calc_ecc)
3097 {
3098 return __nand_correct_data(buf, read_ecc, calc_ecc,
3099 - ((struct nand_chip *)mtd->priv)->ecc.size);
3100 + mtd_to_nand(mtd)->ecc.size);
3101 }
3102 EXPORT_SYMBOL(nand_correct_data);
3103
3104 --- a/drivers/mtd/nand/nand_ids.c
3105 +++ b/drivers/mtd/nand/nand_ids.c
3106 @@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] =
3107 SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
3108 {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
3109 { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
3110 - SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
3111 - 4 },
3112 + SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
3113 + NAND_ECC_INFO(40, SZ_1K), 4 },
3114
3115 LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
3116 LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
3117 --- a/drivers/mtd/nand/nandsim.c
3118 +++ b/drivers/mtd/nand/nandsim.c
3119 @@ -666,8 +666,8 @@ static char *get_partition_name(int i)
3120 */
3121 static int init_nandsim(struct mtd_info *mtd)
3122 {
3123 - struct nand_chip *chip = mtd->priv;
3124 - struct nandsim *ns = chip->priv;
3125 + struct nand_chip *chip = mtd_to_nand(mtd);
3126 + struct nandsim *ns = nand_get_controller_data(chip);
3127 int i, ret = 0;
3128 uint64_t remains;
3129 uint64_t next_offset;
3130 @@ -1908,7 +1908,8 @@ static void switch_state(struct nandsim
3131
3132 static u_char ns_nand_read_byte(struct mtd_info *mtd)
3133 {
3134 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3135 + struct nand_chip *chip = mtd_to_nand(mtd);
3136 + struct nandsim *ns = nand_get_controller_data(chip);
3137 u_char outb = 0x00;
3138
3139 /* Sanity and correctness checks */
3140 @@ -1969,7 +1970,8 @@ static u_char ns_nand_read_byte(struct m
3141
3142 static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
3143 {
3144 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3145 + struct nand_chip *chip = mtd_to_nand(mtd);
3146 + struct nandsim *ns = nand_get_controller_data(chip);
3147
3148 /* Sanity and correctness checks */
3149 if (!ns->lines.ce) {
3150 @@ -2123,7 +2125,8 @@ static void ns_nand_write_byte(struct mt
3151
3152 static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
3153 {
3154 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3155 + struct nand_chip *chip = mtd_to_nand(mtd);
3156 + struct nandsim *ns = nand_get_controller_data(chip);
3157
3158 ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
3159 ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
3160 @@ -2141,7 +2144,7 @@ static int ns_device_ready(struct mtd_in
3161
3162 static uint16_t ns_nand_read_word(struct mtd_info *mtd)
3163 {
3164 - struct nand_chip *chip = (struct nand_chip *)mtd->priv;
3165 + struct nand_chip *chip = mtd_to_nand(mtd);
3166
3167 NS_DBG("read_word\n");
3168
3169 @@ -2150,7 +2153,8 @@ static uint16_t ns_nand_read_word(struct
3170
3171 static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
3172 {
3173 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3174 + struct nand_chip *chip = mtd_to_nand(mtd);
3175 + struct nandsim *ns = nand_get_controller_data(chip);
3176
3177 /* Check that chip is expecting data input */
3178 if (!(ns->state & STATE_DATAIN_MASK)) {
3179 @@ -2177,7 +2181,8 @@ static void ns_nand_write_buf(struct mtd
3180
3181 static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
3182 {
3183 - struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
3184 + struct nand_chip *chip = mtd_to_nand(mtd);
3185 + struct nandsim *ns = nand_get_controller_data(chip);
3186
3187 /* Sanity and correctness checks */
3188 if (!ns->lines.ce) {
3189 @@ -2198,7 +2203,7 @@ static void ns_nand_read_buf(struct mtd_
3190 int i;
3191
3192 for (i = 0; i < len; i++)
3193 - buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
3194 + buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
3195
3196 return;
3197 }
3198 @@ -2236,16 +2241,15 @@ static int __init ns_init_module(void)
3199 }
3200
3201 /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
3202 - nsmtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
3203 - + sizeof(struct nandsim), GFP_KERNEL);
3204 - if (!nsmtd) {
3205 + chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
3206 + GFP_KERNEL);
3207 + if (!chip) {
3208 NS_ERR("unable to allocate core structures.\n");
3209 return -ENOMEM;
3210 }
3211 - chip = (struct nand_chip *)(nsmtd + 1);
3212 - nsmtd->priv = (void *)chip;
3213 + nsmtd = nand_to_mtd(chip);
3214 nand = (struct nandsim *)(chip + 1);
3215 - chip->priv = (void *)nand;
3216 + nand_set_controller_data(chip, (void *)nand);
3217
3218 /*
3219 * Register simulator's callbacks.
3220 @@ -2257,6 +2261,7 @@ static int __init ns_init_module(void)
3221 chip->read_buf = ns_nand_read_buf;
3222 chip->read_word = ns_nand_read_word;
3223 chip->ecc.mode = NAND_ECC_SOFT;
3224 + chip->ecc.algo = NAND_ECC_HAMMING;
3225 /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
3226 /* and 'badblocks' parameters to work */
3227 chip->options |= NAND_SKIP_BBTSCAN;
3228 @@ -2335,6 +2340,7 @@ static int __init ns_init_module(void)
3229 goto error;
3230 }
3231 chip->ecc.mode = NAND_ECC_SOFT_BCH;
3232 + chip->ecc.algo = NAND_ECC_BCH;
3233 chip->ecc.size = 512;
3234 chip->ecc.strength = bch;
3235 chip->ecc.bytes = eccbytes;
3236 @@ -2392,7 +2398,7 @@ err_exit:
3237 for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
3238 kfree(nand->partitions[i].name);
3239 error:
3240 - kfree(nsmtd);
3241 + kfree(chip);
3242 free_lists();
3243
3244 return retval;
3245 @@ -2405,7 +2411,8 @@ module_init(ns_init_module);
3246 */
3247 static void __exit ns_cleanup_module(void)
3248 {
3249 - struct nandsim *ns = ((struct nand_chip *)nsmtd->priv)->priv;
3250 + struct nand_chip *chip = mtd_to_nand(nsmtd);
3251 + struct nandsim *ns = nand_get_controller_data(chip);
3252 int i;
3253
3254 nandsim_debugfs_remove(ns);
3255 @@ -2413,7 +2420,7 @@ static void __exit ns_cleanup_module(voi
3256 nand_release(nsmtd); /* Unregister driver */
3257 for (i = 0;i < ARRAY_SIZE(ns->partitions); ++i)
3258 kfree(ns->partitions[i].name);
3259 - kfree(nsmtd); /* Free other structures */
3260 + kfree(mtd_to_nand(nsmtd)); /* Free other structures */
3261 free_lists();
3262 }
3263
3264 --- a/drivers/mtd/ofpart.c
3265 +++ b/drivers/mtd/ofpart.c
3266 @@ -26,9 +26,10 @@ static bool node_has_compatible(struct d
3267 }
3268
3269 static int parse_ofpart_partitions(struct mtd_info *master,
3270 - struct mtd_partition **pparts,
3271 + const struct mtd_partition **pparts,
3272 struct mtd_part_parser_data *data)
3273 {
3274 + struct mtd_partition *parts;
3275 struct device_node *mtd_node;
3276 struct device_node *ofpart_node;
3277 const char *partname;
3278 @@ -37,10 +38,8 @@ static int parse_ofpart_partitions(struc
3279 bool dedicated = true;
3280
3281
3282 - if (!data)
3283 - return 0;
3284 -
3285 - mtd_node = data->of_node;
3286 + /* Pull of_node from the master device node */
3287 + mtd_node = mtd_get_of_node(master);
3288 if (!mtd_node)
3289 return 0;
3290
3291 @@ -72,8 +71,8 @@ static int parse_ofpart_partitions(struc
3292 if (nr_parts == 0)
3293 return 0;
3294
3295 - *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
3296 - if (!*pparts)
3297 + parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
3298 + if (!parts)
3299 return -ENOMEM;
3300
3301 i = 0;
3302 @@ -107,19 +106,19 @@ static int parse_ofpart_partitions(struc
3303 goto ofpart_fail;
3304 }
3305
3306 - (*pparts)[i].offset = of_read_number(reg, a_cells);
3307 - (*pparts)[i].size = of_read_number(reg + a_cells, s_cells);
3308 + parts[i].offset = of_read_number(reg, a_cells);
3309 + parts[i].size = of_read_number(reg + a_cells, s_cells);
3310
3311 partname = of_get_property(pp, "label", &len);
3312 if (!partname)
3313 partname = of_get_property(pp, "name", &len);
3314 - (*pparts)[i].name = partname;
3315 + parts[i].name = partname;
3316
3317 if (of_get_property(pp, "read-only", &len))
3318 - (*pparts)[i].mask_flags |= MTD_WRITEABLE;
3319 + parts[i].mask_flags |= MTD_WRITEABLE;
3320
3321 if (of_get_property(pp, "lock", &len))
3322 - (*pparts)[i].mask_flags |= MTD_POWERUP_LOCK;
3323 + parts[i].mask_flags |= MTD_POWERUP_LOCK;
3324
3325 i++;
3326 }
3327 @@ -127,6 +126,7 @@ static int parse_ofpart_partitions(struc
3328 if (!nr_parts)
3329 goto ofpart_none;
3330
3331 + *pparts = parts;
3332 return nr_parts;
3333
3334 ofpart_fail:
3335 @@ -135,21 +135,20 @@ ofpart_fail:
3336 ret = -EINVAL;
3337 ofpart_none:
3338 of_node_put(pp);
3339 - kfree(*pparts);
3340 - *pparts = NULL;
3341 + kfree(parts);
3342 return ret;
3343 }
3344
3345 static struct mtd_part_parser ofpart_parser = {
3346 - .owner = THIS_MODULE,
3347 .parse_fn = parse_ofpart_partitions,
3348 .name = "ofpart",
3349 };
3350
3351 static int parse_ofoldpart_partitions(struct mtd_info *master,
3352 - struct mtd_partition **pparts,
3353 + const struct mtd_partition **pparts,
3354 struct mtd_part_parser_data *data)
3355 {
3356 + struct mtd_partition *parts;
3357 struct device_node *dp;
3358 int i, plen, nr_parts;
3359 const struct {
3360 @@ -157,10 +156,8 @@ static int parse_ofoldpart_partitions(st
3361 } *part;
3362 const char *names;
3363
3364 - if (!data)
3365 - return 0;
3366 -
3367 - dp = data->of_node;
3368 + /* Pull of_node from the master device node */
3369 + dp = mtd_get_of_node(master);
3370 if (!dp)
3371 return 0;
3372
3373 @@ -173,37 +170,37 @@ static int parse_ofoldpart_partitions(st
3374
3375 nr_parts = plen / sizeof(part[0]);
3376
3377 - *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
3378 - if (!*pparts)
3379 + parts = kzalloc(nr_parts * sizeof(*parts), GFP_KERNEL);
3380 + if (!parts)
3381 return -ENOMEM;
3382
3383 names = of_get_property(dp, "partition-names", &plen);
3384
3385 for (i = 0; i < nr_parts; i++) {
3386 - (*pparts)[i].offset = be32_to_cpu(part->offset);
3387 - (*pparts)[i].size = be32_to_cpu(part->len) & ~1;
3388 + parts[i].offset = be32_to_cpu(part->offset);
3389 + parts[i].size = be32_to_cpu(part->len) & ~1;
3390 /* bit 0 set signifies read only partition */
3391 if (be32_to_cpu(part->len) & 1)
3392 - (*pparts)[i].mask_flags = MTD_WRITEABLE;
3393 + parts[i].mask_flags = MTD_WRITEABLE;
3394
3395 if (names && (plen > 0)) {
3396 int len = strlen(names) + 1;
3397
3398 - (*pparts)[i].name = names;
3399 + parts[i].name = names;
3400 plen -= len;
3401 names += len;
3402 } else {
3403 - (*pparts)[i].name = "unnamed";
3404 + parts[i].name = "unnamed";
3405 }
3406
3407 part++;
3408 }
3409
3410 + *pparts = parts;
3411 return nr_parts;
3412 }
3413
3414 static struct mtd_part_parser ofoldpart_parser = {
3415 - .owner = THIS_MODULE,
3416 .parse_fn = parse_ofoldpart_partitions,
3417 .name = "ofoldpart",
3418 };
3419 --- a/drivers/mtd/spi-nor/Kconfig
3420 +++ b/drivers/mtd/spi-nor/Kconfig
3421 @@ -7,6 +7,14 @@ menuconfig MTD_SPI_NOR
3422
3423 if MTD_SPI_NOR
3424
3425 +config MTD_MT81xx_NOR
3426 + tristate "Mediatek MT81xx SPI NOR flash controller"
3427 + depends on HAS_IOMEM
3428 + help
3429 + This enables access to SPI NOR flash, using MT81xx SPI NOR flash
3430 + controller. This controller does not support generic SPI BUS, it only
3431 + supports SPI NOR Flash.
3432 +
3433 config MTD_SPI_NOR_USE_4K_SECTORS
3434 bool "Use small 4096 B erase sectors"
3435 default y
3436 @@ -23,7 +31,7 @@ config MTD_SPI_NOR_USE_4K_SECTORS
3437
3438 config SPI_FSL_QUADSPI
3439 tristate "Freescale Quad SPI controller"
3440 - depends on ARCH_MXC || COMPILE_TEST
3441 + depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
3442 depends on HAS_IOMEM
3443 help
3444 This enables support for the Quad SPI controller in master mode.
3445 --- a/drivers/mtd/spi-nor/Makefile
3446 +++ b/drivers/mtd/spi-nor/Makefile
3447 @@ -1,3 +1,4 @@
3448 obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
3449 obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
3450 +obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
3451 obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
3452 --- /dev/null
3453 +++ b/drivers/mtd/spi-nor/mtk-quadspi.c
3454 @@ -0,0 +1,485 @@
3455 +/*
3456 + * Copyright (c) 2015 MediaTek Inc.
3457 + * Author: Bayi Cheng <bayi.cheng@mediatek.com>
3458 + *
3459 + * This program is free software; you can redistribute it and/or modify
3460 + * it under the terms of the GNU General Public License version 2 as
3461 + * published by the Free Software Foundation.
3462 + *
3463 + * This program is distributed in the hope that it will be useful,
3464 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3465 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3466 + * GNU General Public License for more details.
3467 + */
3468 +
3469 +#include <linux/clk.h>
3470 +#include <linux/delay.h>
3471 +#include <linux/device.h>
3472 +#include <linux/init.h>
3473 +#include <linux/io.h>
3474 +#include <linux/iopoll.h>
3475 +#include <linux/ioport.h>
3476 +#include <linux/math64.h>
3477 +#include <linux/module.h>
3478 +#include <linux/mtd/mtd.h>
3479 +#include <linux/mutex.h>
3480 +#include <linux/of.h>
3481 +#include <linux/of_device.h>
3482 +#include <linux/pinctrl/consumer.h>
3483 +#include <linux/platform_device.h>
3484 +#include <linux/slab.h>
3485 +#include <linux/mtd/mtd.h>
3486 +#include <linux/mtd/partitions.h>
3487 +#include <linux/mtd/spi-nor.h>
3488 +
3489 +#define MTK_NOR_CMD_REG 0x00
3490 +#define MTK_NOR_CNT_REG 0x04
3491 +#define MTK_NOR_RDSR_REG 0x08
3492 +#define MTK_NOR_RDATA_REG 0x0c
3493 +#define MTK_NOR_RADR0_REG 0x10
3494 +#define MTK_NOR_RADR1_REG 0x14
3495 +#define MTK_NOR_RADR2_REG 0x18
3496 +#define MTK_NOR_WDATA_REG 0x1c
3497 +#define MTK_NOR_PRGDATA0_REG 0x20
3498 +#define MTK_NOR_PRGDATA1_REG 0x24
3499 +#define MTK_NOR_PRGDATA2_REG 0x28
3500 +#define MTK_NOR_PRGDATA3_REG 0x2c
3501 +#define MTK_NOR_PRGDATA4_REG 0x30
3502 +#define MTK_NOR_PRGDATA5_REG 0x34
3503 +#define MTK_NOR_SHREG0_REG 0x38
3504 +#define MTK_NOR_SHREG1_REG 0x3c
3505 +#define MTK_NOR_SHREG2_REG 0x40
3506 +#define MTK_NOR_SHREG3_REG 0x44
3507 +#define MTK_NOR_SHREG4_REG 0x48
3508 +#define MTK_NOR_SHREG5_REG 0x4c
3509 +#define MTK_NOR_SHREG6_REG 0x50
3510 +#define MTK_NOR_SHREG7_REG 0x54
3511 +#define MTK_NOR_SHREG8_REG 0x58
3512 +#define MTK_NOR_SHREG9_REG 0x5c
3513 +#define MTK_NOR_CFG1_REG 0x60
3514 +#define MTK_NOR_CFG2_REG 0x64
3515 +#define MTK_NOR_CFG3_REG 0x68
3516 +#define MTK_NOR_STATUS0_REG 0x70
3517 +#define MTK_NOR_STATUS1_REG 0x74
3518 +#define MTK_NOR_STATUS2_REG 0x78
3519 +#define MTK_NOR_STATUS3_REG 0x7c
3520 +#define MTK_NOR_FLHCFG_REG 0x84
3521 +#define MTK_NOR_TIME_REG 0x94
3522 +#define MTK_NOR_PP_DATA_REG 0x98
3523 +#define MTK_NOR_PREBUF_STUS_REG 0x9c
3524 +#define MTK_NOR_DELSEL0_REG 0xa0
3525 +#define MTK_NOR_DELSEL1_REG 0xa4
3526 +#define MTK_NOR_INTRSTUS_REG 0xa8
3527 +#define MTK_NOR_INTREN_REG 0xac
3528 +#define MTK_NOR_CHKSUM_CTL_REG 0xb8
3529 +#define MTK_NOR_CHKSUM_REG 0xbc
3530 +#define MTK_NOR_CMD2_REG 0xc0
3531 +#define MTK_NOR_WRPROT_REG 0xc4
3532 +#define MTK_NOR_RADR3_REG 0xc8
3533 +#define MTK_NOR_DUAL_REG 0xcc
3534 +#define MTK_NOR_DELSEL2_REG 0xd0
3535 +#define MTK_NOR_DELSEL3_REG 0xd4
3536 +#define MTK_NOR_DELSEL4_REG 0xd8
3537 +
3538 +/* commands for mtk nor controller */
3539 +#define MTK_NOR_READ_CMD 0x0
3540 +#define MTK_NOR_RDSR_CMD 0x2
3541 +#define MTK_NOR_PRG_CMD 0x4
3542 +#define MTK_NOR_WR_CMD 0x10
3543 +#define MTK_NOR_PIO_WR_CMD 0x90
3544 +#define MTK_NOR_WRSR_CMD 0x20
3545 +#define MTK_NOR_PIO_READ_CMD 0x81
3546 +#define MTK_NOR_WR_BUF_ENABLE 0x1
3547 +#define MTK_NOR_WR_BUF_DISABLE 0x0
3548 +#define MTK_NOR_ENABLE_SF_CMD 0x30
3549 +#define MTK_NOR_DUAD_ADDR_EN 0x8
3550 +#define MTK_NOR_QUAD_READ_EN 0x4
3551 +#define MTK_NOR_DUAL_ADDR_EN 0x2
3552 +#define MTK_NOR_DUAL_READ_EN 0x1
3553 +#define MTK_NOR_DUAL_DISABLE 0x0
3554 +#define MTK_NOR_FAST_READ 0x1
3555 +
3556 +#define SFLASH_WRBUF_SIZE 128
3557 +
3558 +/* Can shift up to 48 bits (6 bytes) of TX/RX */
3559 +#define MTK_NOR_MAX_RX_TX_SHIFT 6
3560 +/* can shift up to 56 bits (7 bytes) transfer by MTK_NOR_PRG_CMD */
3561 +#define MTK_NOR_MAX_SHIFT 7
3562 +
3563 +/* Helpers for accessing the program data / shift data registers */
3564 +#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
3565 +#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
3566 +
3567 +struct mt8173_nor {
3568 + struct spi_nor nor;
3569 + struct device *dev;
3570 + void __iomem *base; /* nor flash base address */
3571 + struct clk *spi_clk;
3572 + struct clk *nor_clk;
3573 +};
3574 +
3575 +static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
3576 +{
3577 + struct spi_nor *nor = &mt8173_nor->nor;
3578 +
3579 + switch (nor->flash_read) {
3580 + case SPI_NOR_FAST:
3581 + writeb(nor->read_opcode, mt8173_nor->base +
3582 + MTK_NOR_PRGDATA3_REG);
3583 + writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
3584 + MTK_NOR_CFG1_REG);
3585 + break;
3586 + case SPI_NOR_DUAL:
3587 + writeb(nor->read_opcode, mt8173_nor->base +
3588 + MTK_NOR_PRGDATA3_REG);
3589 + writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
3590 + MTK_NOR_DUAL_REG);
3591 + break;
3592 + case SPI_NOR_QUAD:
3593 + writeb(nor->read_opcode, mt8173_nor->base +
3594 + MTK_NOR_PRGDATA4_REG);
3595 + writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
3596 + MTK_NOR_DUAL_REG);
3597 + break;
3598 + default:
3599 + writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
3600 + MTK_NOR_DUAL_REG);
3601 + break;
3602 + }
3603 +}
3604 +
3605 +static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
3606 +{
3607 + int reg;
3608 + u8 val = cmdval & 0x1f;
3609 +
3610 + writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
3611 + return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
3612 + !(reg & val), 100, 10000);
3613 +}
3614 +
3615 +static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
3616 + u8 *tx, int txlen, u8 *rx, int rxlen)
3617 +{
3618 + int len = 1 + txlen + rxlen;
3619 + int i, ret, idx;
3620 +
3621 + if (len > MTK_NOR_MAX_SHIFT)
3622 + return -EINVAL;
3623 +
3624 + writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
3625 +
3626 + /* start at PRGDATA5, go down to PRGDATA0 */
3627 + idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
3628 +
3629 + /* opcode */
3630 + writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3631 + idx--;
3632 +
3633 + /* program TX data */
3634 + for (i = 0; i < txlen; i++, idx--)
3635 + writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3636 +
3637 + /* clear out rest of TX registers */
3638 + while (idx >= 0) {
3639 + writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
3640 + idx--;
3641 + }
3642 +
3643 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
3644 + if (ret)
3645 + return ret;
3646 +
3647 + /* restart at first RX byte */
3648 + idx = rxlen - 1;
3649 +
3650 + /* read out RX data */
3651 + for (i = 0; i < rxlen; i++, idx--)
3652 + rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
3653 +
3654 + return 0;
3655 +}
3656 +
3657 +/* Do a WRSR (Write Status Register) command */
3658 +static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
3659 +{
3660 + writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
3661 + writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
3662 + return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
3663 +}
3664 +
3665 +static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
3666 +{
3667 + u8 reg;
3668 +
3669 + /* the bit0 of MTK_NOR_CFG2_REG is pre-fetch buffer
3670 + * 0: pre-fetch buffer use for read
3671 + * 1: pre-fetch buffer use for page program
3672 + */
3673 + writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
3674 + return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
3675 + 0x01 == (reg & 0x01), 100, 10000);
3676 +}
3677 +
3678 +static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
3679 +{
3680 + u8 reg;
3681 +
3682 + writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
3683 + return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
3684 + MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
3685 + 10000);
3686 +}
3687 +
3688 +static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
3689 +{
3690 + int i;
3691 +
3692 + for (i = 0; i < 3; i++) {
3693 + writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
3694 + addr >>= 8;
3695 + }
3696 + /* Last register is non-contiguous */
3697 + writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
3698 +}
3699 +
3700 +static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
3701 + size_t *retlen, u_char *buffer)
3702 +{
3703 + int i, ret;
3704 + int addr = (int)from;
3705 + u8 *buf = (u8 *)buffer;
3706 + struct mt8173_nor *mt8173_nor = nor->priv;
3707 +
3708 + /* set mode for fast read mode ,dual mode or quad mode */
3709 + mt8173_nor_set_read_mode(mt8173_nor);
3710 + mt8173_nor_set_addr(mt8173_nor, addr);
3711 +
3712 + for (i = 0; i < length; i++, (*retlen)++) {
3713 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
3714 + if (ret < 0)
3715 + return ret;
3716 + buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
3717 + }
3718 + return 0;
3719 +}
3720 +
3721 +static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
3722 + int addr, int length, u8 *data)
3723 +{
3724 + int i, ret;
3725 +
3726 + mt8173_nor_set_addr(mt8173_nor, addr);
3727 +
3728 + for (i = 0; i < length; i++) {
3729 + writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
3730 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
3731 + if (ret < 0)
3732 + return ret;
3733 + }
3734 + return 0;
3735 +}
3736 +
3737 +static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
3738 + const u8 *buf)
3739 +{
3740 + int i, bufidx, data;
3741 +
3742 + mt8173_nor_set_addr(mt8173_nor, addr);
3743 +
3744 + bufidx = 0;
3745 + for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
3746 + data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
3747 + buf[bufidx + 1]<<8 | buf[bufidx];
3748 + bufidx += 4;
3749 + writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
3750 + }
3751 + return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
3752 +}
3753 +
3754 +static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
3755 + size_t *retlen, const u_char *buf)
3756 +{
3757 + int ret;
3758 + struct mt8173_nor *mt8173_nor = nor->priv;
3759 +
3760 + ret = mt8173_nor_write_buffer_enable(mt8173_nor);
3761 + if (ret < 0)
3762 + dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
3763 +
3764 + while (len >= SFLASH_WRBUF_SIZE) {
3765 + ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
3766 + if (ret < 0)
3767 + dev_err(mt8173_nor->dev, "write buffer failed!\n");
3768 + len -= SFLASH_WRBUF_SIZE;
3769 + to += SFLASH_WRBUF_SIZE;
3770 + buf += SFLASH_WRBUF_SIZE;
3771 + (*retlen) += SFLASH_WRBUF_SIZE;
3772 + }
3773 + ret = mt8173_nor_write_buffer_disable(mt8173_nor);
3774 + if (ret < 0)
3775 + dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
3776 +
3777 + if (len) {
3778 + ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len,
3779 + (u8 *)buf);
3780 + if (ret < 0)
3781 + dev_err(mt8173_nor->dev, "write single byte failed!\n");
3782 + (*retlen) += len;
3783 + }
3784 +}
3785 +
3786 +static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
3787 +{
3788 + int ret;
3789 + struct mt8173_nor *mt8173_nor = nor->priv;
3790 +
3791 + switch (opcode) {
3792 + case SPINOR_OP_RDSR:
3793 + ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
3794 + if (ret < 0)
3795 + return ret;
3796 + if (len == 1)
3797 + *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
3798 + else
3799 + dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
3800 + break;
3801 + default:
3802 + ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
3803 + break;
3804 + }
3805 + return ret;
3806 +}
3807 +
3808 +static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
3809 + int len)
3810 +{
3811 + int ret;
3812 + struct mt8173_nor *mt8173_nor = nor->priv;
3813 +
3814 + switch (opcode) {
3815 + case SPINOR_OP_WRSR:
3816 + /* We only handle 1 byte */
3817 + ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
3818 + break;
3819 + default:
3820 + ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
3821 + if (ret)
3822 + dev_warn(mt8173_nor->dev, "write reg failure!\n");
3823 + break;
3824 + }
3825 + return ret;
3826 +}
3827 +
3828 +static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
3829 + struct device_node *flash_node)
3830 +{
3831 + int ret;
3832 + struct spi_nor *nor;
3833 +
3834 + /* initialize controller to accept commands */
3835 + writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
3836 +
3837 + nor = &mt8173_nor->nor;
3838 + nor->dev = mt8173_nor->dev;
3839 + nor->priv = mt8173_nor;
3840 + spi_nor_set_flash_node(nor, flash_node);
3841 +
3842 + /* fill the hooks to spi nor */
3843 + nor->read = mt8173_nor_read;
3844 + nor->read_reg = mt8173_nor_read_reg;
3845 + nor->write = mt8173_nor_write;
3846 + nor->write_reg = mt8173_nor_write_reg;
3847 + nor->mtd.name = "mtk_nor";
3848 + /* initialized with NULL */
3849 + ret = spi_nor_scan(nor, NULL, SPI_NOR_DUAL);
3850 + if (ret)
3851 + return ret;
3852 +
3853 + return mtd_device_register(&nor->mtd, NULL, 0);
3854 +}
3855 +
3856 +static int mtk_nor_drv_probe(struct platform_device *pdev)
3857 +{
3858 + struct device_node *flash_np;
3859 + struct resource *res;
3860 + int ret;
3861 + struct mt8173_nor *mt8173_nor;
3862 +
3863 + if (!pdev->dev.of_node) {
3864 + dev_err(&pdev->dev, "No DT found\n");
3865 + return -EINVAL;
3866 + }
3867 +
3868 + mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
3869 + if (!mt8173_nor)
3870 + return -ENOMEM;
3871 + platform_set_drvdata(pdev, mt8173_nor);
3872 +
3873 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3874 + mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
3875 + if (IS_ERR(mt8173_nor->base))
3876 + return PTR_ERR(mt8173_nor->base);
3877 +
3878 + mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
3879 + if (IS_ERR(mt8173_nor->spi_clk))
3880 + return PTR_ERR(mt8173_nor->spi_clk);
3881 +
3882 + mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
3883 + if (IS_ERR(mt8173_nor->nor_clk))
3884 + return PTR_ERR(mt8173_nor->nor_clk);
3885 +
3886 + mt8173_nor->dev = &pdev->dev;
3887 + ret = clk_prepare_enable(mt8173_nor->spi_clk);
3888 + if (ret)
3889 + return ret;
3890 +
3891 + ret = clk_prepare_enable(mt8173_nor->nor_clk);
3892 + if (ret) {
3893 + clk_disable_unprepare(mt8173_nor->spi_clk);
3894 + return ret;
3895 + }
3896 + /* only support one attached flash */
3897 + flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
3898 + if (!flash_np) {
3899 + dev_err(&pdev->dev, "no SPI flash device to configure\n");
3900 + ret = -ENODEV;
3901 + goto nor_free;
3902 + }
3903 + ret = mtk_nor_init(mt8173_nor, flash_np);
3904 +
3905 +nor_free:
3906 + if (ret) {
3907 + clk_disable_unprepare(mt8173_nor->spi_clk);
3908 + clk_disable_unprepare(mt8173_nor->nor_clk);
3909 + }
3910 + return ret;
3911 +}
3912 +
3913 +static int mtk_nor_drv_remove(struct platform_device *pdev)
3914 +{
3915 + struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
3916 +
3917 + clk_disable_unprepare(mt8173_nor->spi_clk);
3918 + clk_disable_unprepare(mt8173_nor->nor_clk);
3919 + return 0;
3920 +}
3921 +
3922 +static const struct of_device_id mtk_nor_of_ids[] = {
3923 + { .compatible = "mediatek,mt8173-nor"},
3924 + { /* sentinel */ }
3925 +};
3926 +MODULE_DEVICE_TABLE(of, mtk_nor_of_ids);
3927 +
3928 +static struct platform_driver mtk_nor_driver = {
3929 + .probe = mtk_nor_drv_probe,
3930 + .remove = mtk_nor_drv_remove,
3931 + .driver = {
3932 + .name = "mtk-nor",
3933 + .of_match_table = mtk_nor_of_ids,
3934 + },
3935 +};
3936 +
3937 +module_platform_driver(mtk_nor_driver);
3938 +MODULE_LICENSE("GPL v2");
3939 +MODULE_DESCRIPTION("MediaTek SPI NOR Flash Driver");
3940 --- a/drivers/mtd/spi-nor/spi-nor.c
3941 +++ b/drivers/mtd/spi-nor/spi-nor.c
3942 @@ -38,6 +38,7 @@
3943 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
3944
3945 #define SPI_NOR_MAX_ID_LEN 6
3946 +#define SPI_NOR_MAX_ADDR_WIDTH 4
3947
3948 struct flash_info {
3949 char *name;
3950 @@ -60,14 +61,20 @@ struct flash_info {
3951 u16 addr_width;
3952
3953 u16 flags;
3954 -#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */
3955 -#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */
3956 -#define SST_WRITE 0x04 /* use SST byte programming */
3957 -#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */
3958 -#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
3959 -#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
3960 -#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
3961 -#define USE_FSR 0x80 /* use flag status register */
3962 +#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
3963 +#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
3964 +#define SST_WRITE BIT(2) /* use SST byte programming */
3965 +#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
3966 +#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
3967 +#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
3968 +#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
3969 +#define USE_FSR BIT(7) /* use flag status register */
3970 +#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
3971 +#define SPI_NOR_HAS_TB BIT(9) /*
3972 + * Flash SR has Top/Bottom (TB) protect
3973 + * bit. Must be used with
3974 + * SPI_NOR_HAS_LOCK.
3975 + */
3976 };
3977
3978 #define JEDEC_MFR(info) ((info)->id[0])
3979 @@ -313,6 +320,29 @@ static void spi_nor_unlock_and_unprep(st
3980 }
3981
3982 /*
3983 + * Initiate the erasure of a single sector
3984 + */
3985 +static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
3986 +{
3987 + u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
3988 + int i;
3989 +
3990 + if (nor->erase)
3991 + return nor->erase(nor, addr);
3992 +
3993 + /*
3994 + * Default implementation, if driver doesn't have a specialized HW
3995 + * control
3996 + */
3997 + for (i = nor->addr_width - 1; i >= 0; i--) {
3998 + buf[i] = addr & 0xff;
3999 + addr >>= 8;
4000 + }
4001 +
4002 + return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
4003 +}
4004 +
4005 +/*
4006 * Erase an address range on the nor chip. The address range may extend
4007 * one or more erase sectors. Return an error is there is a problem erasing.
4008 */
4009 @@ -371,10 +401,9 @@ static int spi_nor_erase(struct mtd_info
4010 while (len) {
4011 write_enable(nor);
4012
4013 - if (nor->erase(nor, addr)) {
4014 - ret = -EIO;
4015 + ret = spi_nor_erase_sector(nor, addr);
4016 + if (ret)
4017 goto erase_err;
4018 - }
4019
4020 addr += mtd->erasesize;
4021 len -= mtd->erasesize;
4022 @@ -387,17 +416,13 @@ static int spi_nor_erase(struct mtd_info
4023
4024 write_disable(nor);
4025
4026 +erase_err:
4027 spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
4028
4029 - instr->state = MTD_ERASE_DONE;
4030 + instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
4031 mtd_erase_callback(instr);
4032
4033 return ret;
4034 -
4035 -erase_err:
4036 - spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
4037 - instr->state = MTD_ERASE_FAILED;
4038 - return ret;
4039 }
4040
4041 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
4042 @@ -415,32 +440,58 @@ static void stm_get_locked_range(struct
4043 } else {
4044 pow = ((sr & mask) ^ mask) >> shift;
4045 *len = mtd->size >> pow;
4046 - *ofs = mtd->size - *len;
4047 + if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
4048 + *ofs = 0;
4049 + else
4050 + *ofs = mtd->size - *len;
4051 }
4052 }
4053
4054 /*
4055 - * Return 1 if the entire region is locked, 0 otherwise
4056 + * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
4057 + * @locked is false); 0 otherwise
4058 */
4059 -static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4060 - u8 sr)
4061 +static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4062 + u8 sr, bool locked)
4063 {
4064 loff_t lock_offs;
4065 uint64_t lock_len;
4066
4067 + if (!len)
4068 + return 1;
4069 +
4070 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
4071
4072 - return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
4073 + if (locked)
4074 + /* Requested range is a sub-range of locked range */
4075 + return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
4076 + else
4077 + /* Requested range does not overlap with locked range */
4078 + return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
4079 +}
4080 +
4081 +static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4082 + u8 sr)
4083 +{
4084 + return stm_check_lock_status_sr(nor, ofs, len, sr, true);
4085 +}
4086 +
4087 +static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
4088 + u8 sr)
4089 +{
4090 + return stm_check_lock_status_sr(nor, ofs, len, sr, false);
4091 }
4092
4093 /*
4094 * Lock a region of the flash. Compatible with ST Micro and similar flash.
4095 - * Supports only the block protection bits BP{0,1,2} in the status register
4096 + * Supports the block protection bits BP{0,1,2} in the status register
4097 * (SR). Does not support these features found in newer SR bitfields:
4098 - * - TB: top/bottom protect - only handle TB=0 (top protect)
4099 * - SEC: sector/block protect - only handle SEC=0 (block protect)
4100 * - CMP: complement protect - only support CMP=0 (range is not complemented)
4101 *
4102 + * Support for the following is provided conditionally for some flash:
4103 + * - TB: top/bottom protect
4104 + *
4105 * Sample table portion for 8MB flash (Winbond w25q64fw):
4106 *
4107 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
4108 @@ -453,26 +504,55 @@ static int stm_is_locked_sr(struct spi_n
4109 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
4110 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
4111 * X | X | 1 | 1 | 1 | 8 MB | ALL
4112 + * ------|-------|-------|-------|-------|---------------|-------------------
4113 + * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
4114 + * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
4115 + * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
4116 + * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
4117 + * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
4118 + * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
4119 *
4120 * Returns negative on errors, 0 on success.
4121 */
4122 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
4123 {
4124 struct mtd_info *mtd = &nor->mtd;
4125 - u8 status_old, status_new;
4126 + int status_old, status_new;
4127 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
4128 u8 shift = ffs(mask) - 1, pow, val;
4129 + loff_t lock_len;
4130 + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
4131 + bool use_top;
4132 + int ret;
4133
4134 status_old = read_sr(nor);
4135 + if (status_old < 0)
4136 + return status_old;
4137
4138 - /* SPI NOR always locks to the end */
4139 - if (ofs + len != mtd->size) {
4140 - /* Does combined region extend to end? */
4141 - if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
4142 - status_old))
4143 - return -EINVAL;
4144 - len = mtd->size - ofs;
4145 - }
4146 + /* If nothing in our range is unlocked, we don't need to do anything */
4147 + if (stm_is_locked_sr(nor, ofs, len, status_old))
4148 + return 0;
4149 +
4150 + /* If anything below us is unlocked, we can't use 'bottom' protection */
4151 + if (!stm_is_locked_sr(nor, 0, ofs, status_old))
4152 + can_be_bottom = false;
4153 +
4154 + /* If anything above us is unlocked, we can't use 'top' protection */
4155 + if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
4156 + status_old))
4157 + can_be_top = false;
4158 +
4159 + if (!can_be_bottom && !can_be_top)
4160 + return -EINVAL;
4161 +
4162 + /* Prefer top, if both are valid */
4163 + use_top = can_be_top;
4164 +
4165 + /* lock_len: length of region that should end up locked */
4166 + if (use_top)
4167 + lock_len = mtd->size - ofs;
4168 + else
4169 + lock_len = ofs + len;
4170
4171 /*
4172 * Need smallest pow such that:
4173 @@ -483,7 +563,7 @@ static int stm_lock(struct spi_nor *nor,
4174 *
4175 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
4176 */
4177 - pow = ilog2(mtd->size) - ilog2(len);
4178 + pow = ilog2(mtd->size) - ilog2(lock_len);
4179 val = mask - (pow << shift);
4180 if (val & ~mask)
4181 return -EINVAL;
4182 @@ -491,14 +571,27 @@ static int stm_lock(struct spi_nor *nor,
4183 if (!(val & mask))
4184 return -EINVAL;
4185
4186 - status_new = (status_old & ~mask) | val;
4187 + status_new = (status_old & ~mask & ~SR_TB) | val;
4188 +
4189 + /* Disallow further writes if WP pin is asserted */
4190 + status_new |= SR_SRWD;
4191 +
4192 + if (!use_top)
4193 + status_new |= SR_TB;
4194 +
4195 + /* Don't bother if they're the same */
4196 + if (status_new == status_old)
4197 + return 0;
4198
4199 /* Only modify protection if it will not unlock other areas */
4200 - if ((status_new & mask) <= (status_old & mask))
4201 + if ((status_new & mask) < (status_old & mask))
4202 return -EINVAL;
4203
4204 write_enable(nor);
4205 - return write_sr(nor, status_new);
4206 + ret = write_sr(nor, status_new);
4207 + if (ret)
4208 + return ret;
4209 + return spi_nor_wait_till_ready(nor);
4210 }
4211
4212 /*
4213 @@ -509,17 +602,43 @@ static int stm_lock(struct spi_nor *nor,
4214 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
4215 {
4216 struct mtd_info *mtd = &nor->mtd;
4217 - uint8_t status_old, status_new;
4218 + int status_old, status_new;
4219 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
4220 u8 shift = ffs(mask) - 1, pow, val;
4221 + loff_t lock_len;
4222 + bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
4223 + bool use_top;
4224 + int ret;
4225
4226 status_old = read_sr(nor);
4227 + if (status_old < 0)
4228 + return status_old;
4229 +
4230 + /* If nothing in our range is locked, we don't need to do anything */
4231 + if (stm_is_unlocked_sr(nor, ofs, len, status_old))
4232 + return 0;
4233 +
4234 + /* If anything below us is locked, we can't use 'top' protection */
4235 + if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
4236 + can_be_top = false;
4237 +
4238 + /* If anything above us is locked, we can't use 'bottom' protection */
4239 + if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
4240 + status_old))
4241 + can_be_bottom = false;
4242
4243 - /* Cannot unlock; would unlock larger region than requested */
4244 - if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
4245 - status_old))
4246 + if (!can_be_bottom && !can_be_top)
4247 return -EINVAL;
4248
4249 + /* Prefer top, if both are valid */
4250 + use_top = can_be_top;
4251 +
4252 + /* lock_len: length of region that should remain locked */
4253 + if (use_top)
4254 + lock_len = mtd->size - (ofs + len);
4255 + else
4256 + lock_len = ofs;
4257 +
4258 /*
4259 * Need largest pow such that:
4260 *
4261 @@ -529,8 +648,8 @@ static int stm_unlock(struct spi_nor *no
4262 *
4263 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
4264 */
4265 - pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
4266 - if (ofs + len == mtd->size) {
4267 + pow = ilog2(mtd->size) - order_base_2(lock_len);
4268 + if (lock_len == 0) {
4269 val = 0; /* fully unlocked */
4270 } else {
4271 val = mask - (pow << shift);
4272 @@ -539,14 +658,28 @@ static int stm_unlock(struct spi_nor *no
4273 return -EINVAL;
4274 }
4275
4276 - status_new = (status_old & ~mask) | val;
4277 + status_new = (status_old & ~mask & ~SR_TB) | val;
4278 +
4279 + /* Don't protect status register if we're fully unlocked */
4280 + if (lock_len == mtd->size)
4281 + status_new &= ~SR_SRWD;
4282 +
4283 + if (!use_top)
4284 + status_new |= SR_TB;
4285 +
4286 + /* Don't bother if they're the same */
4287 + if (status_new == status_old)
4288 + return 0;
4289
4290 /* Only modify protection if it will not lock other areas */
4291 - if ((status_new & mask) >= (status_old & mask))
4292 + if ((status_new & mask) > (status_old & mask))
4293 return -EINVAL;
4294
4295 write_enable(nor);
4296 - return write_sr(nor, status_new);
4297 + ret = write_sr(nor, status_new);
4298 + if (ret)
4299 + return ret;
4300 + return spi_nor_wait_till_ready(nor);
4301 }
4302
4303 /*
4304 @@ -715,9 +848,9 @@ static const struct flash_info spi_nor_i
4305 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
4306 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
4307 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
4308 - { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
4309 + { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) },
4310 { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
4311 - { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
4312 + { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
4313 { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
4314 { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
4315 { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
4316 @@ -732,8 +865,8 @@ static const struct flash_info spi_nor_i
4317 { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
4318 { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4319 { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
4320 - { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
4321 - { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) },
4322 + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
4323 + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
4324 { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) },
4325 { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
4326 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
4327 @@ -767,6 +900,7 @@ static const struct flash_info spi_nor_i
4328 { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4329 { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4330 { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
4331 + { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4332 { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) },
4333 { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) },
4334 { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
4335 @@ -830,11 +964,23 @@ static const struct flash_info spi_nor_i
4336 { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) },
4337 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
4338 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
4339 - { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4340 + {
4341 + "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64,
4342 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4343 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4344 + },
4345 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
4346 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
4347 - { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4348 - { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
4349 + {
4350 + "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
4351 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4352 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4353 + },
4354 + {
4355 + "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
4356 + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
4357 + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
4358 + },
4359 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
4360 { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
4361 { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
4362 @@ -857,7 +1003,7 @@ static const struct flash_info *spi_nor_
4363
4364 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
4365 if (tmp < 0) {
4366 - dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp);
4367 + dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
4368 return ERR_PTR(tmp);
4369 }
4370
4371 @@ -868,7 +1014,7 @@ static const struct flash_info *spi_nor_
4372 return &spi_nor_ids[tmp];
4373 }
4374 }
4375 - dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n",
4376 + dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
4377 id[0], id[1], id[2]);
4378 return ERR_PTR(-ENODEV);
4379 }
4380 @@ -1014,6 +1160,8 @@ static int macronix_quad_enable(struct s
4381 int ret, val;
4382
4383 val = read_sr(nor);
4384 + if (val < 0)
4385 + return val;
4386 write_enable(nor);
4387
4388 write_sr(nor, val | SR_QUAD_EN_MX);
4389 @@ -1095,7 +1243,7 @@ static int set_quad_mode(struct spi_nor
4390 static int spi_nor_check(struct spi_nor *nor)
4391 {
4392 if (!nor->dev || !nor->read || !nor->write ||
4393 - !nor->read_reg || !nor->write_reg || !nor->erase) {
4394 + !nor->read_reg || !nor->write_reg) {
4395 pr_err("spi-nor: please fill all the necessary fields!\n");
4396 return -EINVAL;
4397 }
4398 @@ -1108,7 +1256,7 @@ int spi_nor_scan(struct spi_nor *nor, co
4399 const struct flash_info *info = NULL;
4400 struct device *dev = nor->dev;
4401 struct mtd_info *mtd = &nor->mtd;
4402 - struct device_node *np = nor->flash_node;
4403 + struct device_node *np = spi_nor_get_flash_node(nor);
4404 int ret;
4405 int i;
4406
4407 @@ -1158,9 +1306,11 @@ int spi_nor_scan(struct spi_nor *nor, co
4408 if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
4409 JEDEC_MFR(info) == SNOR_MFR_INTEL ||
4410 JEDEC_MFR(info) == SNOR_MFR_MACRONIX ||
4411 - JEDEC_MFR(info) == SNOR_MFR_SST) {
4412 + JEDEC_MFR(info) == SNOR_MFR_SST ||
4413 + info->flags & SPI_NOR_HAS_LOCK) {
4414 write_enable(nor);
4415 write_sr(nor, 0);
4416 + spi_nor_wait_till_ready(nor);
4417 }
4418
4419 if (!mtd->name)
4420 @@ -1174,7 +1324,8 @@ int spi_nor_scan(struct spi_nor *nor, co
4421 mtd->_read = spi_nor_read;
4422
4423 /* NOR protection support for STmicro/Micron chips and similar */
4424 - if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
4425 + if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
4426 + info->flags & SPI_NOR_HAS_LOCK) {
4427 nor->flash_lock = stm_lock;
4428 nor->flash_unlock = stm_unlock;
4429 nor->flash_is_locked = stm_is_locked;
4430 @@ -1194,6 +1345,8 @@ int spi_nor_scan(struct spi_nor *nor, co
4431
4432 if (info->flags & USE_FSR)
4433 nor->flags |= SNOR_F_USE_FSR;
4434 + if (info->flags & SPI_NOR_HAS_TB)
4435 + nor->flags |= SNOR_F_HAS_SR_TB;
4436
4437 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
4438 /* prefer "small sector" erase if possible */
4439 @@ -1296,6 +1449,12 @@ int spi_nor_scan(struct spi_nor *nor, co
4440 nor->addr_width = 3;
4441 }
4442
4443 + if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
4444 + dev_err(dev, "address width is too large: %u\n",
4445 + nor->addr_width);
4446 + return -EINVAL;
4447 + }
4448 +
4449 nor->read_dummy = spi_nor_read_dummy_cycles(nor);
4450
4451 dev_info(dev, "%s (%lld Kbytes)\n", info->name,
4452 --- a/drivers/mtd/tests/mtd_nandecctest.c
4453 +++ b/drivers/mtd/tests/mtd_nandecctest.c
4454 @@ -187,7 +187,7 @@ static int double_bit_error_detect(void
4455 __nand_calculate_ecc(error_data, size, calc_ecc);
4456 ret = __nand_correct_data(error_data, error_ecc, calc_ecc, size);
4457
4458 - return (ret == -1) ? 0 : -EINVAL;
4459 + return (ret == -EBADMSG) ? 0 : -EINVAL;
4460 }
4461
4462 static const struct nand_ecc_test nand_ecc_test[] = {
4463 --- a/drivers/mtd/tests/oobtest.c
4464 +++ b/drivers/mtd/tests/oobtest.c
4465 @@ -215,19 +215,19 @@ static int verify_eraseblock(int ebnum)
4466 pr_info("ignoring error as within bitflip_limit\n");
4467 }
4468
4469 - if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
4470 + if (use_offset != 0 || use_len < mtd->oobavail) {
4471 int k;
4472
4473 ops.mode = MTD_OPS_AUTO_OOB;
4474 ops.len = 0;
4475 ops.retlen = 0;
4476 - ops.ooblen = mtd->ecclayout->oobavail;
4477 + ops.ooblen = mtd->oobavail;
4478 ops.oobretlen = 0;
4479 ops.ooboffs = 0;
4480 ops.datbuf = NULL;
4481 ops.oobbuf = readbuf;
4482 err = mtd_read_oob(mtd, addr, &ops);
4483 - if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
4484 + if (err || ops.oobretlen != mtd->oobavail) {
4485 pr_err("error: readoob failed at %#llx\n",
4486 (long long)addr);
4487 errcnt += 1;
4488 @@ -244,7 +244,7 @@ static int verify_eraseblock(int ebnum)
4489 /* verify post-(use_offset + use_len) area for 0xff */
4490 k = use_offset + use_len;
4491 bitflips += memffshow(addr, k, readbuf + k,
4492 - mtd->ecclayout->oobavail - k);
4493 + mtd->oobavail - k);
4494
4495 if (bitflips > bitflip_limit) {
4496 pr_err("error: verify failed at %#llx\n",
4497 @@ -269,8 +269,8 @@ static int verify_eraseblock_in_one_go(i
4498 struct mtd_oob_ops ops;
4499 int err = 0;
4500 loff_t addr = (loff_t)ebnum * mtd->erasesize;
4501 - size_t len = mtd->ecclayout->oobavail * pgcnt;
4502 - size_t oobavail = mtd->ecclayout->oobavail;
4503 + size_t len = mtd->oobavail * pgcnt;
4504 + size_t oobavail = mtd->oobavail;
4505 size_t bitflips;
4506 int i;
4507
4508 @@ -394,8 +394,8 @@ static int __init mtd_oobtest_init(void)
4509 goto out;
4510
4511 use_offset = 0;
4512 - use_len = mtd->ecclayout->oobavail;
4513 - use_len_max = mtd->ecclayout->oobavail;
4514 + use_len = mtd->oobavail;
4515 + use_len_max = mtd->oobavail;
4516 vary_offset = 0;
4517
4518 /* First test: write all OOB, read it back and verify */
4519 @@ -460,8 +460,8 @@ static int __init mtd_oobtest_init(void)
4520
4521 /* Write all eraseblocks */
4522 use_offset = 0;
4523 - use_len = mtd->ecclayout->oobavail;
4524 - use_len_max = mtd->ecclayout->oobavail;
4525 + use_len = mtd->oobavail;
4526 + use_len_max = mtd->oobavail;
4527 vary_offset = 1;
4528 prandom_seed_state(&rnd_state, 5);
4529
4530 @@ -471,8 +471,8 @@ static int __init mtd_oobtest_init(void)
4531
4532 /* Check all eraseblocks */
4533 use_offset = 0;
4534 - use_len = mtd->ecclayout->oobavail;
4535 - use_len_max = mtd->ecclayout->oobavail;
4536 + use_len = mtd->oobavail;
4537 + use_len_max = mtd->oobavail;
4538 vary_offset = 1;
4539 prandom_seed_state(&rnd_state, 5);
4540 err = verify_all_eraseblocks();
4541 @@ -480,8 +480,8 @@ static int __init mtd_oobtest_init(void)
4542 goto out;
4543
4544 use_offset = 0;
4545 - use_len = mtd->ecclayout->oobavail;
4546 - use_len_max = mtd->ecclayout->oobavail;
4547 + use_len = mtd->oobavail;
4548 + use_len_max = mtd->oobavail;
4549 vary_offset = 0;
4550
4551 /* Fourth test: try to write off end of device */
4552 @@ -501,7 +501,7 @@ static int __init mtd_oobtest_init(void)
4553 ops.retlen = 0;
4554 ops.ooblen = 1;
4555 ops.oobretlen = 0;
4556 - ops.ooboffs = mtd->ecclayout->oobavail;
4557 + ops.ooboffs = mtd->oobavail;
4558 ops.datbuf = NULL;
4559 ops.oobbuf = writebuf;
4560 pr_info("attempting to start write past end of OOB\n");
4561 @@ -521,7 +521,7 @@ static int __init mtd_oobtest_init(void)
4562 ops.retlen = 0;
4563 ops.ooblen = 1;
4564 ops.oobretlen = 0;
4565 - ops.ooboffs = mtd->ecclayout->oobavail;
4566 + ops.ooboffs = mtd->oobavail;
4567 ops.datbuf = NULL;
4568 ops.oobbuf = readbuf;
4569 pr_info("attempting to start read past end of OOB\n");
4570 @@ -543,7 +543,7 @@ static int __init mtd_oobtest_init(void)
4571 ops.mode = MTD_OPS_AUTO_OOB;
4572 ops.len = 0;
4573 ops.retlen = 0;
4574 - ops.ooblen = mtd->ecclayout->oobavail + 1;
4575 + ops.ooblen = mtd->oobavail + 1;
4576 ops.oobretlen = 0;
4577 ops.ooboffs = 0;
4578 ops.datbuf = NULL;
4579 @@ -563,7 +563,7 @@ static int __init mtd_oobtest_init(void)
4580 ops.mode = MTD_OPS_AUTO_OOB;
4581 ops.len = 0;
4582 ops.retlen = 0;
4583 - ops.ooblen = mtd->ecclayout->oobavail + 1;
4584 + ops.ooblen = mtd->oobavail + 1;
4585 ops.oobretlen = 0;
4586 ops.ooboffs = 0;
4587 ops.datbuf = NULL;
4588 @@ -587,7 +587,7 @@ static int __init mtd_oobtest_init(void)
4589 ops.mode = MTD_OPS_AUTO_OOB;
4590 ops.len = 0;
4591 ops.retlen = 0;
4592 - ops.ooblen = mtd->ecclayout->oobavail;
4593 + ops.ooblen = mtd->oobavail;
4594 ops.oobretlen = 0;
4595 ops.ooboffs = 1;
4596 ops.datbuf = NULL;
4597 @@ -607,7 +607,7 @@ static int __init mtd_oobtest_init(void)
4598 ops.mode = MTD_OPS_AUTO_OOB;
4599 ops.len = 0;
4600 ops.retlen = 0;
4601 - ops.ooblen = mtd->ecclayout->oobavail;
4602 + ops.ooblen = mtd->oobavail;
4603 ops.oobretlen = 0;
4604 ops.ooboffs = 1;
4605 ops.datbuf = NULL;
4606 @@ -638,7 +638,7 @@ static int __init mtd_oobtest_init(void)
4607 for (i = 0; i < ebcnt - 1; ++i) {
4608 int cnt = 2;
4609 int pg;
4610 - size_t sz = mtd->ecclayout->oobavail;
4611 + size_t sz = mtd->oobavail;
4612 if (bbt[i] || bbt[i + 1])
4613 continue;
4614 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
4615 @@ -673,13 +673,12 @@ static int __init mtd_oobtest_init(void)
4616 for (i = 0; i < ebcnt - 1; ++i) {
4617 if (bbt[i] || bbt[i + 1])
4618 continue;
4619 - prandom_bytes_state(&rnd_state, writebuf,
4620 - mtd->ecclayout->oobavail * 2);
4621 + prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
4622 addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
4623 ops.mode = MTD_OPS_AUTO_OOB;
4624 ops.len = 0;
4625 ops.retlen = 0;
4626 - ops.ooblen = mtd->ecclayout->oobavail * 2;
4627 + ops.ooblen = mtd->oobavail * 2;
4628 ops.oobretlen = 0;
4629 ops.ooboffs = 0;
4630 ops.datbuf = NULL;
4631 @@ -688,7 +687,7 @@ static int __init mtd_oobtest_init(void)
4632 if (err)
4633 goto out;
4634 if (memcmpshow(addr, readbuf, writebuf,
4635 - mtd->ecclayout->oobavail * 2)) {
4636 + mtd->oobavail * 2)) {
4637 pr_err("error: verify failed at %#llx\n",
4638 (long long)addr);
4639 errcnt += 1;
4640 --- a/drivers/mtd/tests/pagetest.c
4641 +++ b/drivers/mtd/tests/pagetest.c
4642 @@ -127,13 +127,12 @@ static int crosstest(void)
4643 unsigned char *pp1, *pp2, *pp3, *pp4;
4644
4645 pr_info("crosstest\n");
4646 - pp1 = kmalloc(pgsize * 4, GFP_KERNEL);
4647 + pp1 = kzalloc(pgsize * 4, GFP_KERNEL);
4648 if (!pp1)
4649 return -ENOMEM;
4650 pp2 = pp1 + pgsize;
4651 pp3 = pp2 + pgsize;
4652 pp4 = pp3 + pgsize;
4653 - memset(pp1, 0, pgsize * 4);
4654
4655 addr0 = 0;
4656 for (i = 0; i < ebcnt && bbt[i]; ++i)
4657 --- a/drivers/mtd/ubi/cdev.c
4658 +++ b/drivers/mtd/ubi/cdev.c
4659 @@ -174,9 +174,9 @@ static int vol_cdev_fsync(struct file *f
4660 struct ubi_device *ubi = desc->vol->ubi;
4661 struct inode *inode = file_inode(file);
4662 int err;
4663 - mutex_lock(&inode->i_mutex);
4664 + inode_lock(inode);
4665 err = ubi_sync(ubi->ubi_num);
4666 - mutex_unlock(&inode->i_mutex);
4667 + inode_unlock(inode);
4668 return err;
4669 }
4670
4671 --- a/drivers/mtd/ubi/misc.c
4672 +++ b/drivers/mtd/ubi/misc.c
4673 @@ -153,3 +153,52 @@ int ubi_check_pattern(const void *buf, u
4674 return 0;
4675 return 1;
4676 }
4677 +
4678 +/* Normal UBI messages */
4679 +void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...)
4680 +{
4681 + struct va_format vaf;
4682 + va_list args;
4683 +
4684 + va_start(args, fmt);
4685 +
4686 + vaf.fmt = fmt;
4687 + vaf.va = &args;
4688 +
4689 + pr_notice(UBI_NAME_STR "%d: %pV\n", ubi->ubi_num, &vaf);
4690 +
4691 + va_end(args);
4692 +}
4693 +
4694 +/* UBI warning messages */
4695 +void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...)
4696 +{
4697 + struct va_format vaf;
4698 + va_list args;
4699 +
4700 + va_start(args, fmt);
4701 +
4702 + vaf.fmt = fmt;
4703 + vaf.va = &args;
4704 +
4705 + pr_warn(UBI_NAME_STR "%d warning: %ps: %pV\n",
4706 + ubi->ubi_num, __builtin_return_address(0), &vaf);
4707 +
4708 + va_end(args);
4709 +}
4710 +
4711 +/* UBI error messages */
4712 +void ubi_err(const struct ubi_device *ubi, const char *fmt, ...)
4713 +{
4714 + struct va_format vaf;
4715 + va_list args;
4716 +
4717 + va_start(args, fmt);
4718 +
4719 + vaf.fmt = fmt;
4720 + vaf.va = &args;
4721 +
4722 + pr_err(UBI_NAME_STR "%d error: %ps: %pV\n",
4723 + ubi->ubi_num, __builtin_return_address(0), &vaf);
4724 + va_end(args);
4725 +}
4726 --- a/drivers/mtd/ubi/ubi.h
4727 +++ b/drivers/mtd/ubi/ubi.h
4728 @@ -49,15 +49,19 @@
4729 /* UBI name used for character devices, sysfs, etc */
4730 #define UBI_NAME_STR "ubi"
4731
4732 +struct ubi_device;
4733 +
4734 /* Normal UBI messages */
4735 -#define ubi_msg(ubi, fmt, ...) pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
4736 - ubi->ubi_num, ##__VA_ARGS__)
4737 +__printf(2, 3)
4738 +void ubi_msg(const struct ubi_device *ubi, const char *fmt, ...);
4739 +
4740 /* UBI warning messages */
4741 -#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
4742 - ubi->ubi_num, __func__, ##__VA_ARGS__)
4743 +__printf(2, 3)
4744 +void ubi_warn(const struct ubi_device *ubi, const char *fmt, ...);
4745 +
4746 /* UBI error messages */
4747 -#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
4748 - ubi->ubi_num, __func__, ##__VA_ARGS__)
4749 +__printf(2, 3)
4750 +void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
4751
4752 /* Background thread name pattern */
4753 #define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
4754 --- a/drivers/mtd/ubi/wl.c
4755 +++ b/drivers/mtd/ubi/wl.c
4756 @@ -628,6 +628,7 @@ static int do_sync_erase(struct ubi_devi
4757 return __erase_worker(ubi, &wl_wrk);
4758 }
4759
4760 +static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
4761 /**
4762 * wear_leveling_worker - wear-leveling worker function.
4763 * @ubi: UBI device description object
4764 @@ -649,6 +650,7 @@ static int wear_leveling_worker(struct u
4765 #endif
4766 struct ubi_wl_entry *e1, *e2;
4767 struct ubi_vid_hdr *vid_hdr;
4768 + int dst_leb_clean = 0;
4769
4770 kfree(wrk);
4771 if (shutdown)
4772 @@ -753,6 +755,7 @@ static int wear_leveling_worker(struct u
4773
4774 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
4775 if (err && err != UBI_IO_BITFLIPS) {
4776 + dst_leb_clean = 1;
4777 if (err == UBI_IO_FF) {
4778 /*
4779 * We are trying to move PEB without a VID header. UBI
4780 @@ -798,10 +801,12 @@ static int wear_leveling_worker(struct u
4781 * protection queue.
4782 */
4783 protect = 1;
4784 + dst_leb_clean = 1;
4785 goto out_not_moved;
4786 }
4787 if (err == MOVE_RETRY) {
4788 scrubbing = 1;
4789 + dst_leb_clean = 1;
4790 goto out_not_moved;
4791 }
4792 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
4793 @@ -827,6 +832,7 @@ static int wear_leveling_worker(struct u
4794 ubi->erroneous_peb_count);
4795 goto out_error;
4796 }
4797 + dst_leb_clean = 1;
4798 erroneous = 1;
4799 goto out_not_moved;
4800 }
4801 @@ -897,15 +903,24 @@ out_not_moved:
4802 wl_tree_add(e1, &ubi->scrub);
4803 else
4804 wl_tree_add(e1, &ubi->used);
4805 + if (dst_leb_clean) {
4806 + wl_tree_add(e2, &ubi->free);
4807 + ubi->free_count++;
4808 + }
4809 +
4810 ubi_assert(!ubi->move_to_put);
4811 ubi->move_from = ubi->move_to = NULL;
4812 ubi->wl_scheduled = 0;
4813 spin_unlock(&ubi->wl_lock);
4814
4815 ubi_free_vid_hdr(ubi, vid_hdr);
4816 - err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
4817 - if (err)
4818 - goto out_ro;
4819 + if (dst_leb_clean) {
4820 + ensure_wear_leveling(ubi, 1);
4821 + } else {
4822 + err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
4823 + if (err)
4824 + goto out_ro;
4825 + }
4826
4827 mutex_unlock(&ubi->move_mutex);
4828 return 0;
4829 --- a/include/linux/mtd/bbm.h
4830 +++ b/include/linux/mtd/bbm.h
4831 @@ -166,7 +166,6 @@ struct bbm_info {
4832 };
4833
4834 /* OneNAND BBT interface */
4835 -extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
4836 extern int onenand_default_bbt(struct mtd_info *mtd);
4837
4838 #endif /* __LINUX_MTD_BBM_H */
4839 --- a/include/linux/mtd/fsmc.h
4840 +++ b/include/linux/mtd/fsmc.h
4841 @@ -103,24 +103,6 @@
4842
4843 #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
4844
4845 -/*
4846 - * There are 13 bytes of ecc for every 512 byte block in FSMC version 8
4847 - * and it has to be read consecutively and immediately after the 512
4848 - * byte data block for hardware to generate the error bit offsets
4849 - * Managing the ecc bytes in the following way is easier. This way is
4850 - * similar to oobfree structure maintained already in u-boot nand driver
4851 - */
4852 -#define MAX_ECCPLACE_ENTRIES 32
4853 -
4854 -struct fsmc_nand_eccplace {
4855 - uint8_t offset;
4856 - uint8_t length;
4857 -};
4858 -
4859 -struct fsmc_eccplace {
4860 - struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
4861 -};
4862 -
4863 struct fsmc_nand_timings {
4864 uint8_t tclr;
4865 uint8_t tar;
4866 --- a/include/linux/mtd/inftl.h
4867 +++ b/include/linux/mtd/inftl.h
4868 @@ -44,7 +44,6 @@ struct INFTLrecord {
4869 unsigned int nb_blocks; /* number of physical blocks */
4870 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
4871 struct erase_info instr;
4872 - struct nand_ecclayout oobinfo;
4873 };
4874
4875 int INFTL_mount(struct INFTLrecord *s);
4876 --- a/include/linux/mtd/map.h
4877 +++ b/include/linux/mtd/map.h
4878 @@ -142,7 +142,9 @@
4879 #endif
4880
4881 #ifndef map_bankwidth
4882 +#ifdef CONFIG_MTD
4883 #warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
4884 +#endif
4885 static inline int map_bankwidth(void *map)
4886 {
4887 BUG();
4888 @@ -238,8 +240,11 @@ struct map_info {
4889 If there is no cache to care about this can be set to NULL. */
4890 void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
4891
4892 - /* set_vpp() must handle being reentered -- enable, enable, disable
4893 - must leave it enabled. */
4894 + /* This will be called with 1 as parameter when the first map user
4895 + * needs VPP, and called with 0 when the last user exits. The map
4896 + * core maintains a reference counter, and assumes that VPP is a
4897 + * global resource applying to all mapped flash chips on the system.
4898 + */
4899 void (*set_vpp)(struct map_info *, int);
4900
4901 unsigned long pfow_base;
4902 --- a/include/linux/mtd/mtd.h
4903 +++ b/include/linux/mtd/mtd.h
4904 @@ -100,17 +100,35 @@ struct mtd_oob_ops {
4905
4906 #define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
4907 #define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
4908 +/**
4909 + * struct mtd_oob_region - oob region definition
4910 + * @offset: region offset
4911 + * @length: region length
4912 + *
4913 + * This structure describes a region of the OOB area, and is used
4914 + * to retrieve ECC or free bytes sections.
4915 + * Each section is defined by an offset within the OOB area and a
4916 + * length.
4917 + */
4918 +struct mtd_oob_region {
4919 + u32 offset;
4920 + u32 length;
4921 +};
4922 +
4923 /*
4924 - * Internal ECC layout control structure. For historical reasons, there is a
4925 - * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
4926 - * for export to user-space via the ECCGETLAYOUT ioctl.
4927 - * nand_ecclayout should be expandable in the future simply by the above macros.
4928 + * struct mtd_ooblayout_ops - NAND OOB layout operations
4929 + * @ecc: function returning an ECC region in the OOB area.
4930 + * Should return -ERANGE if %section exceeds the total number of
4931 + * ECC sections.
4932 + * @free: function returning a free region in the OOB area.
4933 + * Should return -ERANGE if %section exceeds the total number of
4934 + * free sections.
4935 */
4936 -struct nand_ecclayout {
4937 - __u32 eccbytes;
4938 - __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
4939 - __u32 oobavail;
4940 - struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
4941 +struct mtd_ooblayout_ops {
4942 + int (*ecc)(struct mtd_info *mtd, int section,
4943 + struct mtd_oob_region *oobecc);
4944 + int (*free)(struct mtd_info *mtd, int section,
4945 + struct mtd_oob_region *oobfree);
4946 };
4947
4948 struct module; /* only needed for owner field in mtd_info */
4949 @@ -171,8 +189,8 @@ struct mtd_info {
4950 const char *name;
4951 int index;
4952
4953 - /* ECC layout structure pointer - read only! */
4954 - struct nand_ecclayout *ecclayout;
4955 + /* OOB layout description */
4956 + const struct mtd_ooblayout_ops *ooblayout;
4957
4958 /* the ecc step size. */
4959 unsigned int ecc_step_size;
4960 @@ -258,6 +276,46 @@ struct mtd_info {
4961 int usecount;
4962 };
4963
4964 +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
4965 + struct mtd_oob_region *oobecc);
4966 +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
4967 + int *section,
4968 + struct mtd_oob_region *oobregion);
4969 +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
4970 + const u8 *oobbuf, int start, int nbytes);
4971 +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
4972 + u8 *oobbuf, int start, int nbytes);
4973 +int mtd_ooblayout_free(struct mtd_info *mtd, int section,
4974 + struct mtd_oob_region *oobfree);
4975 +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
4976 + const u8 *oobbuf, int start, int nbytes);
4977 +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
4978 + u8 *oobbuf, int start, int nbytes);
4979 +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
4980 +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
4981 +
4982 +static inline void mtd_set_ooblayout(struct mtd_info *mtd,
4983 + const struct mtd_ooblayout_ops *ooblayout)
4984 +{
4985 + mtd->ooblayout = ooblayout;
4986 +}
4987 +
4988 +static inline void mtd_set_of_node(struct mtd_info *mtd,
4989 + struct device_node *np)
4990 +{
4991 + mtd->dev.of_node = np;
4992 +}
4993 +
4994 +static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
4995 +{
4996 + return mtd->dev.of_node;
4997 +}
4998 +
4999 +static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
5000 +{
5001 + return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
5002 +}
5003 +
5004 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
5005 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
5006 void **virt, resource_size_t *phys);
5007 --- a/include/linux/mtd/nand.h
5008 +++ b/include/linux/mtd/nand.h
5009 @@ -119,6 +119,12 @@ typedef enum {
5010 NAND_ECC_SOFT_BCH,
5011 } nand_ecc_modes_t;
5012
5013 +enum nand_ecc_algo {
5014 + NAND_ECC_UNKNOWN,
5015 + NAND_ECC_HAMMING,
5016 + NAND_ECC_BCH,
5017 +};
5018 +
5019 /*
5020 * Constants for Hardware ECC
5021 */
5022 @@ -129,6 +135,14 @@ typedef enum {
5023 /* Enable Hardware ECC before syndrome is read back from flash */
5024 #define NAND_ECC_READSYN 2
5025
5026 +/*
5027 + * Enable generic NAND 'page erased' check. This check is only done when
5028 + * ecc.correct() returns -EBADMSG.
5029 + * Set this flag if your implementation does not fix bitflips in erased
5030 + * pages and you want to rely on the default implementation.
5031 + */
5032 +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
5033 +
5034 /* Bit mask for flags passed to do_nand_read_ecc */
5035 #define NAND_GET_DEVICE 0x80
5036
5037 @@ -160,6 +174,12 @@ typedef enum {
5038 /* Device supports subpage reads */
5039 #define NAND_SUBPAGE_READ 0x00001000
5040
5041 +/*
5042 + * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
5043 + * patterns.
5044 + */
5045 +#define NAND_NEED_SCRAMBLING 0x00002000
5046 +
5047 /* Options valid for Samsung large page devices */
5048 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
5049
5050 @@ -276,15 +296,15 @@ struct nand_onfi_params {
5051 __le16 t_r;
5052 __le16 t_ccs;
5053 __le16 src_sync_timing_mode;
5054 - __le16 src_ssync_features;
5055 + u8 src_ssync_features;
5056 __le16 clk_pin_capacitance_typ;
5057 __le16 io_pin_capacitance_typ;
5058 __le16 input_pin_capacitance_typ;
5059 u8 input_pin_capacitance_max;
5060 u8 driver_strength_support;
5061 __le16 t_int_r;
5062 - __le16 t_ald;
5063 - u8 reserved4[7];
5064 + __le16 t_adl;
5065 + u8 reserved4[8];
5066
5067 /* vendor */
5068 __le16 vendor_revision;
5069 @@ -407,7 +427,7 @@ struct nand_jedec_params {
5070 __le16 input_pin_capacitance_typ;
5071 __le16 clk_pin_capacitance_typ;
5072 u8 driver_strength_support;
5073 - __le16 t_ald;
5074 + __le16 t_adl;
5075 u8 reserved4[36];
5076
5077 /* ECC and endurance block */
5078 @@ -444,6 +464,7 @@ struct nand_hw_control {
5079 /**
5080 * struct nand_ecc_ctrl - Control structure for ECC
5081 * @mode: ECC mode
5082 + * @algo: ECC algorithm
5083 * @steps: number of ECC steps per page
5084 * @size: data bytes per ECC step
5085 * @bytes: ECC bytes per step
5086 @@ -451,12 +472,18 @@ struct nand_hw_control {
5087 * @total: total number of ECC bytes per page
5088 * @prepad: padding information for syndrome based ECC generators
5089 * @postpad: padding information for syndrome based ECC generators
5090 - * @layout: ECC layout control struct pointer
5091 + * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
5092 * @priv: pointer to private ECC control data
5093 * @hwctl: function to control hardware ECC generator. Must only
5094 * be provided if an hardware ECC is available
5095 * @calculate: function for ECC calculation or readback from ECC hardware
5096 - * @correct: function for ECC correction, matching to ECC generator (sw/hw)
5097 + * @correct: function for ECC correction, matching to ECC generator (sw/hw).
5098 + * Should return a positive number representing the number of
5099 + * corrected bitflips, -EBADMSG if the number of bitflips exceed
5100 + * ECC strength, or any other error code if the error is not
5101 + * directly related to correction.
5102 + * If -EBADMSG is returned the input buffers should be left
5103 + * untouched.
5104 * @read_page_raw: function to read a raw page without ECC. This function
5105 * should hide the specific layout used by the ECC
5106 * controller and always return contiguous in-band and
5107 @@ -487,6 +514,7 @@ struct nand_hw_control {
5108 */
5109 struct nand_ecc_ctrl {
5110 nand_ecc_modes_t mode;
5111 + enum nand_ecc_algo algo;
5112 int steps;
5113 int size;
5114 int bytes;
5115 @@ -494,7 +522,7 @@ struct nand_ecc_ctrl {
5116 int strength;
5117 int prepad;
5118 int postpad;
5119 - struct nand_ecclayout *layout;
5120 + unsigned int options;
5121 void *priv;
5122 void (*hwctl)(struct mtd_info *mtd, int mode);
5123 int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
5124 @@ -540,11 +568,11 @@ struct nand_buffers {
5125
5126 /**
5127 * struct nand_chip - NAND Private Flash Chip Data
5128 + * @mtd: MTD device registered to the MTD framework
5129 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
5130 * flash device
5131 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
5132 * flash device.
5133 - * @flash_node: [BOARDSPECIFIC] device node describing this instance
5134 * @read_byte: [REPLACEABLE] read one byte from the chip
5135 * @read_word: [REPLACEABLE] read one word from the chip
5136 * @write_byte: [REPLACEABLE] write a single byte to the chip on the
5137 @@ -640,18 +668,17 @@ struct nand_buffers {
5138 */
5139
5140 struct nand_chip {
5141 + struct mtd_info mtd;
5142 void __iomem *IO_ADDR_R;
5143 void __iomem *IO_ADDR_W;
5144
5145 - struct device_node *flash_node;
5146 -
5147 uint8_t (*read_byte)(struct mtd_info *mtd);
5148 u16 (*read_word)(struct mtd_info *mtd);
5149 void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
5150 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
5151 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
5152 void (*select_chip)(struct mtd_info *mtd, int chip);
5153 - int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
5154 + int (*block_bad)(struct mtd_info *mtd, loff_t ofs);
5155 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
5156 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
5157 int (*dev_ready)(struct mtd_info *mtd);
5158 @@ -719,6 +746,40 @@ struct nand_chip {
5159 void *priv;
5160 };
5161
5162 +extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
5163 +extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
5164 +
5165 +static inline void nand_set_flash_node(struct nand_chip *chip,
5166 + struct device_node *np)
5167 +{
5168 + mtd_set_of_node(&chip->mtd, np);
5169 +}
5170 +
5171 +static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
5172 +{
5173 + return mtd_get_of_node(&chip->mtd);
5174 +}
5175 +
5176 +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
5177 +{
5178 + return container_of(mtd, struct nand_chip, mtd);
5179 +}
5180 +
5181 +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
5182 +{
5183 + return &chip->mtd;
5184 +}
5185 +
5186 +static inline void *nand_get_controller_data(struct nand_chip *chip)
5187 +{
5188 + return chip->priv;
5189 +}
5190 +
5191 +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
5192 +{
5193 + chip->priv = priv;
5194 +}
5195 +
5196 /*
5197 * NAND Flash Manufacturer ID Codes
5198 */
5199 @@ -850,7 +911,6 @@ extern int nand_do_read(struct mtd_info
5200 * @chip_delay: R/B delay value in us
5201 * @options: Option flags, e.g. 16bit buswidth
5202 * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
5203 - * @ecclayout: ECC layout info structure
5204 * @part_probe_types: NULL-terminated array of probe types
5205 */
5206 struct platform_nand_chip {
5207 @@ -858,7 +918,6 @@ struct platform_nand_chip {
5208 int chip_offset;
5209 int nr_partitions;
5210 struct mtd_partition *partitions;
5211 - struct nand_ecclayout *ecclayout;
5212 int chip_delay;
5213 unsigned int options;
5214 unsigned int bbt_options;
5215 @@ -908,15 +967,6 @@ struct platform_nand_data {
5216 struct platform_nand_ctrl ctrl;
5217 };
5218
5219 -/* Some helpers to access the data structures */
5220 -static inline
5221 -struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
5222 -{
5223 - struct nand_chip *chip = mtd->priv;
5224 -
5225 - return chip->priv;
5226 -}
5227 -
5228 /* return the supported features. */
5229 static inline int onfi_feature(struct nand_chip *chip)
5230 {
5231 --- a/include/linux/mtd/nand_bch.h
5232 +++ b/include/linux/mtd/nand_bch.h
5233 @@ -32,9 +32,7 @@ int nand_bch_correct_data(struct mtd_inf
5234 /*
5235 * Initialize BCH encoder/decoder
5236 */
5237 -struct nand_bch_control *
5238 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
5239 - unsigned int eccbytes, struct nand_ecclayout **ecclayout);
5240 +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
5241 /*
5242 * Release BCH encoder/decoder resources
5243 */
5244 @@ -55,12 +53,10 @@ static inline int
5245 nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
5246 unsigned char *read_ecc, unsigned char *calc_ecc)
5247 {
5248 - return -1;
5249 + return -ENOTSUPP;
5250 }
5251
5252 -static inline struct nand_bch_control *
5253 -nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
5254 - unsigned int eccbytes, struct nand_ecclayout **ecclayout)
5255 +static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
5256 {
5257 return NULL;
5258 }
5259 --- a/include/linux/mtd/nftl.h
5260 +++ b/include/linux/mtd/nftl.h
5261 @@ -50,7 +50,6 @@ struct NFTLrecord {
5262 unsigned int nb_blocks; /* number of physical blocks */
5263 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
5264 struct erase_info instr;
5265 - struct nand_ecclayout oobinfo;
5266 };
5267
5268 int NFTL_mount(struct NFTLrecord *s);
5269 --- a/include/linux/mtd/onenand.h
5270 +++ b/include/linux/mtd/onenand.h
5271 @@ -80,7 +80,6 @@ struct onenand_bufferram {
5272 * @page_buf: [INTERN] page main data buffer
5273 * @oob_buf: [INTERN] page oob data buffer
5274 * @subpagesize: [INTERN] holds the subpagesize
5275 - * @ecclayout: [REPLACEABLE] the default ecc placement scheme
5276 * @bbm: [REPLACEABLE] pointer to Bad Block Management
5277 * @priv: [OPTIONAL] pointer to private chip date
5278 */
5279 @@ -134,7 +133,6 @@ struct onenand_chip {
5280 #endif
5281
5282 int subpagesize;
5283 - struct nand_ecclayout *ecclayout;
5284
5285 void *bbm;
5286
5287 --- a/include/linux/mtd/partitions.h
5288 +++ b/include/linux/mtd/partitions.h
5289 @@ -42,7 +42,6 @@ struct mtd_partition {
5290 uint64_t size; /* partition size */
5291 uint64_t offset; /* offset within the master MTD space */
5292 uint32_t mask_flags; /* master MTD flags to mask out for this partition */
5293 - struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
5294 };
5295
5296 #define MTDPART_OFS_RETAIN (-3)
5297 @@ -56,11 +55,9 @@ struct device_node;
5298 /**
5299 * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
5300 * @origin: for RedBoot, start address of MTD device
5301 - * @of_node: for OF parsers, device node containing partitioning information
5302 */
5303 struct mtd_part_parser_data {
5304 unsigned long origin;
5305 - struct device_node *of_node;
5306 };
5307
5308
5309 @@ -78,14 +75,34 @@ struct mtd_part_parser {
5310 struct list_head list;
5311 struct module *owner;
5312 const char *name;
5313 - int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
5314 + int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
5315 struct mtd_part_parser_data *);
5316 + void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
5317 enum mtd_parser_type type;
5318 };
5319
5320 -extern void register_mtd_parser(struct mtd_part_parser *parser);
5321 +/* Container for passing around a set of parsed partitions */
5322 +struct mtd_partitions {
5323 + const struct mtd_partition *parts;
5324 + int nr_parts;
5325 + const struct mtd_part_parser *parser;
5326 +};
5327 +
5328 +extern int __register_mtd_parser(struct mtd_part_parser *parser,
5329 + struct module *owner);
5330 +#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
5331 +
5332 extern void deregister_mtd_parser(struct mtd_part_parser *parser);
5333
5334 +/*
5335 + * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
5336 + * do anything special in module init/exit. Each driver may only use this macro
5337 + * once, and calling it replaces module_init() and module_exit().
5338 + */
5339 +#define module_mtd_part_parser(__mtd_part_parser) \
5340 + module_driver(__mtd_part_parser, register_mtd_parser, \
5341 + deregister_mtd_parser)
5342 +
5343 int mtd_is_partition(const struct mtd_info *mtd);
5344 int mtd_add_partition(struct mtd_info *master, const char *name,
5345 long long offset, long long length);
5346 --- a/include/linux/mtd/sh_flctl.h
5347 +++ b/include/linux/mtd/sh_flctl.h
5348 @@ -143,11 +143,11 @@ enum flctl_ecc_res_t {
5349 struct dma_chan;
5350
5351 struct sh_flctl {
5352 - struct mtd_info mtd;
5353 struct nand_chip chip;
5354 struct platform_device *pdev;
5355 struct dev_pm_qos_request pm_qos;
5356 void __iomem *reg;
5357 + resource_size_t fifo;
5358
5359 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
5360 int read_bytes;
5361 @@ -186,7 +186,7 @@ struct sh_flctl_platform_data {
5362
5363 static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
5364 {
5365 - return container_of(mtdinfo, struct sh_flctl, mtd);
5366 + return container_of(mtd_to_nand(mtdinfo), struct sh_flctl, chip);
5367 }
5368
5369 #endif /* __SH_FLCTL_H__ */
5370 --- a/include/linux/mtd/sharpsl.h
5371 +++ b/include/linux/mtd/sharpsl.h
5372 @@ -14,7 +14,7 @@
5373
5374 struct sharpsl_nand_platform_data {
5375 struct nand_bbt_descr *badblock_pattern;
5376 - struct nand_ecclayout *ecc_layout;
5377 + const struct mtd_ooblayout_ops *ecc_layout;
5378 struct mtd_partition *partitions;
5379 unsigned int nr_partitions;
5380 };
5381 --- a/include/uapi/mtd/mtd-abi.h
5382 +++ b/include/uapi/mtd/mtd-abi.h
5383 @@ -228,7 +228,7 @@ struct nand_oobfree {
5384 * complete set of ECC information. The ioctl truncates the larger internal
5385 * structure to retain binary compatibility with the static declaration of the
5386 * ioctl. Note that the "MTD_MAX_..._ENTRIES" macros represent the max size of
5387 - * the user struct, not the MAX size of the internal struct nand_ecclayout.
5388 + * the user struct, not the MAX size of the internal OOB layout representation.
5389 */
5390 struct nand_ecclayout_user {
5391 __u32 eccbytes;
5392 --- a/fs/jffs2/wbuf.c
5393 +++ b/fs/jffs2/wbuf.c
5394 @@ -1153,7 +1153,7 @@ static struct jffs2_sb_info *work_to_sb(
5395 {
5396 struct delayed_work *dwork;
5397
5398 - dwork = container_of(work, struct delayed_work, work);
5399 + dwork = to_delayed_work(work);
5400 return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
5401 }
5402
5403 @@ -1183,22 +1183,20 @@ void jffs2_dirty_trigger(struct jffs2_sb
5404
5405 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
5406 {
5407 - struct nand_ecclayout *oinfo = c->mtd->ecclayout;
5408 -
5409 if (!c->mtd->oobsize)
5410 return 0;
5411
5412 /* Cleanmarker is out-of-band, so inline size zero */
5413 c->cleanmarker_size = 0;
5414
5415 - if (!oinfo || oinfo->oobavail == 0) {
5416 + if (c->mtd->oobavail == 0) {
5417 pr_err("inconsistent device description\n");
5418 return -EINVAL;
5419 }
5420
5421 jffs2_dbg(1, "using OOB on NAND\n");
5422
5423 - c->oobavail = oinfo->oobavail;
5424 + c->oobavail = c->mtd->oobavail;
5425
5426 /* Initialise write buffer */
5427 init_rwsem(&c->wbuf_sem);
5428 --- a/include/linux/mtd/spi-nor.h
5429 +++ b/include/linux/mtd/spi-nor.h
5430 @@ -85,6 +85,7 @@
5431 #define SR_BP0 BIT(2) /* Block protect 0 */
5432 #define SR_BP1 BIT(3) /* Block protect 1 */
5433 #define SR_BP2 BIT(4) /* Block protect 2 */
5434 +#define SR_TB BIT(5) /* Top/Bottom protect */
5435 #define SR_SRWD BIT(7) /* SR write protect */
5436
5437 #define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
5438 @@ -116,6 +117,7 @@ enum spi_nor_ops {
5439
5440 enum spi_nor_option_flags {
5441 SNOR_F_USE_FSR = BIT(0),
5442 + SNOR_F_HAS_SR_TB = BIT(1),
5443 };
5444
5445 /**
5446 @@ -123,7 +125,6 @@ enum spi_nor_option_flags {
5447 * @mtd: point to a mtd_info structure
5448 * @lock: the lock for the read/write/erase/lock/unlock operations
5449 * @dev: point to a spi device, or a spi nor controller device.
5450 - * @flash_node: point to a device node describing this flash instance.
5451 * @page_size: the page size of the SPI NOR
5452 * @addr_width: number of address bytes
5453 * @erase_opcode: the opcode for erasing a sector
5454 @@ -143,7 +144,8 @@ enum spi_nor_option_flags {
5455 * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
5456 * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
5457 * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
5458 - * at the offset @offs
5459 + * at the offset @offs; if not provided by the driver,
5460 + * spi-nor will send the erase opcode via write_reg()
5461 * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
5462 * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
5463 * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is
5464 @@ -154,7 +156,6 @@ struct spi_nor {
5465 struct mtd_info mtd;
5466 struct mutex lock;
5467 struct device *dev;
5468 - struct device_node *flash_node;
5469 u32 page_size;
5470 u8 addr_width;
5471 u8 erase_opcode;
5472 @@ -184,6 +185,17 @@ struct spi_nor {
5473 void *priv;
5474 };
5475
5476 +static inline void spi_nor_set_flash_node(struct spi_nor *nor,
5477 + struct device_node *np)
5478 +{
5479 + mtd_set_of_node(&nor->mtd, np);
5480 +}
5481 +
5482 +static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
5483 +{
5484 + return mtd_get_of_node(&nor->mtd);
5485 +}
5486 +
5487 /**
5488 * spi_nor_scan() - scan the SPI NOR
5489 * @nor: the spi_nor structure