target/linux/generic-2.6/patches-2.6.23/065-rootfs_split.patch
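This patch makes MTD partitions refreshable at runtime: add_mtd_partitions() is
refactored into add_one_partition(), a "rootfs" partition holding a squashfs image
can be split so that the space after the image (rounded up to the next erase block)
becomes a "rootfs_data" partition, block2mtd learns to close and reopen its backing
block device, and a new MTDREFRESH ioctl on an MTD character device triggers
refresh_mtd_partitions(), which re-runs the driver's refresh hook and re-evaluates
the rootfs split.

A minimal userspace sketch of driving the new ioctl (illustration only, not part of
the patch; the device node path and the fallback define are assumptions):

	/* refresh.c -- reread the partition layout behind an MTD char device */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	#ifndef MTDREFRESH
	#define MTDREFRESH _IO('M', 23)	/* as added to include/mtd/mtd-abi.h below */
	#endif

	int main(int argc, char **argv)
	{
		const char *dev = (argc > 1) ? argv[1] : "/dev/mtd0";	/* assumed device node */
		int fd = open(dev, O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* invokes refresh_mtd_partitions() in drivers/mtd/mtdpart.c */
		if (ioctl(fd, MTDREFRESH) < 0) {
			perror("MTDREFRESH");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
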
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -47,6 +47,16 @@
 	  devices. Partitioning on NFTL 'devices' is a different - that's the
 	  'normal' form of partitioning used on a block device.

+config MTD_ROOTFS_ROOT_DEV
+	bool "Automatically set 'rootfs' partition to be root filesystem"
+	depends on MTD_PARTITIONS
+	default y
+
+config MTD_ROOTFS_SPLIT
+	bool "Automatically split 'rootfs' partition for squashfs"
+	depends on MTD_PARTITIONS
+	default y
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
 	depends on MTD_PARTITIONS
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,6 +20,8 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/compatmac.h>
+#include <linux/squashfs_fs.h>
+#include <linux/root_dev.h>

 /* Our partition linked list */
 static LIST_HEAD(mtd_partitions);
@@ -39,7 +41,7 @@
  * the pointer to that structure with this macro.
  */
 #define PART(x)  ((struct mtd_part *)(x))
-
+#define IS_PART(mtd) (mtd->read == part_read)

 /*
  * MTD methods which simply translate the effective address and pass through
@@ -308,6 +310,312 @@
 	return 0;
 }

+static u_int32_t cur_offset = 0;
+static int add_one_partition(struct mtd_info *master, const struct mtd_partition *part,
+		int i, struct mtd_part **slp)
+{
+	struct mtd_part *slave;
+
+	/* allocate the partition structure */
+	slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+	if (!slave) {
+		printk ("memory allocation error while creating partitions for \"%s\"\n",
+			master->name);
+		del_mtd_partitions(master);
+		return -ENOMEM;
+	}
+	list_add(&slave->list, &mtd_partitions);
+
+	/* set up the MTD object for this partition */
+	slave->mtd.type = master->type;
+	slave->mtd.flags = master->flags & ~part->mask_flags;
+	slave->mtd.size = part->size;
+	slave->mtd.writesize = master->writesize;
+	slave->mtd.oobsize = master->oobsize;
+	slave->mtd.oobavail = master->oobavail;
+	slave->mtd.subpage_sft = master->subpage_sft;
+
+	slave->mtd.name = part->name;
+	slave->mtd.owner = master->owner;
+
+	slave->mtd.read = part_read;
+	slave->mtd.write = part_write;
+	slave->mtd.refresh_device = part->refresh_partition;
+
+	if(master->point && master->unpoint){
+		slave->mtd.point = part_point;
+		slave->mtd.unpoint = part_unpoint;
+	}
+
+	if (master->read_oob)
+		slave->mtd.read_oob = part_read_oob;
+	if (master->write_oob)
+		slave->mtd.write_oob = part_write_oob;
+	if(master->read_user_prot_reg)
+		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+	if(master->read_fact_prot_reg)
+		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+	if(master->write_user_prot_reg)
+		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+	if(master->lock_user_prot_reg)
+		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+	if(master->get_user_prot_info)
+		slave->mtd.get_user_prot_info = part_get_user_prot_info;
+	if(master->get_fact_prot_info)
+		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+	if (master->sync)
+		slave->mtd.sync = part_sync;
+	if (!i && master->suspend && master->resume) {
+			slave->mtd.suspend = part_suspend;
+			slave->mtd.resume = part_resume;
+	}
+	if (master->writev)
+		slave->mtd.writev = part_writev;
+	if (master->lock)
+		slave->mtd.lock = part_lock;
+	if (master->unlock)
+		slave->mtd.unlock = part_unlock;
+	if (master->block_isbad)
+		slave->mtd.block_isbad = part_block_isbad;
+	if (master->block_markbad)
+		slave->mtd.block_markbad = part_block_markbad;
+	slave->mtd.erase = part_erase;
+	slave->master = master;
+	slave->offset = part->offset;
+	slave->index = i;
+
+	if (slave->offset == MTDPART_OFS_APPEND)
+		slave->offset = cur_offset;
+	if (slave->offset == MTDPART_OFS_NXTBLK) {
+		slave->offset = cur_offset;
+		if ((cur_offset % master->erasesize) != 0) {
+			/* Round up to next erasesize */
+			slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+			printk(KERN_NOTICE "Moving partition %d: "
+			       "0x%08x -> 0x%08x\n", i,
+			       cur_offset, slave->offset);
+		}
+	}
+	if (slave->mtd.size == MTDPART_SIZ_FULL)
+		slave->mtd.size = master->size - slave->offset;
+	cur_offset = slave->offset + slave->mtd.size;
+
+	printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+		slave->offset + slave->mtd.size, slave->mtd.name);
+
+	/* let's do some sanity checks */
+	if (slave->offset >= master->size) {
+		/* let's register it anyway to preserve ordering */
+		slave->offset = 0;
+		slave->mtd.size = 0;
+		printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+			part->name);
+	}
+	if (slave->offset + slave->mtd.size > master->size) {
+		slave->mtd.size = master->size - slave->offset;
+		printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+			part->name, master->name, slave->mtd.size);
+	}
+	if (master->numeraseregions>1) {
+		/* Deal with variable erase size stuff */
+		int i;
+		struct mtd_erase_region_info *regions = master->eraseregions;
+
+		/* Find the first erase regions which is part of this partition. */
+		for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+			;
+
+		for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+			if (slave->mtd.erasesize < regions[i].erasesize) {
+				slave->mtd.erasesize = regions[i].erasesize;
+			}
+		}
+	} else {
+		/* Single erase size */
+		slave->mtd.erasesize = master->erasesize;
+	}
+
+	if ((slave->mtd.flags & MTD_WRITEABLE) &&
+	    (slave->offset % slave->mtd.erasesize)) {
+		/* Doesn't start on a boundary of major erase size */
+		/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+		slave->mtd.flags &= ~MTD_WRITEABLE;
+		printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+			part->name);
+	}
+	if ((slave->mtd.flags & MTD_WRITEABLE) &&
+	    (slave->mtd.size % slave->mtd.erasesize)) {
+		slave->mtd.flags &= ~MTD_WRITEABLE;
+		printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+			part->name);
+	}
+
+	slave->mtd.ecclayout = master->ecclayout;
+	if (master->block_isbad) {
+		uint32_t offs = 0;
+
+		while(offs < slave->mtd.size) {
+			if (master->block_isbad(master,
+						offs + slave->offset))
+				slave->mtd.ecc_stats.badblocks++;
+			offs += slave->mtd.erasesize;
+		}
+	}
+
+	if(part->mtdp)
+	{	/* store the object pointer (caller may or may not register it */
+		*part->mtdp = &slave->mtd;
+		slave->registered = 0;
+	}
+	else
+	{
+		/* register our partition */
+		add_mtd_device(&slave->mtd);
+		slave->registered = 1;
+	}
+
+	if (slp)
+		*slp = slave;
+
+	return 0;
+}
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+#define ROOTFS_REMOVED_NAME "<removed>"
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+	char buf[512];
+	struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
+	int len, ret;
+
+	ret = master->read(master, offset, sizeof(*sb), &len, buf);
+	if (ret || (len != sizeof(*sb))) {
+		printk(KERN_ALERT "split_squashfs: error occured while reading "
+			"from \"%s\"\n", master->name);
+		return -EINVAL;
+	}
+
+	if (*((u32 *) buf) != SQUASHFS_MAGIC) {
+		printk(KERN_ALERT "split_squashfs: no squashfs found in \"%s\"\n",
+			master->name);
+		*split_offset = 0;
+		return 0;
+	}
+
+	if (sb->bytes_used <= 0) {
+		printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
+			master->name);
+		*split_offset = 0;
+		return 0;
+	}
+
+	len = (u32) sb->bytes_used;
+	len += (offset & 0x000fffff);
+	len += (master->erasesize - 1);
+	len &= ~(master->erasesize - 1);
+	len -= (offset & 0x000fffff);
+	*split_offset = offset + len;
+
+	return 0;
+}
+
+static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, struct mtd_partition *part,
+		int index)
+{
+	struct mtd_partition *dpart;
+	struct mtd_part *slave = NULL;
+	int split_offset = 0;
+	int ret;
+
+	ret = split_squashfs(master, part->offset, &split_offset);
+	if (ret)
+		return ret;
+
+	if (split_offset <= 0)
+		return 0;
+
+	dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
+	if (dpart == NULL) {
+		printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
+			ROOTFS_SPLIT_NAME);
+		return -ENOMEM;
+	}
+
+	memcpy(dpart, part, sizeof(*part));
+	dpart->name = (unsigned char *)&dpart[1];
+	strcpy(dpart->name, ROOTFS_SPLIT_NAME);
+
+	dpart->size -= split_offset - dpart->offset;
+	dpart->offset = split_offset;
+
+	if (dpart == NULL)
+		return 1;
+
+	printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%X, len=%X \n",
+		ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
+
+	ret = add_one_partition(master, dpart, index, &slave);
+	if (ret)
+		kfree(dpart);
+	else if (slave)
+		rpart->split = &slave->mtd;
+
+	return ret;
+}
+
+static int refresh_rootfs_split(struct mtd_info *mtd)
+{
+	struct mtd_partition tpart;
+	struct mtd_part *part;
+	int index = 0;
+	int offset, size;
+	int ret;
+
+	part = PART(mtd);
+
+	/* check for the new squashfs offset first */
+	ret = split_squashfs(part->master, part->offset, &offset);
+	if (ret)
+		return ret;
+
+	if ((offset > 0) && !mtd->split) {
+		printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
+		/* if we don't have a rootfs split partition, create a new one */
+		tpart.name = mtd->name;
+		tpart.size = mtd->size;
+		tpart.offset = part->offset;
+
+		/* find the index of the last partition */
+		if (!list_empty(&mtd_partitions))
+			index = list_first_entry(&mtd_partitions, struct mtd_part, list)->index + 1;
+
+		return split_rootfs_data(part->master, &part->mtd, &tpart, index);
+	} else if ((offset > 0) && mtd->split) {
+		/* update the offsets of the existing partition */
+		size = mtd->size + part->offset - offset;
+
+		part = PART(mtd->split);
+		part->offset = offset;
+		part->mtd.size = size;
+		printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
+			__func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
+			part->offset, part->mtd.size);
+		strcpy(part->mtd.name, ROOTFS_SPLIT_NAME);
+	} else if ((offset <= 0) && mtd->split) {
+		printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
+
+		/* mark existing partition as removed */
+		part = PART(mtd->split);
+		strcpy(part->mtd.name, ROOTFS_REMOVED_NAME);
+		part->offset = 0;
+		part->mtd.size = 0;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
+
 /*
  * This function, given a master MTD object and a partition table, creates
  * and registers slave MTD objects which are bound to the master according to
@@ -320,168 +628,31 @@
 		       int nbparts)
 {
 	struct mtd_part *slave;
-	u_int32_t cur_offset = 0;
-	int i;
+	struct mtd_partition *part;
+	int i, j, ret = 0;

 	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

-	for (i = 0; i < nbparts; i++) {
-
-		/* allocate the partition structure */
-		slave = kzalloc (sizeof(*slave), GFP_KERNEL);
-		if (!slave) {
-			printk ("memory allocation error while creating partitions for \"%s\"\n",
-				master->name);
-			del_mtd_partitions(master);
-			return -ENOMEM;
-		}
-		list_add(&slave->list, &mtd_partitions);
-
-		/* set up the MTD object for this partition */
-		slave->mtd.type = master->type;
-		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
-		slave->mtd.size = parts[i].size;
-		slave->mtd.writesize = master->writesize;
-		slave->mtd.oobsize = master->oobsize;
-		slave->mtd.oobavail = master->oobavail;
-		slave->mtd.subpage_sft = master->subpage_sft;
-
-		slave->mtd.name = parts[i].name;
-		slave->mtd.owner = master->owner;
-
-		slave->mtd.read = part_read;
-		slave->mtd.write = part_write;
-
-		if(master->point && master->unpoint){
-			slave->mtd.point = part_point;
-			slave->mtd.unpoint = part_unpoint;
-		}
-
-		if (master->read_oob)
-			slave->mtd.read_oob = part_read_oob;
-		if (master->write_oob)
-			slave->mtd.write_oob = part_write_oob;
-		if(master->read_user_prot_reg)
-			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
-		if(master->read_fact_prot_reg)
-			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
-		if(master->write_user_prot_reg)
-			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
-		if(master->lock_user_prot_reg)
-			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
-		if(master->get_user_prot_info)
-			slave->mtd.get_user_prot_info = part_get_user_prot_info;
-		if(master->get_fact_prot_info)
-			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
-		if (master->sync)
-			slave->mtd.sync = part_sync;
-		if (!i && master->suspend && master->resume) {
-				slave->mtd.suspend = part_suspend;
-				slave->mtd.resume = part_resume;
-		}
-		if (master->writev)
-			slave->mtd.writev = part_writev;
-		if (master->lock)
-			slave->mtd.lock = part_lock;
-		if (master->unlock)
-			slave->mtd.unlock = part_unlock;
-		if (master->block_isbad)
-			slave->mtd.block_isbad = part_block_isbad;
-		if (master->block_markbad)
-			slave->mtd.block_markbad = part_block_markbad;
-		slave->mtd.erase = part_erase;
-		slave->master = master;
-		slave->offset = parts[i].offset;
-		slave->index = i;
-
-		if (slave->offset == MTDPART_OFS_APPEND)
-			slave->offset = cur_offset;
-		if (slave->offset == MTDPART_OFS_NXTBLK) {
-			slave->offset = cur_offset;
-			if ((cur_offset % master->erasesize) != 0) {
-				/* Round up to next erasesize */
-				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
-				printk(KERN_NOTICE "Moving partition %d: "
-				       "0x%08x -> 0x%08x\n", i,
-				       cur_offset, slave->offset);
-			}
-		}
-		if (slave->mtd.size == MTDPART_SIZ_FULL)
-			slave->mtd.size = master->size - slave->offset;
-		cur_offset = slave->offset + slave->mtd.size;
-
-		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
-			slave->offset + slave->mtd.size, slave->mtd.name);
-
-		/* let's do some sanity checks */
-		if (slave->offset >= master->size) {
-			/* let's register it anyway to preserve ordering */
-			slave->offset = 0;
-			slave->mtd.size = 0;
-			printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
-				parts[i].name);
-		}
-		if (slave->offset + slave->mtd.size > master->size) {
-			slave->mtd.size = master->size - slave->offset;
-			printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
-				parts[i].name, master->name, slave->mtd.size);
-		}
-		if (master->numeraseregions>1) {
-			/* Deal with variable erase size stuff */
-			int i;
-			struct mtd_erase_region_info *regions = master->eraseregions;
-
-			/* Find the first erase regions which is part of this partition. */
-			for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
-				;
-
-			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
-				if (slave->mtd.erasesize < regions[i].erasesize) {
-					slave->mtd.erasesize = regions[i].erasesize;
-				}
+	for (i = 0, j = 0; i < nbparts; i++) {
+		part = (struct mtd_partition *) &parts[i];
+		ret = add_one_partition(master, part, j, &slave);
+		if (ret)
+			return ret;
+		j++;
+
+		if (strcmp(part->name, "rootfs") == 0 && slave->registered) {
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+			if (ROOT_DEV == 0) {
+				printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+					"set to be root filesystem\n");
+				ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
 			}
-		} else {
-			/* Single erase size */
-			slave->mtd.erasesize = master->erasesize;
-		}
-
-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
-		    (slave->offset % slave->mtd.erasesize)) {
-			/* Doesn't start on a boundary of major erase size */
-			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
-			slave->mtd.flags &= ~MTD_WRITEABLE;
-			printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
-				parts[i].name);
-		}
-		if ((slave->mtd.flags & MTD_WRITEABLE) &&
-		    (slave->mtd.size % slave->mtd.erasesize)) {
-			slave->mtd.flags &= ~MTD_WRITEABLE;
-			printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
-				parts[i].name);
-		}
-
-		slave->mtd.ecclayout = master->ecclayout;
-		if (master->block_isbad) {
-			uint32_t offs = 0;
-
-			while(offs < slave->mtd.size) {
-				if (master->block_isbad(master,
-						offs + slave->offset))
-					slave->mtd.ecc_stats.badblocks++;
-				offs += slave->mtd.erasesize;
-			}
-		}
-
-		if(parts[i].mtdp)
-		{	/* store the object pointer (caller may or may not register it */
-			*parts[i].mtdp = &slave->mtd;
-			slave->registered = 0;
-		}
-		else
-		{
-			/* register our partition */
-			add_mtd_device(&slave->mtd);
-			slave->registered = 1;
+#endif
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+			ret = split_rootfs_data(master, &slave->mtd, part, j);
+			if (ret == 0)
+				j++;
+#endif
 		}
 	}

@@ -557,6 +728,32 @@
 	return ret;
 }

+int refresh_mtd_partitions(struct mtd_info *mtd)
+{
+	int ret = 0;
+
+	if (IS_PART(mtd)) {
+		struct mtd_part *part;
+		struct mtd_info *master;
+
+		part = PART(mtd);
+		master = part->master;
+		if (master->refresh_device)
+			ret = master->refresh_device(master);
+	}
+
+	if (!ret && mtd->refresh_device)
+		ret = mtd->refresh_device(mtd);
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+	if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
+		refresh_rootfs_split(mtd);
+#endif
+
+	return 0;
+}
+
 EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+EXPORT_SYMBOL_GPL(refresh_mtd_partitions);
 EXPORT_SYMBOL_GPL(register_mtd_parser);
 EXPORT_SYMBOL_GPL(deregister_mtd_parser);
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -34,6 +34,8 @@
 	struct block_device *blkdev;
 	struct mtd_info mtd;
 	struct mutex write_mutex;
+	rwlock_t bdev_mutex;
+	char devname[0];
 };


@@ -86,6 +88,12 @@
 	size_t len = instr->len;
 	int err;

+	read_lock(&dev->bdev_mutex);
+	if (!dev->blkdev) {
+		err = -EINVAL;
+		goto done;
+	}
+
 	instr->state = MTD_ERASING;
 	mutex_lock(&dev->write_mutex);
 	err = _block2mtd_erase(dev, from, len);
@@ -98,6 +106,10 @@

 	instr->state = MTD_ERASE_DONE;
 	mtd_erase_callback(instr);
+
+done:
+	read_unlock(&dev->bdev_mutex);
+
 	return err;
 }

@@ -109,10 +121,14 @@
 	struct page *page;
 	int index = from >> PAGE_SHIFT;
 	int offset = from & (PAGE_SIZE-1);
-	int cpylen;
+	int cpylen, err = 0;
+
+	read_lock(&dev->bdev_mutex);
+	if (!dev->blkdev || (from > mtd->size)) {
+		err = -EINVAL;
+		goto done;
+	}

-	if (from > mtd->size)
-		return -EINVAL;
 	if (from + len > mtd->size)
 		len = mtd->size - from;

@@ -127,10 +143,14 @@
 		len = len - cpylen;

 		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
-		if (!page)
-			return -ENOMEM;
-		if (IS_ERR(page))
-			return PTR_ERR(page);
+		if (!page) {
+			err = -ENOMEM;
+			goto done;
+		}
+		if (IS_ERR(page)) {
+			err = PTR_ERR(page);
+			goto done;
+		}

 		memcpy(buf, page_address(page) + offset, cpylen);
 		page_cache_release(page);
@@ -141,7 +161,10 @@
 		offset = 0;
 		index++;
 	}
-	return 0;
+
+done:
+	read_unlock(&dev->bdev_mutex);
+	return err;
 }


@@ -193,12 +216,22 @@
 		size_t *retlen, const u_char *buf)
 {
 	struct block2mtd_dev *dev = mtd->priv;
-	int err;
+	int err = 0;
+
+	read_lock(&dev->bdev_mutex);
+	if (!dev->blkdev) {
+		err = -EINVAL;
+		goto done;
+	}

 	if (!len)
-		return 0;
-	if (to >= mtd->size)
-		return -ENOSPC;
+		goto done;
+
+	if (to >= mtd->size) {
+		err = -ENOSPC;
+		goto done;
+	}
+
 	if (to + len > mtd->size)
 		len = mtd->size - to;

@@ -207,6 +240,9 @@
 	mutex_unlock(&dev->write_mutex);
 	if (err > 0)
 		err = 0;
+
+done:
+	read_unlock(&dev->bdev_mutex);
 	return err;
 }

@@ -215,51 +251,29 @@
 static void block2mtd_sync(struct mtd_info *mtd)
 {
 	struct block2mtd_dev *dev = mtd->priv;
-	sync_blockdev(dev->blkdev);
-	return;
-}
-
-
-static void block2mtd_free_device(struct block2mtd_dev *dev)
-{
-	if (!dev)
-		return;
-
-	kfree(dev->mtd.name);

-	if (dev->blkdev) {
-		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
-					0, -1);
-		close_bdev_excl(dev->blkdev);
-	}
+	read_lock(&dev->bdev_mutex);
+	if (dev->blkdev)
+		sync_blockdev(dev->blkdev);
+	read_unlock(&dev->bdev_mutex);

-	kfree(dev);
+	return;
 }


-/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+static int _open_bdev(struct block2mtd_dev *dev)
 {
 	struct block_device *bdev;
-	struct block2mtd_dev *dev;
-	struct mtd_partition *part;
-
-	if (!devname)
-		return NULL;
-
-	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
-	if (!dev)
-		return NULL;

 	/* Get a handle on the device */
-	bdev = open_bdev_excl(devname, O_RDWR, NULL);
+	bdev = open_bdev_excl(dev->devname, O_RDWR, NULL);
 #ifndef MODULE
 	if (IS_ERR(bdev)) {

 		/* We might not have rootfs mounted at this point. Try
 		   to resolve the device name by other means. */

-		dev_t devt = name_to_dev_t(devname);
+		dev_t devt = name_to_dev_t(dev->devname);
 		if (devt) {
 			bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
 		}
@@ -267,17 +281,96 @@
 #endif

 	if (IS_ERR(bdev)) {
-		ERROR("error: cannot open device %s", devname);
-		goto devinit_err;
+		ERROR("error: cannot open device %s", dev->devname);
+		return 1;
 	}
 	dev->blkdev = bdev;

 	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
 		ERROR("attempting to use an MTD device as a block device");
-		goto devinit_err;
+		return 1;
 	}

+	return 0;
+}
+
+static void _close_bdev(struct block2mtd_dev *dev)
+{
+	struct block_device *bdev;
+
+	if (!dev->blkdev)
+		return;
+
+	bdev = dev->blkdev;
+	invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
+	close_bdev_excl(dev->blkdev);
+	dev->blkdev = NULL;
+}
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+	if (!dev)
+		return;
+
+	kfree(dev->mtd.name);
+	_close_bdev(dev);
+	kfree(dev);
+}
+
+
+static int block2mtd_refresh(struct mtd_info *mtd)
+{
+	struct block2mtd_dev *dev = mtd->priv;
+	struct block_device *bdev;
+	dev_t devt;
+	int err = 0;
+
+	/* no other mtd function can run at this point */
+	write_lock(&dev->bdev_mutex);
+
+	/* get the device number for the whole disk */
+	devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
+
+	/* close the old block device */
+	_close_bdev(dev);
+
+	/* open the whole disk, issue a partition rescan, then */
+	bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
+	if (!bdev || !bdev->bd_disk)
+		err = -EINVAL;
+	else {
+		err = rescan_partitions(bdev->bd_disk, bdev);
+	}
+	if (bdev)
+		close_bdev_excl(bdev);
+
+	/* try to open the partition block device again */
+	_open_bdev(dev);
+	write_unlock(&dev->bdev_mutex);
+
+	return err;
+}
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+{
+	struct block2mtd_dev *dev;
+	struct mtd_partition *part;
+
+	if (!devname)
+		return NULL;
+
+	dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	strcpy(dev->devname, devname);
+
+	if (_open_bdev(dev))
+		goto devinit_err;
+
 	mutex_init(&dev->write_mutex);
+	rwlock_init(&dev->bdev_mutex);

 	/* Setup the MTD structure */
 	/* make the name contain the block device in */
@@ -304,6 +397,7 @@
 	dev->mtd.read = block2mtd_read;
 	dev->mtd.priv = dev;
 	dev->mtd.owner = THIS_MODULE;
+	dev->mtd.refresh_device = block2mtd_refresh;

 	part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
 	part->name = dev->mtd.name;
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -17,6 +17,7 @@

 #include <linux/mtd/mtd.h>
 #include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>

 #include <asm/uaccess.h>

@@ -753,6 +754,13 @@
 		file->f_pos = 0;
 		break;
 	}
+#ifdef CONFIG_MTD_PARTITIONS
+	case MTDREFRESH:
+	{
+		ret = refresh_mtd_partitions(mtd);
+		break;
+	}
+#endif

 	default:
 		ret = -ENOTTY;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -98,6 +98,7 @@
 	uint8_t		*oobbuf;
 };

+struct mtd_info;
 struct mtd_info {
 	u_char type;
 	u_int32_t flags;
@@ -195,6 +196,9 @@
 	struct module *owner;
 	int usecount;

+	int (*refresh_device)(struct mtd_info *mtd);
+	struct mtd_info *split;
+
 	/* If the driver is something smart, like UBI, it may need to maintain
 	 * its own reference counting. The below functions are only for driver.
 	 * The driver may register its callbacks. These callbacks are not
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,6 +36,7 @@
  * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
  */

+struct mtd_partition;
 struct mtd_partition {
 	char *name;			/* identifier string */
 	u_int32_t size;			/* partition size */
@@ -43,6 +44,7 @@
 	u_int32_t mask_flags;		/* master MTD flags to mask out for this partition */
 	struct nand_ecclayout *ecclayout;	/* out of band layout for this partition (NAND only)*/
 	struct mtd_info **mtdp;		/* pointer to store the MTD object */
+	int (*refresh_partition)(struct mtd_info *);
 };

 #define MTDPART_OFS_NXTBLK	(-2)
@@ -52,6 +54,7 @@

 int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
 int del_mtd_partitions(struct mtd_info *);
+int refresh_mtd_partitions(struct mtd_info *);

 /*
  * Functions dealing with the various ways of partitioning the space
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -95,6 +95,7 @@
 #define ECCGETLAYOUT		_IOR('M', 17, struct nand_ecclayout)
 #define ECCGETSTATS		_IOR('M', 18, struct mtd_ecc_stats)
 #define MTDFILEMODE		_IO('M', 19)
+#define MTDREFRESH		_IO('M', 23)

 /*
  * Obsolete legacy interface. Keep it in order not to break userspace