1 From fcd9c52d160376184cbd25e04586aa6eef6abd61 Mon Sep 17 00:00:00 2001
2 From: Fam Zheng <famcool@gmail.com>
3 Date: Tue, 12 Jul 2011 19:56:28 +0800
4 Subject: [PATCH 02/12] VMDK: introduce VmdkExtent
6 Introduced a VmdkExtent array into BDRVVmdkState, enabling it to hold
7 multiple image extents for multi-file image support.
9 Signed-off-by: Fam Zheng <famcool@gmail.com>
10 Reviewed-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11 Signed-off-by: Kevin Wolf <kwolf@redhat.com>
13 block/vmdk.c | 348 +++++++++++++++++++++++++++++++++++++++++------------------
14 1 file changed, 246 insertions(+), 102 deletions(-)
18 @@ -60,7 +60,11 @@ typedef struct {
20 #define L2_CACHE_SIZE 16
22 -typedef struct BDRVVmdkState {
23 +typedef struct VmdkExtent {
24 + BlockDriverState *file;
28 int64_t l1_table_offset;
29 int64_t l1_backup_table_offset;
31 @@ -74,7 +78,13 @@ typedef struct BDRVVmdkState {
32 uint32_t l2_cache_counts[L2_CACHE_SIZE];
34 unsigned int cluster_sectors;
37 +typedef struct BDRVVmdkState {
40 + /* Extent array with num_extents entries, ascend ordered by address */
41 + VmdkExtent *extents;
44 typedef struct VmdkMetaData {
45 @@ -105,6 +115,19 @@ static int vmdk_probe(const uint8_t *buf
46 #define DESC_SIZE 20*SECTOR_SIZE // 20 sectors of 512 bytes each
47 #define HEADER_SIZE 512 // first sector of 512 bytes
49 +static void vmdk_free_extents(BlockDriverState *bs)
52 + BDRVVmdkState *s = bs->opaque;
54 + for (i = 0; i < s->num_extents; i++) {
55 + qemu_free(s->extents[i].l1_table);
56 + qemu_free(s->extents[i].l2_cache);
57 + qemu_free(s->extents[i].l1_backup_table);
59 + qemu_free(s->extents);
62 static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent)
65 @@ -358,11 +381,50 @@ static int vmdk_parent_open(BlockDriverS
69 +/* Create and append extent to the extent array. Return the added VmdkExtent
70 + * address. return NULL if allocation failed. */
71 +static VmdkExtent *vmdk_add_extent(BlockDriverState *bs,
72 + BlockDriverState *file, bool flat, int64_t sectors,
73 + int64_t l1_offset, int64_t l1_backup_offset,
75 + int l2_size, unsigned int cluster_sectors)
78 + BDRVVmdkState *s = bs->opaque;
80 + s->extents = qemu_realloc(s->extents,
81 + (s->num_extents + 1) * sizeof(VmdkExtent));
82 + extent = &s->extents[s->num_extents];
85 + memset(extent, 0, sizeof(VmdkExtent));
86 + extent->file = file;
87 + extent->flat = flat;
88 + extent->sectors = sectors;
89 + extent->l1_table_offset = l1_offset;
90 + extent->l1_backup_table_offset = l1_backup_offset;
91 + extent->l1_size = l1_size;
92 + extent->l1_entry_sectors = l2_size * cluster_sectors;
93 + extent->l2_size = l2_size;
94 + extent->cluster_sectors = cluster_sectors;
96 + if (s->num_extents > 1) {
97 + extent->end_sector = (*(extent - 1)).end_sector + extent->sectors;
99 + extent->end_sector = extent->sectors;
101 + bs->total_sectors = extent->end_sector;
106 static int vmdk_open(BlockDriverState *bs, int flags)
108 BDRVVmdkState *s = bs->opaque;
112 + uint32_t l1_size, l1_entry_sectors;
113 + VmdkExtent *extent = NULL;
115 if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic))
117 @@ -370,32 +432,34 @@ static int vmdk_open(BlockDriverState *b
118 magic = be32_to_cpu(magic);
119 if (magic == VMDK3_MAGIC) {
122 - if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header)) != sizeof(header))
123 + if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header))
124 + != sizeof(header)) {
126 - s->cluster_sectors = le32_to_cpu(header.granularity);
127 - s->l2_size = 1 << 9;
128 - s->l1_size = 1 << 6;
129 - bs->total_sectors = le32_to_cpu(header.disk_sectors);
130 - s->l1_table_offset = le32_to_cpu(header.l1dir_offset) << 9;
131 - s->l1_backup_table_offset = 0;
132 - s->l1_entry_sectors = s->l2_size * s->cluster_sectors;
134 + extent = vmdk_add_extent(bs, bs->file, false,
135 + le32_to_cpu(header.disk_sectors),
136 + le32_to_cpu(header.l1dir_offset) << 9, 0,
137 + 1 << 6, 1 << 9, le32_to_cpu(header.granularity));
138 } else if (magic == VMDK4_MAGIC) {
141 - if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header)) != sizeof(header))
142 + if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header))
143 + != sizeof(header)) {
145 - bs->total_sectors = le64_to_cpu(header.capacity);
146 - s->cluster_sectors = le64_to_cpu(header.granularity);
147 - s->l2_size = le32_to_cpu(header.num_gtes_per_gte);
148 - s->l1_entry_sectors = s->l2_size * s->cluster_sectors;
149 - if (s->l1_entry_sectors <= 0)
151 + l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte)
152 + * le64_to_cpu(header.granularity);
153 + l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1)
154 + / l1_entry_sectors;
155 + extent = vmdk_add_extent(bs, bs->file, false,
156 + le64_to_cpu(header.capacity),
157 + le64_to_cpu(header.gd_offset) << 9,
158 + le64_to_cpu(header.rgd_offset) << 9,
160 + le32_to_cpu(header.num_gtes_per_gte),
161 + le64_to_cpu(header.granularity));
162 + if (extent->l1_entry_sectors <= 0) {
164 - s->l1_size = (bs->total_sectors + s->l1_entry_sectors - 1)
165 - / s->l1_entry_sectors;
166 - s->l1_table_offset = le64_to_cpu(header.rgd_offset) << 9;
167 - s->l1_backup_table_offset = le64_to_cpu(header.gd_offset) << 9;
170 // try to open parent images, if exist
171 if (vmdk_parent_open(bs) != 0)
173 @@ -406,40 +470,49 @@ static int vmdk_open(BlockDriverState *b
176 /* read the L1 table */
177 - l1_size = s->l1_size * sizeof(uint32_t);
178 - s->l1_table = qemu_malloc(l1_size);
179 - if (bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, l1_size) != l1_size)
181 - for(i = 0; i < s->l1_size; i++) {
182 - le32_to_cpus(&s->l1_table[i]);
185 - if (s->l1_backup_table_offset) {
186 - s->l1_backup_table = qemu_malloc(l1_size);
187 - if (bdrv_pread(bs->file, s->l1_backup_table_offset, s->l1_backup_table, l1_size) != l1_size)
188 + l1_size = extent->l1_size * sizeof(uint32_t);
189 + extent->l1_table = qemu_malloc(l1_size);
190 + if (bdrv_pread(bs->file,
191 + extent->l1_table_offset,
197 + for (i = 0; i < extent->l1_size; i++) {
198 + le32_to_cpus(&extent->l1_table[i]);
201 + if (extent->l1_backup_table_offset) {
202 + extent->l1_backup_table = qemu_malloc(l1_size);
203 + if (bdrv_pread(bs->file,
204 + extent->l1_backup_table_offset,
205 + extent->l1_backup_table,
209 - for(i = 0; i < s->l1_size; i++) {
210 - le32_to_cpus(&s->l1_backup_table[i]);
212 + for (i = 0; i < extent->l1_size; i++) {
213 + le32_to_cpus(&extent->l1_backup_table[i]);
217 - s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
219 + qemu_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
222 - qemu_free(s->l1_backup_table);
223 - qemu_free(s->l1_table);
224 - qemu_free(s->l2_cache);
225 + vmdk_free_extents(bs);
229 -static uint64_t get_cluster_offset(BlockDriverState *bs, VmdkMetaData *m_data,
230 - uint64_t offset, int allocate);
232 -static int get_whole_cluster(BlockDriverState *bs, uint64_t cluster_offset,
233 - uint64_t offset, int allocate)
234 +static int get_whole_cluster(BlockDriverState *bs,
235 + VmdkExtent *extent,
236 + uint64_t cluster_offset,
240 - BDRVVmdkState *s = bs->opaque;
241 - uint8_t whole_grain[s->cluster_sectors*512]; // 128 sectors * 512 bytes each = grain size 64KB
242 + /* 128 sectors * 512 bytes each = grain size 64KB */
243 + uint8_t whole_grain[extent->cluster_sectors * 512];
245 // we will be here if it's first write on non-exist grain(cluster).
246 // try to read from parent image, if exist
247 @@ -450,14 +523,14 @@ static int get_whole_cluster(BlockDriver
250 ret = bdrv_read(bs->backing_hd, offset >> 9, whole_grain,
251 - s->cluster_sectors);
252 + extent->cluster_sectors);
257 //Write grain only into the active image
258 - ret = bdrv_write(bs->file, cluster_offset, whole_grain,
259 - s->cluster_sectors);
260 + ret = bdrv_write(extent->file, cluster_offset, whole_grain,
261 + extent->cluster_sectors);
265 @@ -465,29 +538,39 @@ static int get_whole_cluster(BlockDriver
269 -static int vmdk_L2update(BlockDriverState *bs, VmdkMetaData *m_data)
270 +static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
272 - BDRVVmdkState *s = bs->opaque;
274 /* update L2 table */
275 - if (bdrv_pwrite_sync(bs->file, ((int64_t)m_data->l2_offset * 512) + (m_data->l2_index * sizeof(m_data->offset)),
276 - &(m_data->offset), sizeof(m_data->offset)) < 0)
277 + if (bdrv_pwrite_sync(
279 + ((int64_t)m_data->l2_offset * 512)
280 + + (m_data->l2_index * sizeof(m_data->offset)),
282 + sizeof(m_data->offset)
286 /* update backup L2 table */
287 - if (s->l1_backup_table_offset != 0) {
288 - m_data->l2_offset = s->l1_backup_table[m_data->l1_index];
289 - if (bdrv_pwrite_sync(bs->file, ((int64_t)m_data->l2_offset * 512) + (m_data->l2_index * sizeof(m_data->offset)),
290 - &(m_data->offset), sizeof(m_data->offset)) < 0)
291 + if (extent->l1_backup_table_offset != 0) {
292 + m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
293 + if (bdrv_pwrite_sync(
295 + ((int64_t)m_data->l2_offset * 512)
296 + + (m_data->l2_index * sizeof(m_data->offset)),
297 + &(m_data->offset), sizeof(m_data->offset)
306 -static uint64_t get_cluster_offset(BlockDriverState *bs, VmdkMetaData *m_data,
307 - uint64_t offset, int allocate)
308 +static uint64_t get_cluster_offset(BlockDriverState *bs,
309 + VmdkExtent *extent,
310 + VmdkMetaData *m_data,
311 + uint64_t offset, int allocate)
313 - BDRVVmdkState *s = bs->opaque;
314 unsigned int l1_index, l2_offset, l2_index;
316 uint32_t min_count, *l2_table, tmp = 0;
317 @@ -496,21 +579,23 @@ static uint64_t get_cluster_offset(Block
321 - l1_index = (offset >> 9) / s->l1_entry_sectors;
322 - if (l1_index >= s->l1_size)
323 + l1_index = (offset >> 9) / extent->l1_entry_sectors;
324 + if (l1_index >= extent->l1_size) {
326 - l2_offset = s->l1_table[l1_index];
329 + l2_offset = extent->l1_table[l1_index];
333 for(i = 0; i < L2_CACHE_SIZE; i++) {
334 - if (l2_offset == s->l2_cache_offsets[i]) {
335 + if (l2_offset == extent->l2_cache_offsets[i]) {
336 /* increment the hit count */
337 - if (++s->l2_cache_counts[i] == 0xffffffff) {
338 + if (++extent->l2_cache_counts[i] == 0xffffffff) {
339 for(j = 0; j < L2_CACHE_SIZE; j++) {
340 - s->l2_cache_counts[j] >>= 1;
341 + extent->l2_cache_counts[j] >>= 1;
344 - l2_table = s->l2_cache + (i * s->l2_size);
345 + l2_table = extent->l2_cache + (i * extent->l2_size);
349 @@ -518,20 +603,25 @@ static uint64_t get_cluster_offset(Block
351 min_count = 0xffffffff;
352 for(i = 0; i < L2_CACHE_SIZE; i++) {
353 - if (s->l2_cache_counts[i] < min_count) {
354 - min_count = s->l2_cache_counts[i];
355 + if (extent->l2_cache_counts[i] < min_count) {
356 + min_count = extent->l2_cache_counts[i];
360 - l2_table = s->l2_cache + (min_index * s->l2_size);
361 - if (bdrv_pread(bs->file, (int64_t)l2_offset * 512, l2_table, s->l2_size * sizeof(uint32_t)) !=
362 - s->l2_size * sizeof(uint32_t))
363 + l2_table = extent->l2_cache + (min_index * extent->l2_size);
366 + (int64_t)l2_offset * 512,
368 + extent->l2_size * sizeof(uint32_t)
369 + ) != extent->l2_size * sizeof(uint32_t)) {
373 - s->l2_cache_offsets[min_index] = l2_offset;
374 - s->l2_cache_counts[min_index] = 1;
375 + extent->l2_cache_offsets[min_index] = l2_offset;
376 + extent->l2_cache_counts[min_index] = 1;
378 - l2_index = ((offset >> 9) / s->cluster_sectors) % s->l2_size;
379 + l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
380 cluster_offset = le32_to_cpu(l2_table[l2_index]);
382 if (!cluster_offset) {
383 @@ -539,8 +629,11 @@ static uint64_t get_cluster_offset(Block
386 // Avoid the L2 tables update for the images that have snapshots.
387 - cluster_offset = bdrv_getlength(bs->file);
388 - bdrv_truncate(bs->file, cluster_offset + (s->cluster_sectors << 9));
389 + cluster_offset = bdrv_getlength(extent->file);
392 + cluster_offset + (extent->cluster_sectors << 9)
395 cluster_offset >>= 9;
396 tmp = cpu_to_le32(cluster_offset);
397 @@ -551,7 +644,8 @@ static uint64_t get_cluster_offset(Block
398 * This problem may occur because of insufficient space on host disk
399 * or inappropriate VM shutdown.
401 - if (get_whole_cluster(bs, cluster_offset, offset, allocate) == -1)
402 + if (get_whole_cluster(
403 + bs, extent, cluster_offset, offset, allocate) == -1)
407 @@ -566,33 +660,69 @@ static uint64_t get_cluster_offset(Block
408 return cluster_offset;
411 +static VmdkExtent *find_extent(BDRVVmdkState *s,
412 + int64_t sector_num, VmdkExtent *start_hint)
414 + VmdkExtent *extent = start_hint;
417 + extent = &s->extents[0];
419 + while (extent < &s->extents[s->num_extents]) {
420 + if (sector_num < extent->end_sector) {
428 static int vmdk_is_allocated(BlockDriverState *bs, int64_t sector_num,
429 int nb_sectors, int *pnum)
431 BDRVVmdkState *s = bs->opaque;
432 - int index_in_cluster, n;
433 - uint64_t cluster_offset;
435 - cluster_offset = get_cluster_offset(bs, NULL, sector_num << 9, 0);
436 - index_in_cluster = sector_num % s->cluster_sectors;
437 - n = s->cluster_sectors - index_in_cluster;
438 + int64_t index_in_cluster, n, ret;
440 + VmdkExtent *extent;
442 + extent = find_extent(s, sector_num, NULL);
446 + if (extent->flat) {
447 + n = extent->end_sector - sector_num;
450 + offset = get_cluster_offset(bs, extent, NULL, sector_num * 512, 0);
451 + index_in_cluster = sector_num % extent->cluster_sectors;
452 + n = extent->cluster_sectors - index_in_cluster;
453 + ret = offset ? 1 : 0;
458 - return (cluster_offset != 0);
462 static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
463 uint8_t *buf, int nb_sectors)
465 BDRVVmdkState *s = bs->opaque;
466 - int index_in_cluster, n, ret;
468 + uint64_t n, index_in_cluster;
469 + VmdkExtent *extent = NULL;
470 uint64_t cluster_offset;
472 while (nb_sectors > 0) {
473 - cluster_offset = get_cluster_offset(bs, NULL, sector_num << 9, 0);
474 - index_in_cluster = sector_num % s->cluster_sectors;
475 - n = s->cluster_sectors - index_in_cluster;
476 + extent = find_extent(s, sector_num, extent);
480 + cluster_offset = get_cluster_offset(
481 + bs, extent, NULL, sector_num << 9, 0);
482 + index_in_cluster = sector_num % extent->cluster_sectors;
483 + n = extent->cluster_sectors - index_in_cluster;
486 if (!cluster_offset) {
487 @@ -621,10 +751,12 @@ static int vmdk_write(BlockDriverState *
488 const uint8_t *buf, int nb_sectors)
490 BDRVVmdkState *s = bs->opaque;
491 - VmdkMetaData m_data;
492 - int index_in_cluster, n;
493 + VmdkExtent *extent = NULL;
495 + int64_t index_in_cluster;
496 uint64_t cluster_offset;
497 static int cid_update = 0;
498 + VmdkMetaData m_data;
500 if (sector_num > bs->total_sectors) {
502 @@ -635,20 +767,35 @@ static int vmdk_write(BlockDriverState *
505 while (nb_sectors > 0) {
506 - index_in_cluster = sector_num & (s->cluster_sectors - 1);
507 - n = s->cluster_sectors - index_in_cluster;
508 - if (n > nb_sectors)
510 - cluster_offset = get_cluster_offset(bs, &m_data, sector_num << 9, 1);
511 - if (!cluster_offset)
512 + extent = find_extent(s, sector_num, extent);
516 + cluster_offset = get_cluster_offset(
520 + sector_num << 9, 1);
521 + if (!cluster_offset) {
524 + index_in_cluster = sector_num % extent->cluster_sectors;
525 + n = extent->cluster_sectors - index_in_cluster;
526 + if (n > nb_sectors) {
530 - if (bdrv_pwrite(bs->file, cluster_offset + index_in_cluster * 512, buf, n * 512) != n * 512)
531 + if (bdrv_pwrite(bs->file,
532 + cluster_offset + index_in_cluster * 512,
538 /* update L2 tables */
539 - if (vmdk_L2update(bs, &m_data) == -1)
540 + if (vmdk_L2update(extent, &m_data) == -1) {
546 @@ -822,10 +969,7 @@ exit:
548 static void vmdk_close(BlockDriverState *bs)
550 - BDRVVmdkState *s = bs->opaque;
552 - qemu_free(s->l1_table);
553 - qemu_free(s->l2_cache);
554 + vmdk_free_extents(bs);
557 static int vmdk_flush(BlockDriverState *bs)