1 From 97cf5df76657bab81d6b8669607f6f13215201c1 Mon Sep 17 00:00:00 2001
2 From: Fam Zheng <famcool@gmail.com>
3 Date: Tue, 12 Jul 2011 19:56:31 +0800
4 Subject: [PATCH 05/12] VMDK: separate vmdk_open by format version
6 Separate vmdk_open by subformats to:
10 Signed-off-by: Fam Zheng <famcool@gmail.com>
11 Reviewed-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
12 Signed-off-by: Kevin Wolf <kwolf@redhat.com>
14 block/vmdk.c | 178 +++++++++++++++++++++++++++++++++++++----------------------
15 1 file changed, 112 insertions(+), 66 deletions(-)
19 @@ -458,67 +458,20 @@ static VmdkExtent *vmdk_add_extent(Block
24 -static int vmdk_open(BlockDriverState *bs, int flags)
25 +static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent)
27 - BDRVVmdkState *s = bs->opaque;
30 - uint32_t l1_size, l1_entry_sectors;
31 - VmdkExtent *extent = NULL;
33 - if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic))
36 - magic = be32_to_cpu(magic);
37 - if (magic == VMDK3_MAGIC) {
39 - if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header))
40 - != sizeof(header)) {
43 - extent = vmdk_add_extent(bs, bs->file, false,
44 - le32_to_cpu(header.disk_sectors),
45 - le32_to_cpu(header.l1dir_offset) << 9, 0,
46 - 1 << 6, 1 << 9, le32_to_cpu(header.granularity));
47 - } else if (magic == VMDK4_MAGIC) {
49 - if (bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header))
50 - != sizeof(header)) {
53 - l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte)
54 - * le64_to_cpu(header.granularity);
55 - l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1)
57 - extent = vmdk_add_extent(bs, bs->file, false,
58 - le64_to_cpu(header.capacity),
59 - le64_to_cpu(header.gd_offset) << 9,
60 - le64_to_cpu(header.rgd_offset) << 9,
62 - le32_to_cpu(header.num_gtes_per_gte),
63 - le64_to_cpu(header.granularity));
64 - if (extent->l1_entry_sectors <= 0) {
67 - // try to open parent images, if exist
68 - if (vmdk_parent_open(bs) != 0)
70 - // write the CID once after the image creation
71 - s->parent_cid = vmdk_read_cid(bs,1);
78 /* read the L1 table */
79 l1_size = extent->l1_size * sizeof(uint32_t);
80 extent->l1_table = qemu_malloc(l1_size);
81 - if (bdrv_pread(bs->file,
82 - extent->l1_table_offset,
87 + ret = bdrv_pread(extent->file,
88 + extent->l1_table_offset,
94 for (i = 0; i < extent->l1_size; i++) {
95 le32_to_cpus(&extent->l1_table[i]);
96 @@ -526,12 +479,12 @@ static int vmdk_open(BlockDriverState *b
98 if (extent->l1_backup_table_offset) {
99 extent->l1_backup_table = qemu_malloc(l1_size);
100 - if (bdrv_pread(bs->file,
101 - extent->l1_backup_table_offset,
102 - extent->l1_backup_table,
106 + ret = bdrv_pread(extent->file,
107 + extent->l1_backup_table_offset,
108 + extent->l1_backup_table,
113 for (i = 0; i < extent->l1_size; i++) {
114 le32_to_cpus(&extent->l1_backup_table[i]);
115 @@ -541,9 +494,102 @@ static int vmdk_open(BlockDriverState *b
117 qemu_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
120 + qemu_free(extent->l1_backup_table);
122 + qemu_free(extent->l1_table);
126 +static int vmdk_open_vmdk3(BlockDriverState *bs, int flags)
130 + VMDK3Header header;
131 + VmdkExtent *extent;
133 + ret = bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header));
137 + extent = vmdk_add_extent(bs,
139 + le32_to_cpu(header.disk_sectors),
140 + le32_to_cpu(header.l1dir_offset) << 9,
142 + le32_to_cpu(header.granularity));
143 + ret = vmdk_init_tables(bs, extent);
145 + /* vmdk_init_tables cleans up on fail, so only free allocation of
146 + * vmdk_add_extent here. */
151 vmdk_free_extents(bs);
156 +static int vmdk_open_vmdk4(BlockDriverState *bs, int flags)
160 + uint32_t l1_size, l1_entry_sectors;
161 + VMDK4Header header;
162 + BDRVVmdkState *s = bs->opaque;
163 + VmdkExtent *extent;
165 + ret = bdrv_pread(bs->file, sizeof(magic), &header, sizeof(header));
169 + l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte)
170 + * le64_to_cpu(header.granularity);
171 + l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1)
172 + / l1_entry_sectors;
173 + extent = vmdk_add_extent(bs, bs->file, false,
174 + le64_to_cpu(header.capacity),
175 + le64_to_cpu(header.gd_offset) << 9,
176 + le64_to_cpu(header.rgd_offset) << 9,
178 + le32_to_cpu(header.num_gtes_per_gte),
179 + le64_to_cpu(header.granularity));
180 + if (extent->l1_entry_sectors <= 0) {
184 +    /* try to open parent images, if they exist */
185 + ret = vmdk_parent_open(bs);
189 + s->parent_cid = vmdk_read_cid(bs, 1);
190 + ret = vmdk_init_tables(bs, extent);
196 + vmdk_free_extents(bs);
200 +static int vmdk_open(BlockDriverState *bs, int flags)
204 + if (bdrv_pread(bs->file, 0, &magic, sizeof(magic)) != sizeof(magic)) {
208 + magic = be32_to_cpu(magic);
209 + if (magic == VMDK3_MAGIC) {
210 + return vmdk_open_vmdk3(bs, flags);
211 + } else if (magic == VMDK4_MAGIC) {
212 + return vmdk_open_vmdk4(bs, flags);
218 static int get_whole_cluster(BlockDriverState *bs,
219 @@ -630,11 +676,11 @@ static uint64_t get_cluster_offset(Block
223 - for(i = 0; i < L2_CACHE_SIZE; i++) {
224 + for (i = 0; i < L2_CACHE_SIZE; i++) {
225 if (l2_offset == extent->l2_cache_offsets[i]) {
226 /* increment the hit count */
227 if (++extent->l2_cache_counts[i] == 0xffffffff) {
228 - for(j = 0; j < L2_CACHE_SIZE; j++) {
229 + for (j = 0; j < L2_CACHE_SIZE; j++) {
230 extent->l2_cache_counts[j] >>= 1;
233 @@ -645,7 +691,7 @@ static uint64_t get_cluster_offset(Block
234 /* not found: load a new entry in the least used one */
236 min_count = 0xffffffff;
237 - for(i = 0; i < L2_CACHE_SIZE; i++) {
238 + for (i = 0; i < L2_CACHE_SIZE; i++) {
239 if (extent->l2_cache_counts[i] < min_count) {
240 min_count = extent->l2_cache_counts[i];