/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "ext4_utils.h"
20 #include <sparse/sparse.h>
33 struct block_group_info
{
50 struct xattr_list_element
{
51 struct ext4_inode
*inode
;
52 struct ext4_xattr_header
*header
;
53 struct xattr_list_element
*next
;
56 struct block_allocation
*create_allocation()
58 struct block_allocation
*alloc
= malloc(sizeof(struct block_allocation
));
59 alloc
->list
.first
= NULL
;
60 alloc
->list
.last
= NULL
;
61 alloc
->oob_list
.first
= NULL
;
62 alloc
->oob_list
.last
= NULL
;
63 alloc
->list
.iter
= NULL
;
64 alloc
->list
.partial_iter
= 0;
65 alloc
->oob_list
.iter
= NULL
;
66 alloc
->oob_list
.partial_iter
= 0;
67 alloc
->filename
= NULL
;
72 static struct ext4_xattr_header
*xattr_list_find(struct ext4_inode
*inode
)
74 struct xattr_list_element
*element
;
75 for (element
= aux_info
.xattrs
; element
!= NULL
; element
= element
->next
) {
76 if (element
->inode
== inode
)
77 return element
->header
;
82 static void xattr_list_insert(struct ext4_inode
*inode
, struct ext4_xattr_header
*header
)
84 struct xattr_list_element
*element
= malloc(sizeof(struct xattr_list_element
));
85 element
->inode
= inode
;
86 element
->header
= header
;
87 element
->next
= aux_info
.xattrs
;
88 aux_info
.xattrs
= element
;
91 static void region_list_remove(struct region_list
*list
, struct region
*reg
)
94 reg
->prev
->next
= reg
->next
;
97 reg
->next
->prev
= reg
->prev
;
99 if (list
->first
== reg
)
100 list
->first
= reg
->next
;
102 if (list
->last
== reg
)
103 list
->last
= reg
->prev
;
109 static void region_list_append(struct region_list
*list
, struct region
*reg
)
111 if (list
->first
== NULL
) {
115 list
->partial_iter
= 0;
118 list
->last
->next
= reg
;
119 reg
->prev
= list
->last
;
126 static void dump_starting_from(struct region
*reg
)
128 for (; reg
; reg
= reg
->next
) {
129 printf("%p: Blocks %d-%d (%d)\n", reg
,
130 reg
->block
, reg
->block
+ reg
->len
- 1, reg
->len
)
134 static void dump_region_lists(struct block_allocation
*alloc
) {
136 printf("Main list:\n");
137 dump_starting_from(alloc
->list
.first
);
139 printf("OOB list:\n");
140 dump_starting_from(alloc
->oob_list
.first
);
144 void print_blocks(FILE* f
, struct block_allocation
*alloc
)
147 for (reg
= alloc
->list
.first
; reg
; reg
= reg
->next
) {
149 fprintf(f
, " %d", reg
->block
);
151 fprintf(f
, " %d-%d", reg
->block
, reg
->block
+ reg
->len
- 1);
157 void append_region(struct block_allocation
*alloc
,
158 u32 block
, u32 len
, int bg_num
)
161 reg
= malloc(sizeof(struct region
));
167 region_list_append(&alloc
->list
, reg
);
170 static void allocate_bg_inode_table(struct block_group_info
*bg
)
172 if (bg
->inode_table
!= NULL
)
175 u32 block
= bg
->first_block
+ 2;
177 if (bg
->has_superblock
)
178 block
+= aux_info
.bg_desc_blocks
+ info
.bg_desc_reserve_blocks
+ 1;
180 bg
->inode_table
= calloc(aux_info
.inode_table_blocks
, info
.block_size
);
181 if (bg
->inode_table
== NULL
)
182 critical_error_errno("calloc");
184 sparse_file_add_data(ext4_sparse_file
, bg
->inode_table
,
185 aux_info
.inode_table_blocks
* info
.block_size
, block
);
187 bg
->flags
&= ~EXT4_BG_INODE_UNINIT
;
190 static int bitmap_set_bit(u8
*bitmap
, u32 bit
)
192 if (bitmap
[bit
/ 8] & 1 << (bit
% 8))
195 bitmap
[bit
/ 8] |= 1 << (bit
% 8);
199 static int bitmap_set_8_bits(u8
*bitmap
, u32 bit
)
201 int ret
= bitmap
[bit
/ 8];
202 bitmap
[bit
/ 8] = 0xFF;
206 /* Marks a the first num_blocks blocks in a block group as used, and accounts
207 for them in the block group free block info. */
208 static int reserve_blocks(struct block_group_info
*bg
, u32 start
, u32 num
)
213 if (num
> bg
->free_blocks
)
216 for (i
= 0; i
< num
&& block
% 8 != 0; i
++, block
++) {
217 if (bitmap_set_bit(bg
->block_bitmap
, block
)) {
218 error("attempted to reserve already reserved block");
223 for (; i
+ 8 <= (num
& ~7); i
+= 8, block
+= 8) {
224 if (bitmap_set_8_bits(bg
->block_bitmap
, block
)) {
225 error("attempted to reserve already reserved block");
230 for (; i
< num
; i
++, block
++) {
231 if (bitmap_set_bit(bg
->block_bitmap
, block
)) {
232 error("attempted to reserve already reserved block");
237 bg
->free_blocks
-= num
;
238 if (start
== bg
->first_free_block
)
239 bg
->first_free_block
= start
+ num
;
244 static void free_blocks(struct block_group_info
*bg
, u32 num_blocks
)
247 u32 block
= bg
->first_free_block
- 1;
248 for (i
= 0; i
< num_blocks
; i
++, block
--)
249 bg
->block_bitmap
[block
/ 8] &= ~(1 << (block
% 8));
250 bg
->free_blocks
+= num_blocks
;
251 bg
->first_free_block
-= num_blocks
;
254 /* Reduces an existing allocation by len blocks by return the last blocks
255 to the free pool in their block group. Assumes that the blocks being
256 returned were the last ones allocated out of the block group */
257 void reduce_allocation(struct block_allocation
*alloc
, u32 len
)
260 struct region
*last_reg
= alloc
->list
.last
;
262 if (last_reg
->len
> len
) {
263 free_blocks(&aux_info
.bgs
[last_reg
->bg
], len
);
264 last_reg
->len
-= len
;
267 struct region
*reg
= alloc
->list
.last
->prev
;
268 free_blocks(&aux_info
.bgs
[last_reg
->bg
], last_reg
->len
);
269 len
-= last_reg
->len
;
273 alloc
->list
.first
= NULL
;
274 alloc
->list
.last
= NULL
;
275 alloc
->list
.iter
= NULL
;
276 alloc
->list
.partial_iter
= 0;
283 static void init_bg(struct block_group_info
*bg
, unsigned int i
)
285 int header_blocks
= 2 + aux_info
.inode_table_blocks
;
287 bg
->has_superblock
= ext4_bg_has_super_block(i
);
289 if (bg
->has_superblock
)
290 header_blocks
+= 1 + aux_info
.bg_desc_blocks
+ info
.bg_desc_reserve_blocks
;
292 bg
->bitmaps
= calloc(info
.block_size
, 2);
293 bg
->block_bitmap
= bg
->bitmaps
;
294 bg
->inode_bitmap
= bg
->bitmaps
+ info
.block_size
;
296 bg
->header_blocks
= header_blocks
;
297 bg
->first_block
= aux_info
.first_data_block
+ i
* info
.blocks_per_group
;
299 u32 block
= bg
->first_block
;
300 if (bg
->has_superblock
)
301 block
+= 1 + aux_info
.bg_desc_blocks
+ info
.bg_desc_reserve_blocks
;
302 sparse_file_add_data(ext4_sparse_file
, bg
->bitmaps
, 2 * info
.block_size
,
305 bg
->data_blocks_used
= 0;
306 bg
->free_blocks
= info
.blocks_per_group
;
307 bg
->first_free_block
= 0;
308 bg
->free_inodes
= info
.inodes_per_group
;
309 bg
->first_free_inode
= 1;
310 bg
->flags
= EXT4_BG_INODE_UNINIT
;
312 if (reserve_blocks(bg
, bg
->first_free_block
, bg
->header_blocks
) < 0)
313 error("failed to reserve %u blocks in block group %u\n", bg
->header_blocks
, i
);
315 if (bg
->first_block
+ info
.blocks_per_group
> aux_info
.len_blocks
) {
316 u32 overrun
= bg
->first_block
+ info
.blocks_per_group
- aux_info
.len_blocks
;
317 reserve_blocks(bg
, info
.blocks_per_group
- overrun
, overrun
);
321 void block_allocator_init()
325 aux_info
.bgs
= calloc(sizeof(struct block_group_info
), aux_info
.groups
);
326 if (aux_info
.bgs
== NULL
)
327 critical_error_errno("calloc");
329 for (i
= 0; i
< aux_info
.groups
; i
++)
330 init_bg(&aux_info
.bgs
[i
], i
);
333 void block_allocator_free()
337 for (i
= 0; i
< aux_info
.groups
; i
++) {
338 free(aux_info
.bgs
[i
].bitmaps
);
339 free(aux_info
.bgs
[i
].inode_table
);
344 static u32
ext4_allocate_blocks_from_block_group(u32 len
, int bg_num
)
346 if (get_free_blocks(bg_num
) < len
)
347 return EXT4_ALLOCATE_FAILED
;
349 u32 block
= aux_info
.bgs
[bg_num
].first_free_block
;
350 struct block_group_info
*bg
= &aux_info
.bgs
[bg_num
];
351 if (reserve_blocks(bg
, bg
->first_free_block
, len
) < 0) {
352 error("failed to reserve %u blocks in block group %u\n", len
, bg_num
);
353 return EXT4_ALLOCATE_FAILED
;
356 aux_info
.bgs
[bg_num
].data_blocks_used
+= len
;
358 return bg
->first_block
+ block
;
361 /* Allocate a single block and return its block number */
365 for (i
= 0; i
< aux_info
.groups
; i
++) {
366 u32 block
= ext4_allocate_blocks_from_block_group(1, i
);
368 if (block
!= EXT4_ALLOCATE_FAILED
)
372 return EXT4_ALLOCATE_FAILED
;
375 static struct region
*ext4_allocate_best_fit_partial(u32 len
)
378 unsigned int found_bg
= 0;
379 u32 found_bg_len
= 0;
381 for (i
= 0; i
< aux_info
.groups
; i
++) {
382 u32 bg_len
= aux_info
.bgs
[i
].free_blocks
;
384 if ((len
<= bg_len
&& (found_bg_len
== 0 || bg_len
< found_bg_len
)) ||
385 (len
> found_bg_len
&& bg_len
> found_bg_len
)) {
387 found_bg_len
= bg_len
;
392 u32 allocate_len
= min(len
, found_bg_len
);
394 u32 block
= ext4_allocate_blocks_from_block_group(allocate_len
, found_bg
);
395 if (block
== EXT4_ALLOCATE_FAILED
) {
396 error("failed to allocate %d blocks in block group %d", allocate_len
, found_bg
);
399 reg
= malloc(sizeof(struct region
));
401 reg
->len
= allocate_len
;
407 error("failed to allocate %u blocks, out of space?", len
);
413 static struct region
*ext4_allocate_best_fit(u32 len
)
415 struct region
*first_reg
= NULL
;
416 struct region
*prev_reg
= NULL
;
420 reg
= ext4_allocate_best_fit_partial(len
);
424 if (first_reg
== NULL
)
428 prev_reg
->next
= reg
;
429 reg
->prev
= prev_reg
;
439 /* Allocate len blocks. The blocks may be spread across multiple block groups,
440 and are returned in a linked list of the blocks in each block group. The
441 allocation algorithm is:
442 1. If the remaining allocation is larger than any available contiguous region,
443 allocate the largest contiguous region and loop
444 2. Otherwise, allocate the smallest contiguous region that it fits in
446 struct block_allocation
*allocate_blocks(u32 len
)
448 struct region
*reg
= ext4_allocate_best_fit(len
);
453 struct block_allocation
*alloc
= create_allocation();
454 alloc
->list
.first
= reg
;
455 alloc
->list
.last
= reg
;
456 alloc
->list
.iter
= alloc
->list
.first
;
457 alloc
->list
.partial_iter
= 0;
461 /* Returns the number of discontiguous regions used by an allocation */
462 int block_allocation_num_regions(struct block_allocation
*alloc
)
465 struct region
*reg
= alloc
->list
.first
;
467 for (i
= 0; reg
!= NULL
; reg
= reg
->next
)
473 int block_allocation_len(struct block_allocation
*alloc
)
476 struct region
*reg
= alloc
->list
.first
;
478 for (i
= 0; reg
!= NULL
; reg
= reg
->next
)
484 /* Returns the block number of the block'th block in an allocation */
485 u32
get_block(struct block_allocation
*alloc
, u32 block
)
487 struct region
*reg
= alloc
->list
.iter
;
488 block
+= alloc
->list
.partial_iter
;
490 for (; reg
; reg
= reg
->next
) {
491 if (block
< reg
->len
)
492 return reg
->block
+ block
;
495 return EXT4_ALLOCATE_FAILED
;
498 u32
get_oob_block(struct block_allocation
*alloc
, u32 block
)
500 struct region
*reg
= alloc
->oob_list
.iter
;
501 block
+= alloc
->oob_list
.partial_iter
;
503 for (; reg
; reg
= reg
->next
) {
504 if (block
< reg
->len
)
505 return reg
->block
+ block
;
508 return EXT4_ALLOCATE_FAILED
;
511 /* Gets the starting block and length in blocks of the first region
513 void get_region(struct block_allocation
*alloc
, u32
*block
, u32
*len
)
515 *block
= alloc
->list
.iter
->block
;
516 *len
= alloc
->list
.iter
->len
- alloc
->list
.partial_iter
;
519 /* Move to the next region in an allocation */
520 void get_next_region(struct block_allocation
*alloc
)
522 alloc
->list
.iter
= alloc
->list
.iter
->next
;
523 alloc
->list
.partial_iter
= 0;
526 /* Returns the number of free blocks in a block group */
527 u32
get_free_blocks(u32 bg
)
529 return aux_info
.bgs
[bg
].free_blocks
;
532 int last_region(struct block_allocation
*alloc
)
534 return (alloc
->list
.iter
== NULL
);
537 void rewind_alloc(struct block_allocation
*alloc
)
539 alloc
->list
.iter
= alloc
->list
.first
;
540 alloc
->list
.partial_iter
= 0;
543 static struct region
*do_split_allocation(struct block_allocation
*alloc
, u32 len
)
545 struct region
*reg
= alloc
->list
.iter
;
549 while (reg
&& len
>= reg
->len
) {
554 if (reg
== NULL
&& len
> 0)
558 new = malloc(sizeof(struct region
));
561 new->block
= reg
->block
+ len
;
562 new->len
= reg
->len
- len
;
563 new->next
= reg
->next
;
569 tmp
= alloc
->list
.iter
;
570 alloc
->list
.iter
= new;
577 /* Splits an allocation into two allocations. The returned allocation will
578 point to the first half, and the original allocation ptr will point to the
580 static struct region
*split_allocation(struct block_allocation
*alloc
, u32 len
)
582 /* First make sure there is a split at the current ptr */
583 do_split_allocation(alloc
, alloc
->list
.partial_iter
);
585 /* Then split off len blocks */
586 struct region
*middle
= do_split_allocation(alloc
, len
);
587 alloc
->list
.partial_iter
= 0;
591 /* Reserve the next blocks for oob data (indirect or extent blocks) */
592 int reserve_oob_blocks(struct block_allocation
*alloc
, int blocks
)
594 struct region
*oob
= split_allocation(alloc
, blocks
);
600 while (oob
&& oob
!= alloc
->list
.iter
) {
602 region_list_remove(&alloc
->list
, oob
);
603 region_list_append(&alloc
->oob_list
, oob
);
610 static int advance_list_ptr(struct region_list
*list
, int blocks
)
612 struct region
*reg
= list
->iter
;
614 while (reg
!= NULL
&& blocks
> 0) {
615 if (reg
->len
> list
->partial_iter
+ blocks
) {
616 list
->partial_iter
+= blocks
;
620 blocks
-= (reg
->len
- list
->partial_iter
);
621 list
->partial_iter
= 0;
631 /* Move the allocation pointer forward */
632 int advance_blocks(struct block_allocation
*alloc
, int blocks
)
634 return advance_list_ptr(&alloc
->list
, blocks
);
637 int advance_oob_blocks(struct block_allocation
*alloc
, int blocks
)
639 return advance_list_ptr(&alloc
->oob_list
, blocks
);
642 int append_oob_allocation(struct block_allocation
*alloc
, u32 len
)
644 struct region
*reg
= ext4_allocate_best_fit(len
);
647 error("failed to allocate %d blocks", len
);
651 for (; reg
; reg
= reg
->next
)
652 region_list_append(&alloc
->oob_list
, reg
);
657 /* Returns an ext4_inode structure for an inode number */
658 struct ext4_inode
*get_inode(u32 inode
)
661 int bg
= inode
/ info
.inodes_per_group
;
662 inode
%= info
.inodes_per_group
;
664 allocate_bg_inode_table(&aux_info
.bgs
[bg
]);
665 return (struct ext4_inode
*)(aux_info
.bgs
[bg
].inode_table
+ inode
*
669 struct ext4_xattr_header
*get_xattr_block_for_inode(struct ext4_inode
*inode
)
671 struct ext4_xattr_header
*block
= xattr_list_find(inode
);
675 u32 block_num
= allocate_block();
676 block
= calloc(info
.block_size
, 1);
678 error("get_xattr: failed to allocate %d", info
.block_size
);
682 block
->h_magic
= cpu_to_le32(EXT4_XATTR_MAGIC
);
683 block
->h_refcount
= cpu_to_le32(1);
684 block
->h_blocks
= cpu_to_le32(1);
685 inode
->i_blocks_lo
= cpu_to_le32(le32_to_cpu(inode
->i_blocks_lo
) + (info
.block_size
/ 512));
686 inode
->i_file_acl_lo
= cpu_to_le32(block_num
);
688 int result
= sparse_file_add_data(ext4_sparse_file
, block
, info
.block_size
, block_num
);
690 error("get_xattr: sparse_file_add_data failure %d", result
);
694 xattr_list_insert(inode
, block
);
698 /* Mark the first len inodes in a block group as used */
699 u32
reserve_inodes(int bg
, u32 num
)
704 if (get_free_inodes(bg
) < num
)
705 return EXT4_ALLOCATE_FAILED
;
707 for (i
= 0; i
< num
; i
++) {
708 inode
= aux_info
.bgs
[bg
].first_free_inode
+ i
- 1;
709 aux_info
.bgs
[bg
].inode_bitmap
[inode
/ 8] |= 1 << (inode
% 8);
712 inode
= aux_info
.bgs
[bg
].first_free_inode
;
714 aux_info
.bgs
[bg
].first_free_inode
+= num
;
715 aux_info
.bgs
[bg
].free_inodes
-= num
;
720 /* Returns the first free inode number
721 TODO: Inodes should be allocated in the block group of the data? */
727 for (bg
= 0; bg
< aux_info
.groups
; bg
++) {
728 inode
= reserve_inodes(bg
, 1);
729 if (inode
!= EXT4_ALLOCATE_FAILED
)
730 return bg
* info
.inodes_per_group
+ inode
;
733 return EXT4_ALLOCATE_FAILED
;
736 /* Returns the number of free inodes in a block group */
737 u32
get_free_inodes(u32 bg
)
739 return aux_info
.bgs
[bg
].free_inodes
;
742 /* Increments the directory count of the block group that contains inode */
743 void add_directory(u32 inode
)
745 int bg
= (inode
- 1) / info
.inodes_per_group
;
746 aux_info
.bgs
[bg
].used_dirs
+= 1;
749 /* Returns the number of inodes in a block group that are directories */
750 u16
get_directories(int bg
)
752 return aux_info
.bgs
[bg
].used_dirs
;
755 /* Returns the flags for a block group */
756 u16
get_bg_flags(int bg
)
758 return aux_info
.bgs
[bg
].flags
;
761 /* Frees the memory used by a linked list of allocation regions */
762 void free_alloc(struct block_allocation
*alloc
)
766 reg
= alloc
->list
.first
;
768 struct region
*next
= reg
->next
;
773 reg
= alloc
->oob_list
.first
;
775 struct region
*next
= reg
->next
;