2 * Copyright (c) 2015 Grzegorz Kostka (kostka.grzegorz@gmail.com)
3 * Copyright (c) 2015 Kaho Ng (ngkaho1234@gmail.com)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include "ext4_config.h"
30 #include "ext4_blockdev.h"
32 #include "ext4_super.h"
33 #include "ext4_crc32c.h"
34 #include "ext4_balloc.h"
35 #include "ext4_debug.h"
42 #include "ext4_extent.h"
44 #if CONFIG_EXTENT_FULL
47 * used by extent splitting.
49 #define EXT4_EXT_MARK_UNWRIT1 0x02 /* mark first half unwritten */
50 #define EXT4_EXT_MARK_UNWRIT2 0x04 /* mark second half unwritten */
51 #define EXT4_EXT_DATA_VALID1 0x08 /* first half contains valid data */
52 #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
53 #define EXT4_EXT_NO_COMBINE 0x20 /* do not combine two extents */
/* Return a pointer to the ext4_extent_tail (checksum record) that sits
 * at EXT4_EXTENT_TAIL_OFFSET(eh) behind the entries of this node. */
static struct ext4_extent_tail *
find_ext4_extent_tail(struct ext4_extent_header *eh)
{
    char *base = (char *)eh;

    return (struct ext4_extent_tail *)(base + EXT4_EXTENT_TAIL_OFFSET(eh));
}
62 static struct ext4_extent_header *ext_inode_hdr(struct ext4_inode *inode)
64 return (struct ext4_extent_header *)inode->blocks;
67 static struct ext4_extent_header *ext_block_hdr(struct ext4_block *block)
69 return (struct ext4_extent_header *)block->data;
72 static uint16_t ext_depth(struct ext4_inode *inode)
74 return to_le16(ext_inode_hdr(inode)->depth);
77 static uint16_t ext4_ext_get_actual_len(struct ext4_extent *ext)
79 return (to_le16(ext->block_count) <= EXT_INIT_MAX_LEN
80 ? to_le16(ext->block_count)
81 : (to_le16(ext->block_count) - EXT_INIT_MAX_LEN));
84 static void ext4_ext_mark_initialized(struct ext4_extent *ext)
86 ext->block_count = to_le16(ext4_ext_get_actual_len(ext));
89 static void ext4_ext_mark_unwritten(struct ext4_extent *ext)
91 ext->block_count |= to_le16(EXT_INIT_MAX_LEN);
94 static int ext4_ext_is_unwritten(struct ext4_extent *ext)
96 /* Extent with ee_len of 0x8000 is treated as an initialized extent */
97 return (to_le16(ext->block_count) > EXT_INIT_MAX_LEN);
102 * combine low and high parts of physical block number into ext4_fsblk_t
104 static ext4_fsblk_t ext4_ext_pblock(struct ext4_extent *ex)
108 block = to_le32(ex->start_lo);
109 block |= ((ext4_fsblk_t)to_le16(ex->start_hi) << 31) << 1;
115 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
117 static ext4_fsblk_t ext4_idx_pblock(struct ext4_extent_index *ix)
121 block = to_le32(ix->leaf_lo);
122 block |= ((ext4_fsblk_t)to_le16(ix->leaf_hi) << 31) << 1;
127 * ext4_ext_store_pblock:
128 * stores a large physical block number into an extent struct,
129 * breaking it into parts
131 static void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
133 ex->start_lo = to_le32((uint32_t)(pb & 0xffffffff));
134 ex->start_hi = to_le16((uint16_t)((pb >> 32)) & 0xffff);
138 * ext4_idx_store_pblock:
139 * stores a large physical block number into an index struct,
140 * breaking it into parts
142 static void ext4_idx_store_pblock(struct ext4_extent_index *ix, ext4_fsblk_t pb)
144 ix->leaf_lo = to_le32((uint32_t)(pb & 0xffffffff));
145 ix->leaf_hi = to_le16((uint16_t)((pb >> 32)) & 0xffff);
148 static int ext4_allocate_single_block(struct ext4_inode_ref *inode_ref,
150 ext4_fsblk_t *blockp)
152 return ext4_balloc_alloc_block(inode_ref, goal, blockp);
155 static ext4_fsblk_t ext4_new_meta_blocks(struct ext4_inode_ref *inode_ref,
157 uint32_t flags __unused,
158 uint32_t *count, int *errp)
160 ext4_fsblk_t block = 0;
162 *errp = ext4_allocate_single_block(inode_ref, goal, &block);
168 static void ext4_ext_free_blocks(struct ext4_inode_ref *inode_ref,
169 ext4_fsblk_t block, uint32_t count,
170 uint32_t flags __unused)
172 ext4_balloc_free_blocks(inode_ref, block, count);
175 static size_t ext4_ext_space_block(struct ext4_inode_ref *inode_ref)
178 uint32_t block_size = ext4_sb_get_block_size(&inode_ref->fs->sb);
180 size = (block_size - sizeof(struct ext4_extent_header)) /
181 sizeof(struct ext4_extent);
182 #ifdef AGGRESSIVE_TEST
189 static size_t ext4_ext_space_block_idx(struct ext4_inode_ref *inode_ref)
192 uint32_t block_size = ext4_sb_get_block_size(&inode_ref->fs->sb);
194 size = (block_size - sizeof(struct ext4_extent_header)) /
195 sizeof(struct ext4_extent_index);
196 #ifdef AGGRESSIVE_TEST
203 static size_t ext4_ext_space_root(struct ext4_inode_ref *inode_ref)
207 size = sizeof(inode_ref->inode->blocks);
208 size -= sizeof(struct ext4_extent_header);
209 size /= sizeof(struct ext4_extent);
210 #ifdef AGGRESSIVE_TEST
217 static size_t ext4_ext_space_root_idx(struct ext4_inode_ref *inode_ref)
221 size = sizeof(inode_ref->inode->blocks);
222 size -= sizeof(struct ext4_extent_header);
223 size /= sizeof(struct ext4_extent_index);
224 #ifdef AGGRESSIVE_TEST
231 static size_t ext4_ext_max_entries(struct ext4_inode_ref *inode_ref,
236 if (depth == ext_depth(inode_ref->inode)) {
238 max = ext4_ext_space_root(inode_ref);
240 max = ext4_ext_space_root_idx(inode_ref);
243 max = ext4_ext_space_block(inode_ref);
245 max = ext4_ext_space_block_idx(inode_ref);
251 static ext4_fsblk_t ext4_ext_find_goal(struct ext4_inode_ref *inode_ref,
252 struct ext4_extent_path *path,
256 uint32_t depth = path->depth;
257 struct ext4_extent *ex;
260 * Try to predict block placement assuming that we are
261 * filling in a file which will eventually be
262 * non-sparse --- i.e., in the case of libbfd writing
263 * an ELF object sections out-of-order but in a way
264 * the eventually results in a contiguous object or
265 * executable file, or some database extending a table
266 * space file. However, this is actually somewhat
267 * non-ideal if we are writing a sparse file such as
268 * qemu or KVM writing a raw image file that is going
269 * to stay fairly sparse, since it will end up
270 * fragmenting the file system's free space. Maybe we
271 * should have some hueristics or some way to allow
272 * userspace to pass a hint to file system,
273 * especially if the latter case turns out to be
276 ex = path[depth].extent;
278 ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
279 ext4_lblk_t ext_block = to_le32(ex->first_block);
281 if (block > ext_block)
282 return ext_pblk + (block - ext_block);
284 return ext_pblk - (ext_block - block);
287 /* it looks like index is empty;
288 * try to find starting block from index itself */
289 if (path[depth].block.lb_id)
290 return path[depth].block.lb_id;
293 /* OK. use inode's group */
294 return ext4_fs_inode_to_goal_block(inode_ref);
298 * Allocation for a meta data block
300 static ext4_fsblk_t ext4_ext_new_meta_block(struct ext4_inode_ref *inode_ref,
301 struct ext4_extent_path *path,
302 struct ext4_extent *ex, int *err,
305 ext4_fsblk_t goal, newblock;
307 goal = ext4_ext_find_goal(inode_ref, path, to_le32(ex->first_block));
308 newblock = ext4_new_meta_blocks(inode_ref, goal, flags, NULL, err);
312 #if CONFIG_META_CSUM_ENABLE
313 static uint32_t ext4_ext_block_csum(struct ext4_inode_ref *inode_ref,
314 struct ext4_extent_header *eh)
316 uint32_t checksum = 0;
317 struct ext4_sblock *sb = &inode_ref->fs->sb;
319 if (ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM)) {
320 uint32_t ino_index = to_le32(inode_ref->index);
322 to_le32(ext4_inode_get_generation(inode_ref->inode));
323 /* First calculate crc32 checksum against fs uuid */
324 checksum = ext4_crc32c(EXT4_CRC32_INIT, sb->uuid,
326 /* Then calculate crc32 checksum against inode number
327 * and inode generation */
328 checksum = ext4_crc32c(checksum, &ino_index,
330 checksum = ext4_crc32c(checksum, &ino_gen,
332 /* Finally calculate crc32 checksum against
333 * the entire extent block up to the checksum field */
334 checksum = ext4_crc32c(checksum, eh,
335 EXT4_EXTENT_TAIL_OFFSET(eh));
340 #define ext4_ext_block_csum(...) 0
343 static void ext4_extent_block_csum_set(struct ext4_inode_ref *inode_ref __unused,
344 struct ext4_extent_header *eh)
346 struct ext4_extent_tail *tail;
348 tail = find_ext4_extent_tail(eh);
349 tail->et_checksum = to_le32(ext4_ext_block_csum(inode_ref, eh));
352 static int ext4_ext_dirty(struct ext4_inode_ref *inode_ref,
353 struct ext4_extent_path *path)
355 if (path->block.lb_id)
356 path->block.dirty = true;
358 inode_ref->dirty = true;
363 static void ext4_ext_drop_refs(struct ext4_inode_ref *inode_ref,
364 struct ext4_extent_path *path, bool keep_other)
375 for (i = 0; i <= depth; i++, path++) {
376 if (path->block.lb_id) {
377 if (path->block.dirty)
378 ext4_extent_block_csum_set(inode_ref,
381 ext4_block_set(inode_ref->fs->bdev, &path->block);
387 * Check that whether the basic information inside the extent header
390 static int ext4_ext_check(struct ext4_inode_ref *inode_ref,
391 struct ext4_extent_header *eh, uint16_t depth,
392 ext4_fsblk_t pblk __unused)
394 struct ext4_extent_tail *tail;
395 const char *error_msg;
398 if (to_le16(eh->magic) != EXT4_EXTENT_MAGIC) {
399 error_msg = "invalid magic";
402 if (to_le16(eh->depth) != depth) {
403 error_msg = "unexpected eh_depth";
406 if (eh->max_entries_count == 0) {
407 error_msg = "invalid eh_max";
410 if (to_le16(eh->entries_count) > to_le16(eh->max_entries_count)) {
411 error_msg = "invalid eh_entries";
415 tail = find_ext4_extent_tail(eh);
416 struct ext4_sblock *sb = &inode_ref->fs->sb;
417 if (ext4_sb_feature_ro_com(sb, EXT4_FRO_COM_METADATA_CSUM)) {
418 if (tail->et_checksum != to_le32(ext4_ext_block_csum(inode_ref, eh))) {
419 /* FIXME: Warning: extent checksum damaged? */
426 ext4_dbg(DEBUG_EXTENT, "Bad extents B+ tree block: %s. "
427 "Blocknr: %" PRId64 "\n",
432 static int read_extent_tree_block(struct ext4_inode_ref *inode_ref,
433 ext4_fsblk_t pblk, int32_t depth,
434 struct ext4_block *bh,
435 uint32_t flags __unused)
439 err = ext4_block_get(inode_ref->fs->bdev, bh, pblk);
443 err = ext4_ext_check(inode_ref, ext_block_hdr(bh), depth, pblk);
450 ext4_block_set(inode_ref->fs->bdev, bh);
456 * ext4_ext_binsearch_idx:
457 * binary search for the closest index of the given block
458 * the header must be checked before calling this
460 static void ext4_ext_binsearch_idx(struct ext4_extent_path *path,
463 struct ext4_extent_header *eh = path->header;
464 struct ext4_extent_index *r, *l, *m;
466 l = EXT_FIRST_INDEX(eh) + 1;
467 r = EXT_LAST_INDEX(eh);
470 if (block < to_le32(m->first_block))
480 * ext4_ext_binsearch:
481 * binary search for closest extent of the given block
482 * the header must be checked before calling this
484 static void ext4_ext_binsearch(struct ext4_extent_path *path, ext4_lblk_t block)
486 struct ext4_extent_header *eh = path->header;
487 struct ext4_extent *r, *l, *m;
489 if (eh->entries_count == 0) {
491 * this leaf is empty:
492 * we get such a leaf in split/add case
497 l = EXT_FIRST_EXTENT(eh) + 1;
498 r = EXT_LAST_EXTENT(eh);
502 if (block < to_le32(m->first_block))
508 path->extent = l - 1;
/*
 * ext4_find_extent: descend the extent tree from the inode root to the
 * leaf that should contain @block, filling one ext4_extent_path entry per
 * level into *orig_path (reallocated when the tree grew past maxdepth).
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
511 static int ext4_find_extent(struct ext4_inode_ref *inode_ref, ext4_lblk_t block,
512 			    struct ext4_extent_path **orig_path, uint32_t flags)
514 	struct ext4_extent_header *eh;
515 	struct ext4_block bh = EXT4_BLOCK_ZERO();
516 	ext4_fsblk_t buf_block = 0;
517 	struct ext4_extent_path *path = *orig_path;
518 	int32_t depth, ppos = 0;
522 	eh = ext_inode_hdr(inode_ref->inode);
523 	depth = ext_depth(inode_ref->inode);
/* Reuse the caller's path array when deep enough; otherwise drop it. */
526 		ext4_ext_drop_refs(inode_ref, path, 0);
527 		if (depth > path[0].maxdepth) {
529 			*orig_path = path = NULL;
533 		int32_t path_depth = depth + 1;
534 		/* account possible depth increase */
535 		path = calloc(1, sizeof(struct ext4_extent_path) *
539 		path[0].maxdepth = path_depth;
545 	/* walk through the tree */
/* At each interior level: pick the covering index and its child block. */
547 		ext4_ext_binsearch_idx(path + ppos, block);
548 		path[ppos].p_block = ext4_idx_pblock(path[ppos].index);
549 		path[ppos].depth = i;
550 		path[ppos].extent = NULL;
551 		buf_block = path[ppos].p_block;
/* Only re-read the child when it is not already cached in this slot. */
555 		if (!path[ppos].block.lb_id ||
556 		    path[ppos].block.lb_id != buf_block) {
557 			ret = read_extent_tree_block(inode_ref, buf_block, i,
563 				ext4_block_set(inode_ref->fs->bdev, &bh);
568 			eh = ext_block_hdr(&bh);
569 			path[ppos].block = bh;
570 			path[ppos].header = eh;
/* Leaf level: locate the extent itself. */
574 	path[ppos].depth = i;
575 	path[ppos].extent = NULL;
576 	path[ppos].index = NULL;
579 	ext4_ext_binsearch(path + ppos, block);
580 	/* if not an empty leaf */
581 	if (path[ppos].extent)
582 		path[ppos].p_block = ext4_ext_pblock(path[ppos].extent);
/* Error path: release every buffer taken so far. */
590 	ext4_ext_drop_refs(inode_ref, path, 0);
597 static void ext4_ext_init_header(struct ext4_inode_ref *inode_ref,
598 struct ext4_extent_header *eh, int32_t depth)
600 eh->entries_count = 0;
601 eh->max_entries_count = to_le16(ext4_ext_max_entries(inode_ref, depth));
602 eh->magic = to_le16(EXT4_EXTENT_MAGIC);
607 * Be cautious, the buffer_head returned is not yet mark dirtied. */
608 static int ext4_ext_split_node(struct ext4_inode_ref *inode_ref,
609 struct ext4_extent_path *path, int32_t at,
610 struct ext4_extent *newext,
611 ext4_fsblk_t *sibling, struct ext4_block *new_bh)
614 ext4_fsblk_t newblock;
615 struct ext4_block bh = EXT4_BLOCK_ZERO();
616 int32_t depth = ext_depth(inode_ref->inode);
618 ext4_assert(sibling);
620 /* FIXME: currently we split at the point after the current extent. */
621 newblock = ext4_ext_new_meta_block(inode_ref, path, newext, &ret, 0);
625 /* For write access.# */
626 ret = ext4_block_get(inode_ref->fs->bdev, &bh, newblock);
631 /* start copy from next extent */
632 ptrdiff_t m = EXT_MAX_EXTENT(path[at].header) - path[at].extent;
633 struct ext4_extent_header *neh;
634 neh = ext_block_hdr(&bh);
635 ext4_ext_init_header(inode_ref, neh, 0);
637 struct ext4_extent *ex;
638 ex = EXT_FIRST_EXTENT(neh);
639 memmove(ex, path[at].extent + 1,
640 sizeof(struct ext4_extent) * m);
642 to_le16(to_le16(neh->entries_count) + m);
643 path[at].header->entries_count = to_le16(
644 to_le16(path[at].header->entries_count) - m);
645 ret = ext4_ext_dirty(inode_ref, path + at);
650 ptrdiff_t m = EXT_MAX_INDEX(path[at].header) - path[at].index;
651 struct ext4_extent_header *neh;
652 neh = ext_block_hdr(&bh);
653 ext4_ext_init_header(inode_ref, neh, depth - at);
655 struct ext4_extent_index *ix;
656 ix = EXT_FIRST_INDEX(neh);
657 memmove(ix, path[at].index + 1,
658 sizeof(struct ext4_extent) * m);
660 to_le16(to_le16(neh->entries_count) + m);
661 path[at].header->entries_count = to_le16(
662 to_le16(path[at].header->entries_count) - m);
663 ret = ext4_ext_dirty(inode_ref, path + at);
671 ext4_block_set(inode_ref->fs->bdev, &bh);
674 ext4_ext_free_blocks(inode_ref, newblock, 1, 0);
683 static ext4_lblk_t ext4_ext_block_index(struct ext4_extent_header *eh)
686 return to_le32(EXT_FIRST_INDEX(eh)->first_block);
688 return to_le32(EXT_FIRST_EXTENT(eh)->first_block);
/* Per-level book-keeping for a split performed during extent insertion.
 * NOTE(review): extraction incomplete — the member list shown here is
 * partial; code elsewhere in this file also references .ptr (new sibling
 * block number) and .switch_to. Confirm against the full file. */
691 struct ext_split_trans {
/* Path entry describing the newly created sibling node. */
693 	struct ext4_extent_path path;
/*
 * Insert (insert_index, insert_block) into the index node at path[at].
 * If the node is full, it is split via ext4_ext_split_node and the entry
 * goes into whichever half has more room; @spt records the sibling so the
 * parent level can be updated, and *need_grow is set when the tree must
 * grow a level instead.
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
697 static int ext4_ext_insert_index(struct ext4_inode_ref *inode_ref,
698 struct ext4_extent_path *path,
700 struct ext4_extent *newext,
701 ext4_lblk_t insert_index,
702 ext4_fsblk_t insert_block,
703 struct ext_split_trans *spt,
706 struct ext4_extent_index *ix;
707 struct ext4_extent_path *curp = path + at;
708 struct ext4_block bh = EXT4_BLOCK_ZERO();
711 struct ext4_extent_header *eh;
/* Duplicate key: nothing to insert. */
715 if (curp->index && insert_index == to_le32(curp->index->first_block))
/* Node full: split, then choose the roomier half for the new entry. */
718 if (to_le16(curp->header->entries_count) ==
719 to_le16(curp->header->max_entries_count)) {
721 struct ext4_extent_header *neh;
722 err = ext4_ext_split_node(inode_ref, path, at, newext,
727 neh = ext_block_hdr(&bh);
728 if (insert_index > to_le32(curp->index->first_block)) {
729 /* Make decision which node should be used to
730 * insert the index.*/
731 if (to_le16(neh->entries_count) >
732 to_le16(curp->header->entries_count)) {
735 ix = EXT_LAST_INDEX(eh) + 1;
738 ix = EXT_FIRST_INDEX(eh);
743 ix = EXT_LAST_INDEX(eh);
/* Non-split path: compute the insertion slot within the node. */
752 if (curp->index == NULL) {
753 ix = EXT_FIRST_INDEX(eh);
755 } else if (insert_index > to_le32(curp->index->first_block)) {
757 ix = curp->index + 1;
/* Shift the tail of the entry array one slot right to make room. */
764 len = EXT_LAST_INDEX(eh) - ix + 1;
765 ext4_assert(len >= 0);
767 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_index));
769 if (ix > EXT_MAX_INDEX(eh)) {
/* Write the new entry and bump the entry count. */
774 ix->first_block = to_le32(insert_index);
775 ext4_idx_store_pblock(ix, insert_block);
776 eh->entries_count = to_le16(to_le16(eh->entries_count) + 1);
778 if (ix > EXT_LAST_INDEX(eh)) {
783 if (eh == curp->header)
784 err = ext4_ext_dirty(inode_ref, curp);
/* On error or when growth is needed, release the sibling buffer. */
789 if (err != EOK || *need_grow) {
791 ext4_block_set(inode_ref->fs->bdev, &bh);
794 } else if (bh.lb_id) {
795 /* If we got a sibling leaf. */
796 ext4_extent_block_csum_set(inode_ref, ext_block_hdr(&bh));
/* Record the sibling's path entry for the caller. */
799 spt->path.p_block = ext4_idx_pblock(ix);
800 spt->path.depth = to_le16(eh->depth);
801 spt->path.maxdepth = 0;
802 spt->path.extent = NULL;
803 spt->path.index = ix;
804 spt->path.header = eh;
805 spt->path.block = bh;
808 * If newext->ee_block can be included into the
811 if (to_le32(newext->first_block) >=
812 ext4_ext_block_index(ext_block_hdr(&bh)))
816 curp->p_block = ext4_idx_pblock(ix);
822 curp->p_block = ext4_idx_pblock(ix);
828 * ext4_ext_correct_indexes:
829 * if leaf gets modified and modified extent is first in the leaf,
830 * then we have to correct all indexes above.
/*
 * ext4_ext_correct_indexes: after modifying the FIRST extent of a leaf,
 * propagate the new starting logical block ("border") into the parent
 * index entries, walking upward while the modified entry stays first in
 * its node.
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
832 static int ext4_ext_correct_indexes(struct ext4_inode_ref *inode_ref,
833 struct ext4_extent_path *path)
835 struct ext4_extent_header *eh;
836 int32_t depth = ext_depth(inode_ref->inode);
837 struct ext4_extent *ex;
842 eh = path[depth].header;
843 ex = path[depth].extent;
845 if (ex == NULL || eh == NULL) {
850 /* there is no tree at all */
852 if (ex != EXT_FIRST_EXTENT(eh)) {
855 /* we correct tree if first leaf got modified only */
860 * TODO: we need correction if border is smaller than current one
/* Push the new border into the immediate parent. */
863 border = path[depth].extent->first_block;
864 path[k].index->first_block = border;
865 err = ext4_ext_dirty(inode_ref, path + k);
870 /* change all left-side indexes */
/* Stop climbing once the child is no longer the first entry. */
871 if (path[k + 1].index != EXT_FIRST_INDEX(path[k + 1].header))
873 path[k].index->first_block = border;
874 err = ext4_ext_dirty(inode_ref, path + k);
882 static bool ext4_ext_can_prepend(struct ext4_extent *ex1,
883 struct ext4_extent *ex2)
885 if (ext4_ext_pblock(ex2) + ext4_ext_get_actual_len(ex2) !=
886 ext4_ext_pblock(ex1))
889 #ifdef AGGRESSIVE_TEST
890 if (ext4_ext_get_actual_len(ex1) + ext4_ext_get_actual_len(ex2) > 4)
893 if (ext4_ext_is_unwritten(ex1)) {
894 if (ext4_ext_get_actual_len(ex1) +
895 ext4_ext_get_actual_len(ex2) >
896 EXT_UNWRITTEN_MAX_LEN)
898 } else if (ext4_ext_get_actual_len(ex1) + ext4_ext_get_actual_len(ex2) >
903 if (to_le32(ex2->first_block) + ext4_ext_get_actual_len(ex2) !=
904 to_le32(ex1->first_block))
910 static bool ext4_ext_can_append(struct ext4_extent *ex1,
911 struct ext4_extent *ex2)
913 if (ext4_ext_pblock(ex1) + ext4_ext_get_actual_len(ex1) !=
914 ext4_ext_pblock(ex2))
917 #ifdef AGGRESSIVE_TEST
918 if (ext4_ext_get_actual_len(ex1) + ext4_ext_get_actual_len(ex2) > 4)
921 if (ext4_ext_is_unwritten(ex1)) {
922 if (ext4_ext_get_actual_len(ex1) +
923 ext4_ext_get_actual_len(ex2) >
924 EXT_UNWRITTEN_MAX_LEN)
926 } else if (ext4_ext_get_actual_len(ex1) + ext4_ext_get_actual_len(ex2) >
931 if (to_le32(ex1->first_block) + ext4_ext_get_actual_len(ex1) !=
932 to_le32(ex2->first_block))
/*
 * Insert @newext into the leaf at path[at]. First tries to merge it onto
 * a neighbouring extent (append/prepend) unless EXT4_EXT_NO_COMBINE is
 * set; otherwise inserts a fresh entry, splitting the leaf via
 * ext4_ext_split_node when it is full. @spt records a created sibling;
 * *need_grow is set when the tree must grow a level instead.
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
938 static int ext4_ext_insert_leaf(struct ext4_inode_ref *inode_ref,
939 struct ext4_extent_path *path,
941 struct ext4_extent *newext,
942 struct ext_split_trans *spt,
946 struct ext4_extent_path *curp = path + at;
947 struct ext4_extent *ex = curp->extent;
948 struct ext4_block bh = EXT4_BLOCK_ZERO();
952 struct ext4_extent_header *eh = NULL;
/* Duplicate starting block: reject. */
957 to_le32(newext->first_block) == to_le32(curp->extent->first_block))
960 if (!(flags & EXT4_EXT_NO_COMBINE)) {
/* Fast path 1: extend the current extent forward (append). */
961 if (curp->extent && ext4_ext_can_append(curp->extent, newext)) {
962 unwritten = ext4_ext_is_unwritten(curp->extent);
963 curp->extent->block_count =
964 to_le16(ext4_ext_get_actual_len(curp->extent) +
965 ext4_ext_get_actual_len(newext));
/* Re-apply the unwritten flag clobbered by the length rewrite. */
967 ext4_ext_mark_unwritten(curp->extent);
968 err = ext4_ext_dirty(inode_ref, curp);
/* Fast path 2: extend the current extent backward (prepend). */
973 ext4_ext_can_prepend(curp->extent, newext)) {
974 unwritten = ext4_ext_is_unwritten(curp->extent);
975 curp->extent->first_block = newext->first_block;
976 curp->extent->block_count =
977 to_le16(ext4_ext_get_actual_len(curp->extent) +
978 ext4_ext_get_actual_len(newext));
980 ext4_ext_mark_unwritten(curp->extent);
981 err = ext4_ext_dirty(inode_ref, curp);
/* Leaf full: split, then pick the roomier half for the new entry. */
986 if (to_le16(curp->header->entries_count) ==
987 to_le16(curp->header->max_entries_count)) {
989 struct ext4_extent_header *neh;
990 err = ext4_ext_split_node(inode_ref, path, at, newext,
995 neh = ext_block_hdr(&bh);
996 if (to_le32(newext->first_block) >
997 to_le32(curp->extent->first_block)) {
998 if (to_le16(neh->entries_count) >
999 to_le16(curp->header->entries_count)) {
1002 ex = EXT_LAST_EXTENT(eh) + 1;
1005 ex = EXT_FIRST_EXTENT(eh);
1010 ex = EXT_LAST_EXTENT(eh);
/* Non-split path: compute the insertion slot within the leaf. */
1019 if (curp->extent == NULL) {
1020 ex = EXT_FIRST_EXTENT(eh);
1022 } else if (to_le32(newext->first_block) >
1023 to_le32(curp->extent->first_block)) {
1025 ex = curp->extent + 1;
/* Shift the tail of the extent array one slot right. */
1032 len = EXT_LAST_EXTENT(eh) - ex + 1;
1033 ext4_assert(len >= 0);
1035 memmove(ex + 1, ex, len * sizeof(struct ext4_extent));
1037 if (ex > EXT_MAX_EXTENT(eh)) {
/* Write the new extent and bump the entry count. */
1042 ex->first_block = newext->first_block;
1043 ex->block_count = newext->block_count;
1044 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
1045 eh->entries_count = to_le16(to_le16(eh->entries_count) + 1);
1047 if (ex > EXT_LAST_EXTENT(eh)) {
/* Inserting at the front requires fixing the parent indexes. */
1052 if (eh == curp->header) {
1053 err = ext4_ext_correct_indexes(inode_ref, path);
1056 err = ext4_ext_dirty(inode_ref, curp);
1061 if (err != EOK || *need_grow) {
1063 ext4_block_set(inode_ref->fs->bdev, &bh);
1066 } else if (bh.lb_id) {
1067 /* If we got a sibling leaf. */
1068 ext4_extent_block_csum_set(inode_ref, ext_block_hdr(&bh));
/* Record the sibling's path entry for the caller. */
1071 spt->path.p_block = ext4_ext_pblock(ex);
1072 spt->path.depth = to_le16(eh->depth);
1073 spt->path.maxdepth = 0;
1074 spt->path.extent = ex;
1075 spt->path.index = NULL;
1076 spt->path.header = eh;
1077 spt->path.block = bh;
1080 * If newext->ee_block can be included into the
1083 if (to_le32(newext->first_block) >=
1084 ext4_ext_block_index(ext_block_hdr(&bh)))
1088 curp->p_block = ext4_ext_pblock(ex);
1094 curp->p_block = ext4_ext_pblock(ex);
1101 * ext4_ext_grow_indepth:
1102 * implements tree growing procedure:
1103 * - allocates new block
1104 * - moves top-level data (index block or leaf) into the new block
1105 * - initializes new top-level, creating index that points to the
1106 * just created block
/*
 * Grow the tree by one level: copy the inode-embedded root into a newly
 * allocated block and turn the root into a single index entry pointing
 * at it (see the comment block above this function).
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
1108 static int ext4_ext_grow_indepth(struct ext4_inode_ref *inode_ref,
1111 struct ext4_extent_header *neh;
1112 struct ext4_block bh = EXT4_BLOCK_ZERO();
1113 ext4_fsblk_t newblock, goal = 0;
1116 /* Try to prepend new index to old one */
1117 if (ext_depth(inode_ref->inode))
1118 goal = ext4_idx_pblock(
1119 EXT_FIRST_INDEX(ext_inode_hdr(inode_ref->inode)));
1121 goal = ext4_fs_inode_to_goal_block(inode_ref);
1123 newblock = ext4_new_meta_blocks(inode_ref, goal, flags, NULL, &err);
1128 err = ext4_block_get(inode_ref->fs->bdev, &bh, newblock);
/* Getting the buffer failed: give the fresh block back. */
1130 ext4_ext_free_blocks(inode_ref, newblock, 1, 0);
1134 /* move top-level index/leaf into new block */
1135 memmove(bh.data, inode_ref->inode->blocks,
1136 sizeof(inode_ref->inode->blocks));
1138 /* set size of new block */
1139 neh = ext_block_hdr(&bh);
1140 /* old root could have indexes or leaves
1141 * so calculate e_max right way */
1142 if (ext_depth(inode_ref->inode))
1143 neh->max_entries_count =
1144 to_le16(ext4_ext_space_block_idx(inode_ref));
1146 neh->max_entries_count =
1147 to_le16(ext4_ext_space_block(inode_ref));
1149 neh->magic = to_le16(EXT4_EXTENT_MAGIC);
1151 /* Update top-level index: num,max,pointer */
1152 neh = ext_inode_hdr(inode_ref->inode);
1153 neh->entries_count = to_le16(1);
1154 ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1155 if (neh->depth == 0) {
1156 /* Root extent block becomes index block */
1157 neh->max_entries_count =
1158 to_le16(ext4_ext_space_root_idx(inode_ref));
1159 EXT_FIRST_INDEX(neh)
1160 ->first_block = EXT_FIRST_EXTENT(neh)->first_block;
/* Root depth increases by exactly one level. */
1162 neh->depth = to_le16(to_le16(neh->depth) + 1);
1164 ext4_extent_block_csum_set(inode_ref, neh);
1166 inode_ref->dirty = true;
1167 ext4_block_set(inode_ref->fs->bdev, &bh);
/*
 * Debug helper: dump each level of @path (depth, physical block, and the
 * extent/index entry offsets within their node). Compiled but normally
 * unreferenced, hence __unused.
 * NOTE(review): this extraction is missing lines (the loop and the
 * offset computations are partial).
 */
1172 __unused static void print_path(struct ext4_extent_path *path)
1174 	int32_t i = path->depth;
1179 ? (path->extent - EXT_FIRST_EXTENT(path->header))
1183 ? (path->index - EXT_FIRST_INDEX(path->header))
1188 ext4_dbg(DEBUG_EXTENT,
1189 "depth %" PRId32 ", p_block: %" PRIu64 ","
1190 "p_ext offset: %td, p_idx offset: %td\n",
1191 i, path->p_block, a, b);
1197 static void ext4_ext_replace_path(struct ext4_inode_ref *inode_ref,
1198 struct ext4_extent_path *path,
1199 struct ext_split_trans *spt,
1202 int32_t depth = ext_depth(inode_ref->inode);
1203 int32_t i = depth - level;
1204 ext4_ext_drop_refs(inode_ref, path + i, 1);
1205 path[i] = spt[level].path;
/*
 * Top-level insertion driver: insert @newext into the tree behind *ppath.
 * Works bottom-up — insert into the leaf, and if a split produced a
 * sibling, insert the corresponding index one level up, repeating until
 * no split propagates; grows the tree a level when even the root is full.
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
1208 static int ext4_ext_insert_extent(struct ext4_inode_ref *inode_ref,
1209 struct ext4_extent_path **ppath,
1210 struct ext4_extent *newext, uint32_t flags)
1212 int32_t i, depth, level;
1214 ext4_fsblk_t ptr = 0;
1215 bool need_grow = false;
1216 struct ext4_extent_path *path = *ppath;
1217 struct ext_split_trans *spt = NULL;
1218 struct ext_split_trans newblock;
1220 memset(&newblock, 0, sizeof(newblock));
1222 depth = ext_depth(inode_ref->inode);
/* Find the deepest level that still has a free slot. */
1223 for (i = depth, level = 0; i >= 0; i--, level++)
1224 if (EXT_HAS_FREE_INDEX(path + i))
/* One split record per level that may need to split. */
1228 spt = calloc(1, sizeof(struct ext_split_trans) * (level));
1236 depth = ext_depth(inode_ref->inode);
/* Leaf level takes the extent; upper levels take the sibling's index. */
1240 ret = ext4_ext_insert_leaf(inode_ref, path, depth - i,
1241 newext, &newblock, flags,
1244 ret = ext4_ext_insert_index(
1245 inode_ref, path, depth - i, newext,
1246 ext4_ext_block_index(
1247 ext_block_hdr(&spt[i - 1].path.block)),
1248 spt[i - 1].ptr, &newblock,
1256 else if (spt && ptr && !ret) {
1257 /* Prepare for the next iteration after splitting. */
1262 } while (ptr != 0 && i <= depth);
/* Even the root was full: grow a level and retry from a fresh path. */
1265 ret = ext4_ext_grow_indepth(inode_ref, 0);
1268 ret = ext4_find_extent(inode_ref, to_le32(newext->first_block),
1279 ext4_ext_drop_refs(inode_ref, path, 0);
/* Error unwind: free sibling blocks created by partial splits. */
1281 while (--level >= 0 && spt) {
1282 if (spt[level].ptr) {
1283 ext4_ext_free_blocks(inode_ref, spt[level].ptr,
1285 ext4_ext_drop_refs(inode_ref, &spt[level].path,
/* Success unwind: adopt or release each recorded sibling path. */
1290 while (--level >= 0 && spt) {
1291 if (spt[level].switch_to)
1292 ext4_ext_replace_path(inode_ref, path, spt,
1294 else if (spt[level].ptr)
1295 ext4_ext_drop_refs(inode_ref, &spt[level].path,
1305 static void ext4_ext_remove_blocks(struct ext4_inode_ref *inode_ref,
1306 struct ext4_extent *ex, ext4_lblk_t from,
1309 ext4_lblk_t len = to - from + 1;
1312 num = from - to_le32(ex->first_block);
1313 start = ext4_ext_pblock(ex) + num;
1314 ext4_dbg(DEBUG_EXTENT,
1315 "Freeing %" PRIu32 " at %" PRIu64 ", %" PRIu32 "\n", from,
1318 ext4_ext_free_blocks(inode_ref, start, len, 0);
/*
 * Remove the index entry at path[depth] and free the child block it
 * referenced; if the removed entry was first in its node, propagate the
 * new first_block upward.
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
1321 static int ext4_ext_remove_idx(struct ext4_inode_ref *inode_ref,
1322 struct ext4_extent_path *path, int32_t depth)
1328 /* free index block */
1329 leaf = ext4_idx_pblock(path[i].index);
/* Close the gap left by the removed entry. */
1331 if (path[i].index != EXT_LAST_INDEX(path[i].header)) {
1332 ptrdiff_t len = EXT_LAST_INDEX(path[i].header) - path[i].index;
1333 memmove(path[i].index, path[i].index + 1,
1334 len * sizeof(struct ext4_extent_index));
1337 path[i].header->entries_count =
1338 to_le16(to_le16(path[i].header->entries_count) - 1);
1339 err = ext4_ext_dirty(inode_ref, path + i);
1343 ext4_dbg(DEBUG_EXTENT, "IDX: Freeing %" PRIu32 " at %" PRIu64 ", %d\n",
1344 to_le32(path[i].index->first_block), leaf, 1);
1345 ext4_ext_free_blocks(inode_ref, leaf, 1, 0);
/* Removed entry was first: fix the parent's key. */
1348 if (path[i].index != EXT_FIRST_INDEX(path[i].header))
1351 path[i - 1].index->first_block = path[i].index->first_block;
1352 err = ext4_ext_dirty(inode_ref, path + i - 1);
1361 static int ext4_ext_remove_leaf(struct ext4_inode_ref *inode_ref,
1362 struct ext4_extent_path *path, ext4_lblk_t from,
1366 int32_t depth = ext_depth(inode_ref->inode);
1367 struct ext4_extent *ex = path[depth].extent;
1368 struct ext4_extent *start_ex, *ex2 = NULL;
1369 struct ext4_extent_header *eh = path[depth].header;
1372 uint16_t new_entries;
1375 new_entries = to_le16(eh->entries_count);
1376 while (ex <= EXT_LAST_EXTENT(path[depth].header) &&
1377 to_le32(ex->first_block) <= to) {
1378 int32_t new_len = 0;
1380 ext4_lblk_t start, new_start;
1381 ext4_fsblk_t newblock;
1382 new_start = start = to_le32(ex->first_block);
1383 len = ext4_ext_get_actual_len(ex);
1384 newblock = ext4_ext_pblock(ex);
1386 len -= from - start;
1387 new_len = from - start;
1391 if (start + len - 1 > to) {
1392 len -= start + len - 1 - to;
1393 new_len = start + len - 1 - to;
1395 newblock += to + 1 - start;
1400 ext4_ext_remove_blocks(inode_ref, ex, start, start + len - 1);
1401 ex->first_block = to_le32(new_start);
1405 unwritten = ext4_ext_is_unwritten(ex);
1406 ex->block_count = to_le16(new_len);
1407 ext4_ext_store_pblock(ex, newblock);
1409 ext4_ext_mark_unwritten(ex);
1418 if (ex2 <= EXT_LAST_EXTENT(eh))
1419 memmove(start_ex, ex2, EXT_LAST_EXTENT(eh) - ex2 + 1);
1421 eh->entries_count = to_le16(new_entries);
1422 ext4_ext_dirty(inode_ref, path + depth);
1423 if (path[depth].extent == EXT_FIRST_EXTENT(eh) && eh->entries_count)
1424 err = ext4_ext_correct_indexes(inode_ref, path);
1426 /* if this leaf is free, then we should
1427 * remove it from index block above */
1428 if (err == EOK && eh->entries_count == 0 && path[depth].block.lb_id)
1429 err = ext4_ext_remove_idx(inode_ref, path, depth - 1);
1431 path[depth - 1].index++;
1436 static bool ext4_ext_more_to_rm(struct ext4_extent_path *path, ext4_lblk_t to)
1438 if (!to_le16(path->header->entries_count))
1441 if (path->index > EXT_LAST_INDEX(path->header))
1444 if (to_le32(path->index->first_block) > to)
/*
 * Public entry point: remove logical blocks [@from, @to] from the inode's
 * extent tree. Handles the special case where the range lies strictly
 * inside one extent (split into head + new tail extent), then walks the
 * tree depth-first removing covered leaves and empty index nodes, and
 * finally resets the root when the whole tree was freed.
 * NOTE(review): this extraction is missing lines; the body below is a
 * fragment — reconstruct against the full file before editing logic.
 */
1450 int ext4_extent_remove_space(struct ext4_inode_ref *inode_ref, ext4_lblk_t from,
1453 struct ext4_extent_path *path = NULL;
1455 int32_t depth = ext_depth(inode_ref->inode);
1458 ret = ext4_find_extent(inode_ref, from, &path, 0);
1462 if (!path[depth].extent) {
1467 bool in_range = IN_RANGE(from, to_le32(path[depth].extent->first_block),
1468 ext4_ext_get_actual_len(path[depth].extent));
1475 /* If we do remove_space inside the range of an extent */
1476 if ((to_le32(path[depth].extent->first_block) < from) &&
1477 (to < to_le32(path[depth].extent->first_block) +
1478 ext4_ext_get_actual_len(path[depth].extent) - 1)) {
/* Split case: trim the original to [ee_block, from) ... */
1480 struct ext4_extent *ex = path[depth].extent, newex;
1481 int unwritten = ext4_ext_is_unwritten(ex);
1482 ext4_lblk_t ee_block = to_le32(ex->first_block);
1483 int32_t len = ext4_ext_get_actual_len(ex);
1484 ext4_fsblk_t newblock =
1485 to + 1 - ee_block + ext4_ext_pblock(ex);
1487 ex->block_count = to_le16(from - ee_block);
1489 ext4_ext_mark_unwritten(ex);
1491 ext4_ext_dirty(inode_ref, path + depth);
/* ... and insert a new extent for the surviving tail (to, end]. */
1493 newex.first_block = to_le32(to + 1);
1494 newex.block_count = to_le16(ee_block + len - 1 - to);
1495 ext4_ext_store_pblock(&newex, newblock);
1497 ext4_ext_mark_unwritten(&newex);
1499 ret = ext4_ext_insert_extent(inode_ref, &path, &newex, 0);
/* Leaf level: clamp the removal range to this leaf and trim it. */
1506 struct ext4_extent_header *eh;
1507 struct ext4_extent *first_ex, *last_ex;
1508 ext4_lblk_t leaf_from, leaf_to;
1509 eh = path[i].header;
1510 ext4_assert(to_le16(eh->entries_count) > 0);
1511 first_ex = EXT_FIRST_EXTENT(eh);
1512 last_ex = EXT_LAST_EXTENT(eh);
1513 leaf_from = to_le32(first_ex->first_block);
1514 leaf_to = to_le32(last_ex->first_block) +
1515 ext4_ext_get_actual_len(last_ex) - 1;
1516 if (leaf_from < from)
1522 ext4_ext_remove_leaf(inode_ref, path, leaf_from,
1524 ext4_ext_drop_refs(inode_ref, path + i, 0);
/* Index level: descend into the next child that still overlaps. */
1529 struct ext4_extent_header *eh;
1530 eh = path[i].header;
1531 if (ext4_ext_more_to_rm(path + i, to)) {
1532 struct ext4_block bh = EXT4_BLOCK_ZERO();
1533 if (path[i + 1].block.lb_id)
1534 ext4_ext_drop_refs(inode_ref, path + i + 1, 0);
1536 ret = read_extent_tree_block(inode_ref,
1537 ext4_idx_pblock(path[i].index),
1538 depth - i - 1, &bh, 0);
1543 ext4_idx_pblock(path[i].index);
1544 path[i + 1].block = bh;
1545 path[i + 1].header = ext_block_hdr(&bh);
1546 path[i + 1].depth = depth - i - 1;
1548 path[i + 1].extent = EXT_FIRST_EXTENT(
1549 path[i + 1].header);
1552 EXT_FIRST_INDEX(path[i + 1].header);
/* Child exhausted: drop the now-empty index and step to the next. */
1557 if (!eh->entries_count)
1558 ret = ext4_ext_remove_idx(inode_ref, path,
1561 path[i - 1].index++;
1566 ext4_block_set(inode_ref->fs->bdev,
1575 /* TODO: flexible tree reduction should be here */
1576 if (path->header->entries_count == 0) {
1578 * truncate to zero freed all the tree,
1579 * so we need to correct eh_depth
1581 ext_inode_hdr(inode_ref->inode)->depth = 0;
1582 ext_inode_hdr(inode_ref->inode)->max_entries_count =
1583 to_le16(ext4_ext_space_root(inode_ref));
1584 ret = ext4_ext_dirty(inode_ref, path);
1588 ext4_ext_drop_refs(inode_ref, path, 0);
/* Split the extent covering logical block @split into two extents at
 * that block. @split_flag selects which half (if either) is marked
 * unwritten afterwards (EXT4_EXT_MARK_UNWRIT1/2). If @split equals the
 * extent's first block no new extent is needed; only the written state
 * changes. On insert failure the original extent length is restored.
 * NOTE(review): several lines (declarations of err/ee_len, error
 * checks, labels) are elided from this extract. */
1594 static int ext4_ext_split_extent_at(struct ext4_inode_ref *inode_ref,
1595 struct ext4_extent_path **ppath,
1596 ext4_lblk_t split, uint32_t split_flag)
1598 struct ext4_extent *ex, newex;
1599 ext4_fsblk_t newblock;
1600 ext4_lblk_t ee_block;
1602 int32_t depth = ext_depth(inode_ref->inode);
/* The leaf-level extent the path currently points at. */
1605 ex = (*ppath)[depth].extent;
1606 ee_block = to_le32(ex->first_block);
1607 ee_len = ext4_ext_get_actual_len(ex);
/* Physical block corresponding to logical block @split. */
1608 newblock = split - ee_block + ext4_ext_pblock(ex);
1610 if (split == ee_block) {
1612 * case b: block @split is the block that the extent begins with
1613 * then we just change the state of the extent, and splitting
/* Only flip written/unwritten state; no second extent needed. */
1616 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
1617 ext4_ext_mark_unwritten(ex);
/* Elided else branch presumably marks initialized — confirm. */
1619 ext4_ext_mark_initialized(ex);
1621 err = ext4_ext_dirty(inode_ref, *ppath + depth);
/* General case: shrink the original to [ee_block, split). */
1625 ex->block_count = to_le16(split - ee_block);
1626 if (split_flag & EXT4_EXT_MARK_UNWRIT1)
1627 ext4_ext_mark_unwritten(ex);
1629 err = ext4_ext_dirty(inode_ref, *ppath + depth);
/* Build the right half [split, ee_block + ee_len). */
1633 newex.first_block = to_le32(split);
1634 newex.block_count = to_le16(ee_len - (split - ee_block));
1635 ext4_ext_store_pblock(&newex, newblock);
1636 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
1637 ext4_ext_mark_unwritten(&newex);
/* NO_COMBINE: keep the halves distinct (their states may differ). */
1638 err = ext4_ext_insert_extent(inode_ref, ppath, &newex,
1639 EXT4_EXT_NO_COMBINE);
1641 goto restore_extent_len;
/* Rollback: insert failed, so restore the original extent length. */
1646 ex->block_count = to_le16(ee_len);
1647 err = ext4_ext_dirty(inode_ref, *ppath + depth);
/* Convert @blocks blocks starting at logical block @split from
 * unwritten to initialized state, splitting the covering extent as
 * needed: one split when the range touches either end of the extent,
 * two splits when it is strictly interior (producing three extents with
 * only the middle one initialized).
 * NOTE(review): the elided lines presumably mark the target piece
 * initialized and dirty it when no split is required — confirm. */
1651 static int ext4_ext_convert_to_initialized(struct ext4_inode_ref *inode_ref,
1652 struct ext4_extent_path **ppath,
1653 ext4_lblk_t split, uint32_t blocks)
1655 int32_t depth = ext_depth(inode_ref->inode), err = EOK;
1656 struct ext4_extent *ex = (*ppath)[depth].extent;
/* The range must start inside the extent the path points at. */
1658 ext4_assert(to_le32(ex->first_block) <= split);
/* Range reaches the extent's end: split once, keep the left piece
 * unwritten (UNWRIT1), right piece becomes initialized. */
1660 if (split + blocks ==
1661 to_le32(ex->first_block) + ext4_ext_get_actual_len(ex)) {
1662 /* split and initialize right part */
1663 err = ext4_ext_split_extent_at(inode_ref, ppath, split,
1664 EXT4_EXT_MARK_UNWRIT1);
/* Range starts at the extent's beginning: split once, keep the
 * right piece unwritten (UNWRIT2). */
1665 } else if (to_le32(ex->first_block) == split) {
1666 /* split and initialize left part */
1667 err = ext4_ext_split_extent_at(inode_ref, ppath, split + blocks,
1668 EXT4_EXT_MARK_UNWRIT2);
1670 /* split 1 extent to 3 and initialize the 2nd */
/* Interior range: first cut at the range's end (both sides still
 * unwritten), then cut at the range's start so the middle piece
 * can be initialized. */
1671 err = ext4_ext_split_extent_at(inode_ref, ppath, split + blocks,
1672 EXT4_EXT_MARK_UNWRIT1 |
1673 EXT4_EXT_MARK_UNWRIT2);
1675 err = ext4_ext_split_extent_at(inode_ref, ppath, split,
1676 EXT4_EXT_MARK_UNWRIT1);
/* Return the first logical block of the next allocated extent after the
 * position described by @path, or EXT_MAX_BLOCKS when the current
 * extent/index is the last one at every level.
 * NOTE(review): this extract elides the return statements inside the
 * loop and the depth decrement; comments describe only visible lines. */
1683 static ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_extent_path *path)
1687 depth = path->depth;
/* Empty zero-depth tree: nothing is allocated at all. */
1689 if (depth == 0 && path->extent == NULL)
1690 return EXT_MAX_BLOCKS;
/* Walk upward from the leaf looking for a sibling to the right. */
1692 while (depth >= 0) {
1693 if (depth == path->depth) {
/* Leaf level: if not at the last extent, the next extent's
 * first_block is the answer (returned on an elided line). */
1695 if (path[depth].extent &&
1696 path[depth].extent !=
1697 EXT_LAST_EXTENT(path[depth].header))
1699 path[depth].extent[1].first_block);
/* Index level: same idea with the next index entry. */
1702 if (path[depth].index !=
1703 EXT_LAST_INDEX(path[depth].header))
1705 path[depth].index[1].first_block);
/* Rightmost at every level: no further allocation exists. */
1710 return EXT_MAX_BLOCKS;
/* Zero-fill @blocks_count physical blocks starting at @block by reading
 * each block through the block cache, memsetting it, and writing it
 * back. Used when handing out blocks from an unwritten extent so stale
 * on-disk data is never exposed.
 * NOTE(review): the parameter line declaring @block and the loop's
 * error-exit lines are elided from this extract. */
1713 static int ext4_ext_zero_unwritten_range(struct ext4_inode_ref *inode_ref,
1715 uint32_t blocks_count)
1719 uint32_t block_size = ext4_sb_get_block_size(&inode_ref->fs->sb);
1720 for (i = 0; i < blocks_count; i++) {
1721 struct ext4_block bh = EXT4_BLOCK_ZERO();
/* Fetch the block (cached); err is presumably checked on an
 * elided line before the memset — confirm. */
1722 err = ext4_block_get(inode_ref->fs->bdev, &bh, block + i);
1726 memset(bh.data, 0, block_size);
/* Mark dirty and release back to the cache/device. */
1728 err = ext4_block_set(inode_ref->fs->bdev, &bh);
1735 int ext4_extent_get_blocks(struct ext4_inode_ref *inode_ref, ext4_fsblk_t iblock,
1736 uint32_t max_blocks, ext4_fsblk_t *result, bool create,
1737 uint32_t *blocks_count)
1739 struct ext4_extent_path *path = NULL;
1740 struct ext4_extent newex, *ex;
1744 uint32_t allocated = 0;
1745 ext4_fsblk_t next, newblock;
1753 /* find extent for this block */
1754 err = ext4_find_extent(inode_ref, iblock, &path, 0);
1760 depth = ext_depth(inode_ref->inode);
1763 * consistent leaf must not be empty
1764 * this situations is possible, though, _during_ tree modification
1765 * this is why assert can't be put in ext4_ext_find_extent()
1767 ex = path[depth].extent;
1769 ext4_lblk_t ee_block = to_le32(ex->first_block);
1770 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
1771 uint16_t ee_len = ext4_ext_get_actual_len(ex);
1772 /* if found exent covers block, simple return it */
1773 if (IN_RANGE(iblock, ee_block, ee_len)) {
1774 /* number of remain blocks in the extent */
1775 allocated = ee_len - (iblock - ee_block);
1777 if (!ext4_ext_is_unwritten(ex)) {
1778 newblock = iblock - ee_block + ee_start;
1787 uint32_t zero_range;
1788 zero_range = allocated;
1789 if (zero_range > max_blocks)
1790 zero_range = max_blocks;
1792 newblock = iblock - ee_block + ee_start;
1793 err = ext4_ext_zero_unwritten_range(inode_ref, newblock,
1798 err = ext4_ext_convert_to_initialized(inode_ref, &path,
1799 iblock, zero_range);
1808 * requested block isn't allocated yet
1809 * we couldn't try to create block if create flag is zero
1815 /* find next allocated block so that we know how many
1816 * blocks we can allocate without ovelapping next extent */
1817 next = ext4_ext_next_allocated_block(path);
1818 allocated = next - iblock;
1819 if (allocated > max_blocks)
1820 allocated = max_blocks;
1822 /* allocate new block */
1823 goal = ext4_ext_find_goal(inode_ref, path, iblock);
1824 newblock = ext4_new_meta_blocks(inode_ref, goal, 0, &allocated, &err);
1828 /* try to insert new extent into found leaf and return */
1829 newex.first_block = to_le32(iblock);
1830 ext4_ext_store_pblock(&newex, newblock);
1831 newex.block_count = to_le16(allocated);
1832 err = ext4_ext_insert_extent(inode_ref, &path, &newex, 0);
1834 /* free data blocks we just allocated */
1835 ext4_ext_free_blocks(inode_ref, ext4_ext_pblock(&newex),
1836 to_le16(newex.block_count), 0);
1840 /* previous routine could use block we allocated */
1841 newblock = ext4_ext_pblock(&newex);
1844 if (allocated > max_blocks)
1845 allocated = max_blocks;
1851 *blocks_count = allocated;
1855 ext4_ext_drop_refs(inode_ref, path, 0);