2 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 /** @addtogroup lwext4
33 * @file ext4_blockdev.c
34 * @brief Block device module.
37 #include "ext4_config.h"
38 #include "ext4_blockdev.h"
39 #include "ext4_errno.h"
40 #include "ext4_debug.h"
/**@brief Initialize the block device: open the low-level driver and mark
 *        the device as ready for block I/O.
 * @param bdev block device descriptor (driver callbacks must be set)
 * @return standard ext4 error code from the driver's open() callback
 */
45 int ext4_block_init(struct ext4_blockdev *bdev)
/* All four driver callbacks must be wired up before the device is usable. */
50 ext4_assert(bdev->open && bdev->close && bdev->bread && bdev->bwrite);
52 /*Low level block init*/
53 rc = bdev->open(bdev);
/* NOTE(review): rc is presumably checked before this point — the error
 * path is not visible in this view; confirm against the full file. */
57 bdev->flags |= EXT4_BDEV_INITIALIZED;
/**@brief Bind a block cache to the block device.
 * @param bdev block device descriptor
 * @param bc   block cache to attach; both pointers must be non-NULL
 */
62 int ext4_block_bind_bcache(struct ext4_blockdev *bdev, struct ext4_bcache *bc)
64 ext4_assert(bdev && bc);
/**@brief Set the logical block size and derive the logical block count
 *        from the device's physical geometry.
 * @param bdev     block device descriptor
 * @param lb_bsize logical block size in bytes; must be a multiple of the
 *                 physical block size
 */
69 void ext4_block_set_lb_size(struct ext4_blockdev *bdev, uint64_t lb_bsize)
71 /*Logical block size has to be a multiple of the physical block size. */
72 ext4_assert(!(lb_bsize % bdev->ph_bsize));
74 bdev->lg_bsize = lb_bsize;
/* Logical block count = total device capacity in bytes / logical size. */
75 bdev->lg_bcnt = (bdev->ph_bcnt * bdev->ph_bsize) / lb_bsize;
/**@brief Finalize the block device: clear the initialized flag and close
 *        the low-level driver.
 * @param bdev block device descriptor
 * @return status of the driver's close() callback
 */
78 int ext4_block_fini(struct ext4_blockdev *bdev)
82 bdev->flags &= ~(EXT4_BDEV_INITIALIZED);
84 /*Low level block fini*/
85 return bdev->close(bdev);
/**@brief Acquire a cache block for @p lba WITHOUT reading its contents
 *        from the physical device. If the cache is full while write-back
 *        mode is active, the least-recently-used delayed (dirty, delayed
 *        free) buffer with no references is flushed to make room.
 * @param bdev block device descriptor (must be initialized)
 * @param b    output block handle
 * @param lba  logical block address; must be below bdev->lg_bcnt
 * @return standard ext4 error code
 */
88 int ext4_block_get_noread(struct ext4_blockdev *bdev, struct ext4_block *b,
95 ext4_assert(bdev && b);
/* Refuse I/O on a device that was never opened (or already closed). */
97 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
/* Out-of-range logical block address. */
100 if (!(lba < bdev->lg_bcnt))
106 /*If cache is full we have to flush it anyway :(*/
107 if (ext4_bcache_is_full(bdev->bc) && bdev->cache_write_back) {
/* free_candidate == cnt acts as the "none found" sentinel below. */
109 uint32_t free_candidate = bdev->bc->cnt;
110 uint32_t min_lru = 0xFFFFFFFF;
/* Scan for the least-recently-used delayed-free buffer to evict. */
112 for (i = 0; i < bdev->bc->cnt; ++i) {
113 /*Check if buffer free was delayed.*/
114 if (!bdev->bc->free_delay[i])
117 /*Check reference counter.*/
118 if (bdev->bc->refctr[i])
/* Track the entry with the smallest (oldest) LRU id. */
121 if (bdev->bc->lru_id[i] < min_lru) {
122 min_lru = bdev->bc->lru_id[i];
128 if (free_candidate < bdev->bc->cnt) {
129 /*Buffer free was delayed and has no reference. Flush
/* Write the victim buffer straight to disk, bypassing the cache. */
131 r = ext4_blocks_set_direct(
132 bdev, bdev->bc->data +
133 bdev->bc->itemsize * free_candidate,
134 bdev->bc->lba[free_candidate], 1);
138 /*No delayed anymore*/
139 bdev->bc->free_delay[free_candidate] = 0;
141 /*Reduce reference counter*/
142 bdev->bc->ref_blocks--;
/* Now a slot is guaranteed; allocate (or look up) the cache entry. */
146 r = ext4_bcache_alloc(bdev->bc, b, &is_new);
/**@brief Acquire a cache block for @p lba and ensure its data is valid:
 *        cache hits return immediately, misses are read from the device.
 * @param bdev block device descriptor
 * @param b    output block handle
 * @param lba  logical block address
 * @return standard ext4 error code
 */
156 int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b,
161 int r = ext4_block_get_noread(bdev, b, lba);
166 /*Block is in cache. Read from physical device is not required*/
/* Translate logical address to physical: pb_cnt physical blocks make up
 * one logical block. */
170 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
171 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
173 r = bdev->bread(bdev, b->data, pba, pb_cnt);
/* On read failure the freshly allocated cache entry is released. */
176 ext4_bcache_free(bdev->bc, b, 0);
/* Read succeeded: the cached copy now matches the on-disk data. */
181 ext4_bcache_set_flag(bdev->bc, b->cache_id, BC_UPTODATE);
/**@brief Release a block previously acquired with ext4_block_get*().
 *        Dirty blocks are written back to the device unless write-back
 *        caching is active (then the flush is delayed) or other users
 *        still hold a reference (then only the dirty flag is kept).
 * @param bdev block device descriptor (must be initialized)
 * @param b    block handle to release
 * @return standard ext4 error code
 */
187 int ext4_block_set(struct ext4_blockdev *bdev, struct ext4_block *b)
193 ext4_assert(bdev && b);
195 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
198 /*Buffer is not marked dirty and is stale*/
199 if (!b->uptodate && !b->dirty)
200 ext4_bcache_clear_flag(bdev->bc, b->cache_id, BC_UPTODATE);
202 /*No need to write.*/
204 !ext4_bcache_test_flag(bdev->bc, b->cache_id, BC_DIRTY)) {
205 ext4_bcache_free(bdev->bc, b, 0);
/* Block is being released dirty-or-uptodate: keep it marked valid. */
208 ext4_bcache_set_flag(bdev->bc, b->cache_id, BC_UPTODATE);
210 /*Free cache delay mode*/
211 if (bdev->cache_write_back) {
213 /*Free cache block and mark as free delayed*/
214 return ext4_bcache_free(bdev->bc, b, bdev->cache_write_back);
/* Another holder still references this block: defer the physical write,
 * just record that it is dirty. */
217 if (bdev->bc->refctr[b->cache_id] > 1) {
218 ext4_bcache_set_flag(bdev->bc, b->cache_id, BC_DIRTY);
219 return ext4_bcache_free(bdev->bc, b, 0);
/* Last reference and not in write-back mode: flush to disk now. */
222 pba = (b->lb_id * bdev->lg_bsize) / bdev->ph_bsize;
223 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
225 r = bdev->bwrite(bdev, b->data, pba, pb_cnt);
226 ext4_bcache_clear_flag(bdev->bc, b->cache_id, BC_DIRTY);
/* Write failed: the cached copy can no longer be trusted. */
229 ext4_bcache_clear_flag(bdev->bc, b->cache_id, BC_UPTODATE);
230 ext4_bcache_free(bdev->bc, b, 0);
236 ext4_bcache_free(bdev->bc, b, 0);
/**@brief Read @p cnt logical blocks starting at @p lba directly from the
 *        device into @p buf, bypassing the block cache.
 * @param bdev block device descriptor
 * @param buf  destination buffer (cnt * lg_bsize bytes)
 * @param lba  first logical block address
 * @param cnt  number of logical blocks to read
 * @return status of the driver's bread() callback
 */
240 int ext4_blocks_get_direct(struct ext4_blockdev *bdev, void *buf, uint64_t lba,
246 ext4_assert(bdev && buf);
/* Convert logical address/count to physical units for the driver. */
248 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
249 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
252 return bdev->bread(bdev, buf, pba, pb_cnt * cnt);
/**@brief Write @p cnt logical blocks starting at @p lba directly to the
 *        device from @p buf, bypassing the block cache.
 * @param bdev block device descriptor
 * @param buf  source buffer (cnt * lg_bsize bytes)
 * @param lba  first logical block address
 * @param cnt  number of logical blocks to write
 * @return status of the driver's bwrite() callback
 */
255 int ext4_blocks_set_direct(struct ext4_blockdev *bdev, const void *buf,
256 uint64_t lba, uint32_t cnt)
261 ext4_assert(bdev && buf);
/* Convert logical address/count to physical units for the driver. */
263 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
264 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
268 return bdev->bwrite(bdev, buf, pba, pb_cnt * cnt);
/**@brief Write @p len bytes at byte offset @p off, handling unaligned
 *        head and tail via read-modify-write of a single physical block
 *        and writing the aligned middle directly.
 * @param bdev block device descriptor (must be initialized)
 * @param off  byte offset on the device
 * @param buf  source data
 * @param len  number of bytes to write
 * @return standard ext4 error code
 */
271 int ext4_block_writebytes(struct ext4_blockdev *bdev, uint64_t off,
272 const void *buf, uint32_t len)
/* NOTE(review): casting through void* drops the const qualifier here;
 * p is only read below, so this is benign but worth cleaning up. */
280 const uint8_t *p = (void *)buf;
282 ext4_assert(bdev && buf);
284 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
/* Range check in physical-block units. */
287 block_idx = off / bdev->ph_bsize;
288 block_end = block_idx + len / bdev->ph_bsize;
290 if (!(block_end < bdev->ph_bcnt))
291 return EINVAL; /*Oops. Out of range operation*/
293 /*OK lets deal with the first possible unaligned block*/
/* Assumes ph_bsize is a power of two — masking extracts the offset
 * within the first physical block. */
294 unalg = (off & (bdev->ph_bsize - 1));
/* Write at most to the end of this block, or len if smaller. */
297 uint32_t wlen = (bdev->ph_bsize - unalg) > len
299 : (bdev->ph_bsize - unalg);
/* Read-modify-write via the bounce buffer ph_bbuf. */
301 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
306 memcpy(bdev->ph_bbuf + unalg, p, wlen);
308 r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
/* Aligned middle: write whole physical blocks straight from p. */
318 blen = len / bdev->ph_bsize;
319 r = bdev->bwrite(bdev, p, block_idx, blen);
324 p += bdev->ph_bsize * blen;
325 len -= bdev->ph_bsize * blen;
/* Unaligned tail: read-modify-write the final partial block. */
331 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
335 memcpy(bdev->ph_bbuf, p, len);
337 r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
/**@brief Read @p len bytes from byte offset @p off, handling unaligned
 *        head and tail via the bounce buffer and reading the aligned
 *        middle directly into @p buf.
 * @param bdev block device descriptor (must be initialized)
 * @param off  byte offset on the device
 * @param buf  destination buffer
 * @param len  number of bytes to read
 * @return standard ext4 error code
 */
346 int ext4_block_readbytes(struct ext4_blockdev *bdev, uint64_t off, void *buf,
355 uint8_t *p = (void *)buf;
357 ext4_assert(bdev && buf);
359 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
/* Range check in physical-block units. */
362 block_idx = off / bdev->ph_bsize;
363 block_end = block_idx + len / bdev->ph_bsize;
365 if (!(block_end < bdev->ph_bcnt))
366 return EINVAL; /*Oops. Out of range operation*/
368 /*OK lets deal with the first possible unaligned block*/
/* Assumes ph_bsize is a power of two — masking extracts the offset
 * within the first physical block. */
369 unalg = (off & (bdev->ph_bsize - 1));
/* Read at most to the end of this block, or len if smaller. */
372 uint32_t rlen = (bdev->ph_bsize - unalg) > len
374 : (bdev->ph_bsize - unalg);
/* Head: read the whole block into the bounce buffer, copy the slice. */
376 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
380 memcpy(p, bdev->ph_bbuf + unalg, rlen);
/* Aligned middle: read whole physical blocks straight into p. */
388 blen = len / bdev->ph_bsize;
390 r = bdev->bread(bdev, p, block_idx, blen);
395 p += bdev->ph_bsize * blen;
396 len -= bdev->ph_bsize * blen;
/* Tail: read the final partial block through the bounce buffer. */
402 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
406 memcpy(p, bdev->ph_bbuf, len);
/**@brief Enable or disable write-back cache mode. The mode is reference
 *        counted: each enable increments cache_write_back, each disable
 *        decrements it; when the count reaches zero all delayed-free
 *        (dirty) cache blocks are flushed to the device.
 * @param bdev   block device descriptor
 * @param on_off nonzero to enable, zero to disable
 * @return standard ext4 error code
 */
412 int ext4_block_cache_write_back(struct ext4_blockdev *bdev, uint8_t on_off)
/* NOTE(review): presumably guarded by `if (on_off)` on a line not
 * visible here — confirm against the full file. */
418 bdev->cache_write_back++;
420 if (!on_off && bdev->cache_write_back)
421 bdev->cache_write_back--;
423 /*Flush all delayed cache blocks*/
424 if (!bdev->cache_write_back) {
425 for (i = 0; i < bdev->bc->cnt; ++i) {
427 /*Check if buffer free was delayed.*/
428 if (!bdev->bc->free_delay[i])
431 /*Check reference counter.*/
432 if (bdev->bc->refctr[i])
435 /*Buffer free was delayed and has no reference. Flush
/* Write the buffer straight to disk, bypassing the cache. */
437 r = ext4_blocks_set_direct(
438 bdev, bdev->bc->data + bdev->bc->itemsize * i,
439 bdev->bc->lba[i], 1);
443 /*No delayed anymore*/
444 bdev->bc->free_delay[i] = 0;
446 /*Reduce reference counter*/
447 bdev->bc->ref_blocks--;