/*
 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup lwext4
 * @{
 */
/**
 * @file  ext4_blockdev.c
 * @brief Block device module.
 */
#include "ext4_config.h"
#include "ext4_blockdev.h"
#include "ext4_errno.h"
#include "ext4_debug.h"

#include <string.h>
#include <stdbool.h>
45 int ext4_block_init(struct ext4_blockdev *bdev)
50 ext4_assert(bdev->open && bdev->close && bdev->bread && bdev->bwrite);
52 /*Low level block init*/
53 rc = bdev->open(bdev);
57 bdev->flags |= EXT4_BDEV_INITIALIZED;
62 int ext4_block_bind_bcache(struct ext4_blockdev *bdev, struct ext4_bcache *bc)
64 ext4_assert(bdev && bc);
70 void ext4_block_set_lb_size(struct ext4_blockdev *bdev, uint64_t lb_bsize)
72 /*Logical block size has to be multiply of physical */
73 ext4_assert(!(lb_bsize % bdev->ph_bsize));
75 bdev->lg_bsize = lb_bsize;
76 bdev->lg_bcnt = (bdev->ph_bcnt * bdev->ph_bsize) / lb_bsize;
79 int ext4_block_fini(struct ext4_blockdev *bdev)
83 bdev->flags &= ~(EXT4_BDEV_INITIALIZED);
85 /*Low level block fini*/
86 return bdev->close(bdev);
89 int ext4_block_flush_buf(struct ext4_blockdev *bdev, struct ext4_buf *buf)
92 struct ext4_bcache *bc = bdev->bc;
93 /*Only flushing unreferenced buffer is allowed.*/
94 ext4_assert(!buf->refctr);
96 if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
97 r = ext4_blocks_set_direct(bdev, buf->data, buf->lba, 1);
101 buf->end_write(bc, buf, r, buf->end_write_arg);
106 ext4_bcache_remove_dirty_node(bc, buf);
107 ext4_bcache_clear_flag(buf, BC_DIRTY);
109 buf->end_write(bc, buf, r, buf->end_write_arg);
115 int ext4_block_cache_shake(struct ext4_blockdev *bdev)
117 struct ext4_buf *buf;
118 while (!RB_EMPTY(&bdev->bc->lru_root) &&
119 ext4_bcache_is_full(bdev->bc)) {
121 buf = ext4_buf_lowest_lru(bdev->bc);
123 if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
124 int r = ext4_block_flush_buf(bdev, buf);
130 ext4_bcache_drop_buf(bdev->bc, buf);
135 int ext4_block_get_noread(struct ext4_blockdev *bdev, struct ext4_block *b,
141 ext4_assert(bdev && b);
143 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
146 if (!(lba < bdev->lg_bcnt))
151 /*If cache is full we have to (flush and) drop it anyway :(*/
152 r = ext4_block_cache_shake(bdev);
156 r = ext4_bcache_alloc(bdev->bc, b, &is_new);
166 int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b,
169 int r = ext4_block_get_noread(bdev, b, lba);
173 if (ext4_bcache_test_flag(b->buf, BC_UPTODATE)) {
174 /* Data in the cache is up-to-date.
175 * Reading from physical device is not required */
179 r = ext4_blocks_get_direct(bdev, b->data, lba, 1);
181 ext4_bcache_free(bdev->bc, b);
186 /* Mark buffer up-to-date, since
187 * fresh data is read from physical device just now. */
188 ext4_bcache_set_flag(b->buf, BC_UPTODATE);
192 int ext4_block_set(struct ext4_blockdev *bdev, struct ext4_block *b)
194 ext4_assert(bdev && b);
197 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
200 return ext4_bcache_free(bdev->bc, b);
203 int ext4_blocks_get_direct(struct ext4_blockdev *bdev, void *buf, uint64_t lba,
209 ext4_assert(bdev && buf);
211 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
212 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
215 return bdev->bread(bdev, buf, pba, pb_cnt * cnt);
218 int ext4_blocks_set_direct(struct ext4_blockdev *bdev, const void *buf,
219 uint64_t lba, uint32_t cnt)
224 ext4_assert(bdev && buf);
226 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
227 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
231 return bdev->bwrite(bdev, buf, pba, pb_cnt * cnt);
234 int ext4_block_writebytes(struct ext4_blockdev *bdev, uint64_t off,
235 const void *buf, uint32_t len)
243 const uint8_t *p = (void *)buf;
245 ext4_assert(bdev && buf);
247 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
250 block_idx = off / bdev->ph_bsize;
251 block_end = block_idx + len / bdev->ph_bsize;
253 if (!(block_end < bdev->ph_bcnt))
254 return EINVAL; /*Ups. Out of range operation*/
256 /*OK lets deal with the first possible unaligned block*/
257 unalg = (off & (bdev->ph_bsize - 1));
260 uint32_t wlen = (bdev->ph_bsize - unalg) > len
262 : (bdev->ph_bsize - unalg);
264 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
268 memcpy(bdev->ph_bbuf + unalg, p, wlen);
270 r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
280 blen = len / bdev->ph_bsize;
281 r = bdev->bwrite(bdev, p, block_idx, blen);
285 p += bdev->ph_bsize * blen;
286 len -= bdev->ph_bsize * blen;
292 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
296 memcpy(bdev->ph_bbuf, p, len);
298 r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
306 int ext4_block_readbytes(struct ext4_blockdev *bdev, uint64_t off, void *buf,
315 uint8_t *p = (void *)buf;
317 ext4_assert(bdev && buf);
319 if (!(bdev->flags & EXT4_BDEV_INITIALIZED))
322 block_idx = off / bdev->ph_bsize;
323 block_end = block_idx + len / bdev->ph_bsize;
325 if (!(block_end < bdev->ph_bcnt))
326 return EINVAL; /*Ups. Out of range operation*/
328 /*OK lets deal with the first possible unaligned block*/
329 unalg = (off & (bdev->ph_bsize - 1));
332 uint32_t rlen = (bdev->ph_bsize - unalg) > len
334 : (bdev->ph_bsize - unalg);
336 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
340 memcpy(p, bdev->ph_bbuf + unalg, rlen);
348 blen = len / bdev->ph_bsize;
350 r = bdev->bread(bdev, p, block_idx, blen);
354 p += bdev->ph_bsize * blen;
355 len -= bdev->ph_bsize * blen;
361 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
365 memcpy(p, bdev->ph_bbuf, len);
371 int ext4_block_cache_write_back(struct ext4_blockdev *bdev, uint8_t on_off)
374 struct ext4_buf *buf;
377 bdev->cache_write_back++;
379 if (!on_off && bdev->cache_write_back)
380 bdev->cache_write_back--;
382 if (bdev->cache_write_back)
385 /*Flush all delayed cache blocks*/
386 while (!SLIST_EMPTY(&bdev->bc->dirty_list)) {
388 buf = SLIST_FIRST(&bdev->bc->dirty_list);
390 r = ext4_block_flush_buf(bdev, buf);