2 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 /** @addtogroup lwext4
33 * @file ext4_blockdev.c
34 * @brief Block device module.
#include "ext4_config.h"
#include "ext4_blockdev.h"
#include "ext4_errno.h"
#include "ext4_debug.h"

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
45 int ext4_block_init(struct ext4_blockdev *bdev)
49 ext4_assert(bdev->bdif);
50 ext4_assert(bdev->bdif->open &&
55 if (bdev->bdif->ph_refctr) {
56 bdev->bdif->ph_refctr++;
60 /*Low level block init*/
61 rc = bdev->bdif->open(bdev);
65 bdev->bdif->ph_refctr = 1;
69 int ext4_block_bind_bcache(struct ext4_blockdev *bdev, struct ext4_bcache *bc)
71 ext4_assert(bdev && bc);
77 void ext4_block_set_lb_size(struct ext4_blockdev *bdev, uint64_t lb_bsize)
79 /*Logical block size has to be multiply of physical */
80 ext4_assert(!(lb_bsize % bdev->bdif->ph_bsize));
82 bdev->lg_bsize = lb_bsize;
83 bdev->lg_bcnt = (bdev->bdif->ph_bcnt * bdev->bdif->ph_bsize) / lb_bsize;
86 int ext4_block_fini(struct ext4_blockdev *bdev)
90 if (!bdev->bdif->ph_refctr)
93 bdev->bdif->ph_refctr--;
94 if (bdev->bdif->ph_refctr)
97 /*Low level block fini*/
98 return bdev->bdif->close(bdev);
101 int ext4_block_flush_buf(struct ext4_blockdev *bdev, struct ext4_buf *buf)
104 struct ext4_bcache *bc = bdev->bc;
105 /*Only flushing unreferenced buffer is allowed.*/
106 ext4_assert(!buf->refctr);
108 if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
109 r = ext4_blocks_set_direct(bdev, buf->data, buf->lba, 1);
113 buf->end_write(bc, buf, r, buf->end_write_arg);
118 ext4_bcache_remove_dirty_node(bc, buf);
119 ext4_bcache_clear_flag(buf, BC_DIRTY);
121 buf->end_write(bc, buf, r, buf->end_write_arg);
127 int ext4_block_cache_shake(struct ext4_blockdev *bdev)
129 struct ext4_buf *buf;
130 while (!RB_EMPTY(&bdev->bc->lru_root) &&
131 ext4_bcache_is_full(bdev->bc)) {
133 buf = ext4_buf_lowest_lru(bdev->bc);
135 if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
136 int r = ext4_block_flush_buf(bdev, buf);
142 ext4_bcache_drop_buf(bdev->bc, buf);
147 int ext4_block_get_noread(struct ext4_blockdev *bdev, struct ext4_block *b,
153 ext4_assert(bdev && b);
155 if (!bdev->bdif->ph_refctr)
158 if (!(lba < bdev->lg_bcnt))
163 /*If cache is full we have to (flush and) drop it anyway :(*/
164 r = ext4_block_cache_shake(bdev);
168 r = ext4_bcache_alloc(bdev->bc, b, &is_new);
178 int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b,
181 int r = ext4_block_get_noread(bdev, b, lba);
185 if (ext4_bcache_test_flag(b->buf, BC_UPTODATE)) {
186 /* Data in the cache is up-to-date.
187 * Reading from physical device is not required */
191 r = ext4_blocks_get_direct(bdev, b->data, lba, 1);
193 ext4_bcache_free(bdev->bc, b);
198 /* Mark buffer up-to-date, since
199 * fresh data is read from physical device just now. */
200 ext4_bcache_set_flag(b->buf, BC_UPTODATE);
204 int ext4_block_set(struct ext4_blockdev *bdev, struct ext4_block *b)
206 ext4_assert(bdev && b);
209 if (!bdev->bdif->ph_refctr)
212 return ext4_bcache_free(bdev->bc, b);
215 int ext4_blocks_get_direct(struct ext4_blockdev *bdev, void *buf, uint64_t lba,
221 ext4_assert(bdev && buf);
223 pba = (lba * bdev->lg_bsize) / bdev->bdif->ph_bsize;
224 pba += bdev->ph_blk_offset;
225 pb_cnt = bdev->lg_bsize / bdev->bdif->ph_bsize;
228 return bdev->bdif->bread(bdev, buf, pba, pb_cnt * cnt);
231 int ext4_blocks_set_direct(struct ext4_blockdev *bdev, const void *buf,
232 uint64_t lba, uint32_t cnt)
237 ext4_assert(bdev && buf);
239 pba = (lba * bdev->lg_bsize) / bdev->bdif->ph_bsize;
240 pba += bdev->ph_blk_offset;
241 pb_cnt = bdev->lg_bsize / bdev->bdif->ph_bsize;
245 return bdev->bdif->bwrite(bdev, buf, pba, pb_cnt * cnt);
248 int ext4_block_writebytes(struct ext4_blockdev *bdev, uint64_t off,
249 const void *buf, uint32_t len)
257 const uint8_t *p = (void *)buf;
259 ext4_assert(bdev && buf);
261 if (!bdev->bdif->ph_refctr)
264 block_idx = (off / bdev->bdif->ph_bsize) + bdev->ph_blk_offset;
265 block_end = block_idx + len / bdev->bdif->ph_bsize;
267 if (!(block_end < bdev->bdif->ph_bcnt))
268 return EINVAL; /*Ups. Out of range operation*/
270 /*OK lets deal with the first possible unaligned block*/
271 unalg = (off & (bdev->bdif->ph_bsize - 1));
274 uint32_t wlen = (bdev->bdif->ph_bsize - unalg) > len
276 : (bdev->bdif->ph_bsize - unalg);
278 r = bdev->bdif->bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
282 memcpy(bdev->bdif->ph_bbuf + unalg, p, wlen);
284 r = bdev->bdif->bwrite(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
294 blen = len / bdev->bdif->ph_bsize;
295 r = bdev->bdif->bwrite(bdev, p, block_idx, blen);
299 p += bdev->bdif->ph_bsize * blen;
300 len -= bdev->bdif->ph_bsize * blen;
306 r = bdev->bdif->bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
310 memcpy(bdev->bdif->ph_bbuf, p, len);
312 r = bdev->bdif->bwrite(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
320 int ext4_block_readbytes(struct ext4_blockdev *bdev, uint64_t off, void *buf,
329 uint8_t *p = (void *)buf;
331 ext4_assert(bdev && buf);
333 if (!bdev->bdif->ph_refctr)
336 block_idx = (off / bdev->bdif->ph_bsize) + bdev->ph_blk_offset;
337 block_end = block_idx + len / bdev->bdif->ph_bsize;
339 if (!(block_end < bdev->bdif->ph_bcnt))
340 return EINVAL; /*Ups. Out of range operation*/
342 /*OK lets deal with the first possible unaligned block*/
343 unalg = (off & (bdev->bdif->ph_bsize - 1));
346 uint32_t rlen = (bdev->bdif->ph_bsize - unalg) > len
348 : (bdev->bdif->ph_bsize - unalg);
350 r = bdev->bdif->bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
354 memcpy(p, bdev->bdif->ph_bbuf + unalg, rlen);
362 blen = len / bdev->bdif->ph_bsize;
364 r = bdev->bdif->bread(bdev, p, block_idx, blen);
368 p += bdev->bdif->ph_bsize * blen;
369 len -= bdev->bdif->ph_bsize * blen;
375 r = bdev->bdif->bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
379 memcpy(p, bdev->bdif->ph_bbuf, len);
385 int ext4_block_cache_write_back(struct ext4_blockdev *bdev, uint8_t on_off)
388 struct ext4_buf *buf;
391 bdev->cache_write_back++;
393 if (!on_off && bdev->cache_write_back)
394 bdev->cache_write_back--;
396 if (bdev->cache_write_back)
399 /*Flush all delayed cache blocks*/
400 while (!SLIST_EMPTY(&bdev->bc->dirty_list)) {
402 buf = SLIST_FIRST(&bdev->bc->dirty_list);
404 r = ext4_block_flush_buf(bdev, buf);