2 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 /** @addtogroup lwext4
33 * @file ext4_blockdev.c
34 * @brief Block device module.
37 #include <ext4_config.h>
38 #include <ext4_blockdev.h>
39 #include <ext4_errno.h>
40 #include <ext4_debug.h>
/**
 * ext4_block_init: bring up a block device.
 * Verifies that the driver supplied all mandatory low-level callbacks,
 * invokes its open() hook, and flags the device as initialized.
 * NOTE(review): this listing is elided — the open() error check and
 * the final return are not visible in this view.
 */
47 int ext4_block_init(struct ext4_blockdev *bdev)
/*All four low-level driver hooks are mandatory.*/
52 ext4_assert(bdev->open && bdev->close && bdev->bread && bdev->bwrite);
54 /*Low level block init*/
55 rc = bdev->open(bdev);
/*Mark the device as usable for subsequent block-layer operations.*/
59 bdev->flags |= EXT4_BDEV_INITIALIZED;
/**
 * ext4_block_bind_bcache: attach block cache @bc to device @bdev.
 * Both pointers must be non-NULL.
 */
64 int ext4_block_bind_bcache(struct ext4_blockdev *bdev, struct ext4_bcache *bc)
66 ext4_assert(bdev && bc);
/**
 * ext4_block_set_lb_size: set the logical block size of @bdev and
 * derive the logical block count from the physical geometry.
 */
71 void ext4_block_set_lb_size(struct ext4_blockdev *bdev, uint64_t lb_bsize)
73 /*Logical block size has to be a multiple of the physical one.*/
74 ext4_assert(!(lb_bsize % bdev->ph_bsize));
76 bdev->lg_bsize = lb_bsize;
/*Logical block count = total device bytes / logical block size.*/
77 bdev->lg_bcnt = (bdev->ph_bcnt * bdev->ph_bsize) / lb_bsize;
/**
 * ext4_block_fini: shut down the block device.
 * Clears the initialized flag and hands control to the driver's
 * close() hook, whose status is returned to the caller.
 */
81 int ext4_block_fini(struct ext4_blockdev *bdev)
85 bdev->flags &= ~(EXT4_BDEV_INITIALIZED);
87 /*Low level block fini*/
88 return bdev->close(bdev);
/**
 * ext4_block_get: fetch logical block @lba into cache block @b.
 * Rejects the call when the device is uninitialized or @lba is out of
 * the logical range.  In write-back mode a full cache is relieved
 * first: the least-recently-used buffer that is marked free-delayed
 * and holds no references is flushed to disk.  The block is then
 * allocated from the cache; only a cache miss triggers a physical
 * read via bread().
 * NOTE(review): this listing is elided — several error checks and
 * returns between the visible statements are not shown.
 */
92 int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b,
101 ext4_assert(bdev && b);
/*Device must have completed ext4_block_init().*/
103 if(!(bdev->flags & EXT4_BDEV_INITIALIZED))
/*Logical block address must be within the device.*/
106 if(!(lba < bdev->lg_bcnt))
112 /*If cache is full we have to flush it anyway :(*/
113 if(ext4_bcache_is_full(bdev->bc) && bdev->cache_write_back){
/*free_candidate == cnt acts as the "none found" sentinel below.*/
115 uint32_t free_candidate = bdev->bc->cnt;
116 uint32_t min_lru = 0xFFFFFFFF;
/*Scan for the least-recently-used delayed-free, unreferenced entry.*/
118 for (i = 0; i < bdev->bc->cnt; ++i) {
119 /*Check if buffer free was delayed.*/
120 if(!bdev->bc->free_delay[i])
123 /*Check reference counter.*/
124 if(bdev->bc->refctr[i])
127 if(bdev->bc->lru_id[i] < min_lru){
128 min_lru = bdev->bc->lru_id[i];
134 if(free_candidate < bdev->bc->cnt){
135 /*Buffer free was delayed and has no reference. Flush it.*/
136 r = ext4_blocks_set_direct(bdev,
137 bdev->bc->data + bdev->bc->itemsize * free_candidate,
138 bdev->bc->lba[free_candidate], 1);
142 /*No longer delayed.*/
143 bdev->bc->free_delay[free_candidate] = 0;
145 /*Reduce referred block count.*/
146 bdev->bc->ref_blocks--;
/*Take a cache slot; is_new tells whether a device read is needed.*/
151 r = ext4_bcache_alloc(bdev->bc, b, &is_new);
157 /*Block is in cache. Read from physical device is not required.*/
/*Cache miss: translate logical address to physical blocks and read.*/
164 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
165 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
167 r = bdev->bread(bdev, b->data, pba, pb_cnt);
/*Read failed: release the just-allocated cache block.*/
170 ext4_bcache_free(bdev->bc, b, 0);
/**
 * ext4_block_set: release cache block @b, writing it back when dirty.
 * A clean block is simply returned to the cache.  In write-back mode
 * the flush is deferred by marking the block free-delayed.  If other
 * references to the cache entry remain, the block is only flagged
 * dirty; otherwise it is written to the device via bwrite() and its
 * dirty flag cleared.
 * NOTE(review): this listing is elided — error checks/returns between
 * the visible statements are not shown.
 */
179 int ext4_block_set(struct ext4_blockdev *bdev, struct ext4_block *b)
185 ext4_assert(bdev && b);
187 if(!(bdev->flags & EXT4_BDEV_INITIALIZED))
190 /*Doesn't need to write.*/
191 if(!b->dirty && !bdev->bc->dirty[b->cache_id]){
192 ext4_bcache_free(bdev->bc, b, 0);
196 /*Free cache delay mode*/
197 if(bdev->cache_write_back){
199 /*Free cache block and mark it as free-delayed.*/
200 return ext4_bcache_free(bdev->bc, b, bdev->cache_write_back);
/*Someone else still holds this entry: defer the flush, keep it dirty.*/
203 if(bdev->bc->refctr[b->cache_id] > 1){
204 bdev->bc->dirty[b->cache_id] = true;
205 return ext4_bcache_free(bdev->bc, b, 0);
/*Last reference: translate to physical blocks and write back now.*/
209 pba = (b->lb_id * bdev->lg_bsize) / bdev->ph_bsize;
210 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
212 r = bdev->bwrite(bdev, b->data, pba, pb_cnt);
213 bdev->bc->dirty[b->cache_id] = false;
/*Release the cache entry on the error path...*/
216 ext4_bcache_free(bdev->bc, b, 0);
/*...and on the success path.*/
222 ext4_bcache_free(bdev->bc, b, 0);
/**
 * ext4_blocks_get_direct: read @cnt logical blocks starting at @lba
 * straight from the device into @buf, bypassing the block cache.
 */
226 int ext4_blocks_get_direct(struct ext4_blockdev *bdev, void *buf,
227 uint64_t lba, uint32_t cnt)
232 ext4_assert(bdev && buf);
/*Translate logical address and per-block count to physical units.*/
234 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
235 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
238 return bdev->bread(bdev, buf, pba, pb_cnt * cnt);
/**
 * ext4_blocks_set_direct: write @cnt logical blocks starting at @lba
 * straight from @buf to the device, bypassing the block cache.
 */
241 int ext4_blocks_set_direct(struct ext4_blockdev *bdev, const void *buf,
242 uint64_t lba, uint32_t cnt)
247 ext4_assert(bdev && buf);
/*Translate logical address and per-block count to physical units.*/
249 pba = (lba * bdev->lg_bsize) / bdev->ph_bsize;
250 pb_cnt = bdev->lg_bsize / bdev->ph_bsize;
254 return bdev->bwrite(bdev, buf, pba, pb_cnt * cnt);
/**
 * ext4_block_writebytes: byte-granular write of @len bytes from @buf
 * at absolute byte offset @off.  Works in three phases: a possibly
 * unaligned head handled by read-modify-write of one physical block,
 * then whole aligned blocks written directly, then an unaligned tail,
 * with bdev->ph_bbuf used as the bounce buffer.
 * NOTE(review): this listing is elided — error checks and the
 * offset/length bookkeeping between phases are not shown.
 */
258 int ext4_block_writebytes(struct ext4_blockdev *bdev, uint64_t off,
259 const void *buf, uint32_t len)
/*NOTE(review): cast discards const; (const uint8_t *)buf would be cleaner.*/
267 const uint8_t *p = (void *)buf;
269 ext4_assert(bdev && buf);
271 if(!(bdev->flags & EXT4_BDEV_INITIALIZED))
274 block_idx = off / bdev->ph_bsize;
275 block_end = block_idx + len / bdev->ph_bsize;
277 if(!(block_end < bdev->ph_bcnt))
278 return EINVAL; /*Oops. Out of range operation.*/
280 /*OK, let's deal with the first possibly unaligned block.*/
281 unalg = (off & (bdev->ph_bsize - 1));
/*Write no more than the remainder of this physical block.*/
284 uint32_t wlen = (bdev->ph_bsize - unalg) > len ?
285 len : (bdev->ph_bsize - unalg);
/*Read-modify-write: fetch the block, patch the span, store it back.*/
287 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
292 memcpy(bdev->ph_bbuf + unalg, p, wlen);
294 r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
/*Middle phase: whole physical blocks written directly from @buf.*/
305 blen = len / bdev->ph_bsize;
306 r = bdev->bwrite(bdev, p, block_idx, blen);
311 p += bdev->ph_bsize * blen;
312 len -= bdev->ph_bsize * blen;
/*Tail phase: remaining bytes smaller than one physical block.*/
319 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
323 memcpy(bdev->ph_bbuf, p, len);
325 r = bdev->bwrite(bdev, bdev->ph_bbuf, block_idx, 1);
/**
 * ext4_block_readbytes: byte-granular read of bytes at absolute byte
 * offset @off into @buf.  Mirrors ext4_block_writebytes: a possibly
 * unaligned head copied out of one bounce-buffered physical block,
 * then whole aligned blocks read directly into @buf, then an
 * unaligned tail, with bdev->ph_bbuf as the bounce buffer.
 * NOTE(review): this listing is elided — error checks and the
 * offset/length bookkeeping between phases are not shown.
 */
335 int ext4_block_readbytes(struct ext4_blockdev *bdev, uint64_t off, void *buf,
344 uint8_t *p = (void *)buf;
346 ext4_assert(bdev && buf);
348 if(!(bdev->flags & EXT4_BDEV_INITIALIZED))
351 block_idx = off / bdev->ph_bsize;
352 block_end = block_idx + len / bdev->ph_bsize;
354 if(!(block_end < bdev->ph_bcnt))
355 return EINVAL; /*Oops. Out of range operation.*/
357 /*OK, let's deal with the first possibly unaligned block.*/
358 unalg = (off & (bdev->ph_bsize - 1));
/*Read no more than the remainder of this physical block.*/
361 uint32_t rlen = (bdev->ph_bsize - unalg) > len ?
362 len : (bdev->ph_bsize - unalg);
/*Bounce through ph_bbuf and copy out only the requested span.*/
364 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
368 memcpy(p, bdev->ph_bbuf + unalg, rlen);
/*Middle phase: whole physical blocks read directly into @buf.*/
376 blen = len / bdev->ph_bsize;
378 r = bdev->bread(bdev, p, block_idx, blen);
383 p += bdev->ph_bsize * blen;
384 len -= bdev->ph_bsize * blen;
/*Tail phase: remaining bytes smaller than one physical block.*/
391 r = bdev->bread(bdev, bdev->ph_bbuf, block_idx, 1);
395 memcpy(p, bdev->ph_bbuf, len);
401 int ext4_block_cache_write_back(struct ext4_blockdev *bdev,
408 bdev->cache_write_back++;
410 if(!on_off && bdev->cache_write_back)
411 bdev->cache_write_back--;
413 /*Flush all delayed cache blocks*/
414 if(!bdev->cache_write_back){
415 for (i = 0; i < bdev->bc->cnt; ++i) {
417 /*Check if buffer free was delayed.*/
418 if(!bdev->bc->free_delay[i])
421 /*Check reference counter.*/
422 if(bdev->bc->refctr[i])
425 /*Buffer free was delayed and have no reference. Flush it.*/
426 r = ext4_blocks_set_direct(bdev,
427 bdev->bc->data + bdev->bc->itemsize * i,
428 bdev->bc->lba[i], 1);
432 /*No delayed anymore*/
433 bdev->bc->free_delay[i] = 0;
435 /*Reduce refered block count*/
436 bdev->bc->ref_blocks--;