ext4_bcache: do not allow cache shaking when callback is invoked.
[lwext4.git] / lwext4 / ext4_blockdev.c
/*
 * Copyright (c) 2013 Grzegorz Kostka (kostka.grzegorz@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup lwext4
 * @{
 */
/**
 * @file  ext4_blockdev.c
 * @brief Block device module.
 */

#include "ext4_config.h"
#include "ext4_blockdev.h"
#include "ext4_errno.h"
#include "ext4_debug.h"

#include <string.h>
#include <stdlib.h>

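/* Thin wrappers around the low-level block device interface: serialize
 * raw reads/writes through the optional lock/unlock hooks and keep the
 * per-device bread/bwrite counters up to date. */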
static void ext4_bdif_lock(struct ext4_blockdev *bdev)
{
        if (!bdev->bdif->lock)
                return;

        int r = bdev->bdif->lock(bdev);
        ext4_assert(r == EOK);
}

static void ext4_bdif_unlock(struct ext4_blockdev *bdev)
{
        if (!bdev->bdif->unlock)
                return;

        int r = bdev->bdif->unlock(bdev);
        ext4_assert(r == EOK);
}

static int ext4_bdif_bread(struct ext4_blockdev *bdev, void *buf,
                           uint64_t blk_id, uint32_t blk_cnt)
{
        ext4_bdif_lock(bdev);
        int r = bdev->bdif->bread(bdev, buf, blk_id, blk_cnt);
        bdev->bdif->bread_ctr++;
        ext4_bdif_unlock(bdev);
        return r;
}

static int ext4_bdif_bwrite(struct ext4_blockdev *bdev, const void *buf,
                            uint64_t blk_id, uint32_t blk_cnt)
{
        ext4_bdif_lock(bdev);
        int r = bdev->bdif->bwrite(bdev, buf, blk_id, blk_cnt);
        bdev->bdif->bwrite_ctr++;
        ext4_bdif_unlock(bdev);
        return r;
}

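/**@brief Open the underlying block device. Opens are reference counted:
 *        only the first caller invokes the low-level open callback, later
 *        calls just increment ph_refctr. */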
int ext4_block_init(struct ext4_blockdev *bdev)
{
        int rc;
        ext4_assert(bdev);
        ext4_assert(bdev->bdif);
        ext4_assert(bdev->bdif->open &&
                   bdev->bdif->close &&
                   bdev->bdif->bread &&
                   bdev->bdif->bwrite);

        if (bdev->bdif->ph_refctr) {
                bdev->bdif->ph_refctr++;
                return EOK;
        }

        /*Low level block init*/
        rc = bdev->bdif->open(bdev);
        if (rc != EOK)
                return rc;

        bdev->bdif->ph_refctr = 1;
        return EOK;
}

int ext4_block_bind_bcache(struct ext4_blockdev *bdev, struct ext4_bcache *bc)
{
        ext4_assert(bdev && bc);
        bdev->bc = bc;
        bc->bdev = bdev;
        return EOK;
}

void ext4_block_set_lb_size(struct ext4_blockdev *bdev, uint64_t lb_bsize)
{
        /* Logical block size has to be a multiple of the physical block size */
        ext4_assert(!(lb_bsize % bdev->bdif->ph_bsize));

        bdev->lg_bsize = lb_bsize;
        bdev->lg_bcnt = bdev->part_size / lb_bsize;
}

int ext4_block_fini(struct ext4_blockdev *bdev)
{
        ext4_assert(bdev);

        if (!bdev->bdif->ph_refctr)
                return EOK;

        bdev->bdif->ph_refctr--;
        if (bdev->bdif->ph_refctr)
                return EOK;

        /*Low level block fini*/
        return bdev->bdif->close(bdev);
}

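/**@brief Write one dirty, up-to-date buffer back to the device. The
 *        end_write completion callback is invoked with bc->dont_shake set,
 *        so the callback cannot trigger cache shaking and evict buffers
 *        while the flush is still in progress. */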
int ext4_block_flush_buf(struct ext4_blockdev *bdev, struct ext4_buf *buf)
{
        int r;
        struct ext4_bcache *bc = bdev->bc;

        if (ext4_bcache_test_flag(buf, BC_DIRTY) &&
            ext4_bcache_test_flag(buf, BC_UPTODATE)) {
                r = ext4_blocks_set_direct(bdev, buf->data, buf->lba, 1);
                if (r) {
                        if (buf->end_write) {
                                bc->dont_shake = true;
                                buf->end_write(bc, buf, r, buf->end_write_arg);
                                bc->dont_shake = false;
                        }

                        return r;
                }

                ext4_bcache_remove_dirty_node(bc, buf);
                ext4_bcache_clear_flag(buf, BC_DIRTY);
                if (buf->end_write) {
                        bc->dont_shake = true;
                        buf->end_write(bc, buf, r, buf->end_write_arg);
                        bc->dont_shake = false;
                }
        }
        return EOK;
}

int ext4_block_flush_lba(struct ext4_blockdev *bdev, uint64_t lba)
{
        int r = EOK;
        struct ext4_buf *buf;
        struct ext4_block b;
        buf = ext4_bcache_find_get(bdev->bc, &b, lba);
        if (buf) {
                r = ext4_block_flush_buf(bdev, buf);
                ext4_bcache_free(bdev->bc, &b);
        }
        return r;
}

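/**@brief Evict least-recently-used buffers (flushing dirty ones first)
 *        until the cache is no longer full. Does nothing while dont_shake
 *        is set, i.e. when entered from a flush completion callback. */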
int ext4_block_cache_shake(struct ext4_blockdev *bdev)
{
        int r = EOK;
        struct ext4_buf *buf;
        if (bdev->bc->dont_shake)
                return EOK;

        while (!RB_EMPTY(&bdev->bc->lru_root) &&
                ext4_bcache_is_full(bdev->bc)) {

                buf = ext4_buf_lowest_lru(bdev->bc);
                ext4_assert(buf);
                if (ext4_bcache_test_flag(buf, BC_DIRTY)) {
                        r = ext4_block_flush_buf(bdev, buf);
                        if (r != EOK)
                                break;
                }

                ext4_bcache_drop_buf(bdev->bc, buf);
        }
        return r;
}

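/**@brief Reserve a cache block for @p lba without reading its content
 *        from the device. Shakes the cache first if it is full, then
 *        allocates (or re-references) a buffer in the block cache. */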
int ext4_block_get_noread(struct ext4_blockdev *bdev, struct ext4_block *b,
                          uint64_t lba)
{
        bool is_new;
        int r;

        ext4_assert(bdev && b);

        if (!bdev->bdif->ph_refctr)
                return EIO;

        if (!(lba < bdev->lg_bcnt))
                return ERANGE;

        b->lb_id = lba;

        /*If cache is full we have to (flush and) drop it anyway :(*/
        r = ext4_block_cache_shake(bdev);
        if (r != EOK)
                return r;

        r = ext4_bcache_alloc(bdev->bc, b, &is_new);
        if (r != EOK)
                return r;

        if (!b->data)
                return ENOMEM;

        return EOK;
}

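/**@brief Get a cache block for @p lba. If the cached buffer is not yet
 *        marked BC_UPTODATE, its content is read from the device and the
 *        flag is set. */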
int ext4_block_get(struct ext4_blockdev *bdev, struct ext4_block *b,
                   uint64_t lba)
{
        int r = ext4_block_get_noread(bdev, b, lba);
        if (r != EOK)
                return r;

        if (ext4_bcache_test_flag(b->buf, BC_UPTODATE)) {
                /* Data in the cache is up-to-date.
                 * Reading from the physical device is not required. */
                return EOK;
        }

        r = ext4_blocks_get_direct(bdev, b->data, lba, 1);
        if (r != EOK) {
                ext4_bcache_free(bdev->bc, b);
                b->lb_id = 0;
                return r;
        }

        /* Mark the buffer up-to-date, since fresh data
         * has just been read from the physical device. */
        ext4_bcache_set_flag(b->buf, BC_UPTODATE);
        return EOK;
}

int ext4_block_set(struct ext4_blockdev *bdev, struct ext4_block *b)
{
        ext4_assert(bdev && b);
        ext4_assert(b->buf);

        if (!bdev->bdif->ph_refctr)
                return EIO;

        return ext4_bcache_free(bdev->bc, b);
}

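/* Direct (cache-bypassing) multi-block read/write. The logical block
 * address is translated to a physical block address using the partition
 * offset and the logical-to-physical block size ratio. */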
int ext4_blocks_get_direct(struct ext4_blockdev *bdev, void *buf, uint64_t lba,
                           uint32_t cnt)
{
        uint64_t pba;
        uint32_t pb_cnt;

        ext4_assert(bdev && buf);

        pba = (lba * bdev->lg_bsize + bdev->part_offset) / bdev->bdif->ph_bsize;
        pb_cnt = bdev->lg_bsize / bdev->bdif->ph_bsize;

        return ext4_bdif_bread(bdev, buf, pba, pb_cnt * cnt);
}

int ext4_blocks_set_direct(struct ext4_blockdev *bdev, const void *buf,
                           uint64_t lba, uint32_t cnt)
{
        uint64_t pba;
        uint32_t pb_cnt;

        ext4_assert(bdev && buf);

        pba = (lba * bdev->lg_bsize + bdev->part_offset) / bdev->bdif->ph_bsize;
        pb_cnt = bdev->lg_bsize / bdev->bdif->ph_bsize;

        return ext4_bdif_bwrite(bdev, buf, pba, pb_cnt * cnt);
}

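/**@brief Write @p len bytes at byte offset @p off. The transfer is split
 *        into a possibly unaligned head (read-modify-write via ph_bbuf),
 *        a run of whole physical blocks written directly from @p buf, and
 *        a possibly unaligned tail (again via ph_bbuf). */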
int ext4_block_writebytes(struct ext4_blockdev *bdev, uint64_t off,
                          const void *buf, uint32_t len)
{
        uint64_t block_idx;
        uint32_t blen;
        uint32_t unalg;
        int r = EOK;

        const uint8_t *p = buf;

        ext4_assert(bdev && buf);

        if (!bdev->bdif->ph_refctr)
                return EIO;

        if (off + len > bdev->part_size)
                return EINVAL; /* Oops, out-of-range operation */

        block_idx = ((off + bdev->part_offset) / bdev->bdif->ph_bsize);

        /* OK, let's deal with the first (possibly unaligned) block */
        unalg = (off & (bdev->bdif->ph_bsize - 1));
        if (unalg) {

                uint32_t wlen = (bdev->bdif->ph_bsize - unalg) > len
                                    ? len
                                    : (bdev->bdif->ph_bsize - unalg);

                r = ext4_bdif_bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
                if (r != EOK)
                        return r;

                memcpy(bdev->bdif->ph_bbuf + unalg, p, wlen);
                r = ext4_bdif_bwrite(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
                if (r != EOK)
                        return r;

                p += wlen;
                len -= wlen;
                block_idx++;
        }

        /*Aligned data*/
        blen = len / bdev->bdif->ph_bsize;
        r = ext4_bdif_bwrite(bdev, p, block_idx, blen);
        if (r != EOK)
                return r;

        p += bdev->bdif->ph_bsize * blen;
        len -= bdev->bdif->ph_bsize * blen;

        block_idx += blen;

        /*Rest of the data*/
        if (len) {
                r = ext4_bdif_bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
                if (r != EOK)
                        return r;

                memcpy(bdev->bdif->ph_bbuf, p, len);
                r = ext4_bdif_bwrite(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
                if (r != EOK)
                        return r;
        }

        return r;
}

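/**@brief Read @p len bytes from byte offset @p off, mirroring
 *        ext4_block_writebytes(): unaligned head via ph_bbuf, aligned
 *        middle read directly into @p buf, unaligned tail via ph_bbuf. */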
int ext4_block_readbytes(struct ext4_blockdev *bdev, uint64_t off, void *buf,
                         uint32_t len)
{
        uint64_t block_idx;
        uint32_t blen;
        uint32_t unalg;
        int r = EOK;

        uint8_t *p = buf;

        ext4_assert(bdev && buf);

        if (!bdev->bdif->ph_refctr)
                return EIO;

        if (off + len > bdev->part_size)
                return EINVAL; /* Oops, out-of-range operation */

        block_idx = ((off + bdev->part_offset) / bdev->bdif->ph_bsize);

        /* OK, let's deal with the first (possibly unaligned) block */
        unalg = (off & (bdev->bdif->ph_bsize - 1));
        if (unalg) {

                uint32_t rlen = (bdev->bdif->ph_bsize - unalg) > len
                                    ? len
                                    : (bdev->bdif->ph_bsize - unalg);

                r = ext4_bdif_bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
                if (r != EOK)
                        return r;

                memcpy(p, bdev->bdif->ph_bbuf + unalg, rlen);

                p += rlen;
                len -= rlen;
                block_idx++;
        }

        /*Aligned data*/
        blen = len / bdev->bdif->ph_bsize;

        r = ext4_bdif_bread(bdev, p, block_idx, blen);
        if (r != EOK)
                return r;

        p += bdev->bdif->ph_bsize * blen;
        len -= bdev->bdif->ph_bsize * blen;

        block_idx += blen;

        /*Rest of the data*/
        if (len) {
                r = ext4_bdif_bread(bdev, bdev->bdif->ph_bbuf, block_idx, 1);
                if (r != EOK)
                        return r;

                memcpy(p, bdev->bdif->ph_bbuf, len);
        }

        return r;
}

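/**@brief Flush every buffer on the block cache dirty list back to the
 *        device, stopping at the first error. */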
int ext4_block_cache_flush(struct ext4_blockdev *bdev)
{
        while (!SLIST_EMPTY(&bdev->bc->dirty_list)) {
                int r;
                struct ext4_buf *buf = SLIST_FIRST(&bdev->bc->dirty_list);
                ext4_assert(buf);
                r = ext4_block_flush_buf(bdev, buf);
                if (r != EOK)
                        return r;
        }
        return EOK;
}

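/**@brief Enable or disable delayed (write-back) cache mode. Enable
 *        requests are reference counted; dirty blocks are flushed only
 *        when the last enable has been matched by a disable.
 *
 * A minimal usage sketch (hypothetical caller code, not part of this file):
 *
 *     ext4_block_cache_write_back(bdev, 1);
 *     ...batched ext4_block_get()/ext4_block_set() updates...
 *     ext4_block_cache_write_back(bdev, 0);  (flushes the dirty blocks)
 */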
int ext4_block_cache_write_back(struct ext4_blockdev *bdev, uint8_t on_off)
{
        if (on_off)
                bdev->cache_write_back++;

        if (!on_off && bdev->cache_write_back)
                bdev->cache_write_back--;

        if (bdev->cache_write_back)
                return EOK;

        /*Flush data in all delayed cache blocks*/
        return ext4_block_cache_flush(bdev);
}

/**
 * @}
 */