First really working version
parent 9a543670aa
commit ee7eacd4a6
@@ -23,8 +23,18 @@ Cache object sizing:


 TODO:
+- Consistency in lbatview_elem_realloc().
+  If alloc fails, roll back.
+- Allocate lbd pages using allocv and vmap them.
+  Same for pbat, when pbat_len() > 1.
+  Can we vmap compression workspace?
+- In *cache_get, call *_reset outside cache lock.
+- Implement vectorized lbatview_elem_pblk().
+- Dirty flag in compress header.
+- Implement stats.
+- Keep working lbd, flush on timer.
+  Need per-cpu lbd.
 - Move back to module based build system.
-- Make compression algorithm and speed/level selectable.
 - Create utilities for:
   - Resizing a compressed device.
   - Checking and repairing a compressed device.

@@ -32,4 +42,3 @@ TODO:
 - Compressed device must be large enough.
 - Backing device must be large enough.
 - Remove workqueue.
-- (?) Function ptrs for reading and writing lblk_alloc.

@@ -213,12 +213,6 @@ compress_lbdcache_swap(struct compress* c, u64 lblk, struct lbd* oldlbd)
         lbdcache_put(c->lc, oldlbd);
         return NULL;
     }
-    if (lbd_read(lbd) != 0) {
-        printk(KERN_ERR "%s: lbd_read failed\n", __func__);
-        lbdcache_put(c->lc, lbd);
-        lbdcache_put(c->lc, oldlbd);
-        return NULL;
-    }
     if (lbdcache_put(c->lc, oldlbd) != 0) {
         printk(KERN_ERR "%s: failed to put oldlbd\n", __func__);
         lbdcache_put(c->lc, lbd);

@@ -35,7 +35,6 @@ struct lbatpage {
     unsigned int ref;

     struct mutex lock;
-    enum cache_state state;
     struct cbd_params* params;
     struct page* page;
     u8* buf;

@@ -49,7 +48,6 @@ lbatpage_ctr(struct lbatpage* lp, struct cbd_params* params)
     mutex_init(&lp->reflock);
     lp->ref = 0;
     mutex_init(&lp->lock);
-    lp->state = CACHE_STATE_UNCACHED;
     lp->params = params;
     lp->page = cbd_alloc_page();
     if (!lp->page) {

@@ -69,18 +67,10 @@ lbatpage_dtr(struct lbatpage* lp)
     lp->page = NULL;
 }

-static void
-lbatpage_flush_endio(struct bio* bio)
+static bool
+lbatpage_error(struct lbatpage* lp)
 {
-    struct lbatpage* lp = bio->bi_private;
-    int ret;
-
-    ret = pblk_endio(bio);
-    if (ret) {
-        printk(KERN_ERR "%s: I/O failed\n", __func__);
-        lp->state = CACHE_STATE_ERROR;
-    }
-    unlock_page(lp->page);
+    return PageError(lp->page);
 }

 static int

@@ -90,52 +80,54 @@ lbatpage_flush(struct lbatpage* lp)
     struct page* iopagev[1];

     mutex_lock(&lp->lock);
-    if (lp->state != CACHE_STATE_DIRTY) {
-        if (lp->state == CACHE_STATE_ERROR) {
-            ret = -EIO;
-        }
-        unlock_page(lp->page);
-        goto out;
+    if (!PageDirty(lp->page)) {
+        goto unlock;
+    }
+    if (lbatpage_error(lp)) {
+        ret = -EIO;
+        goto unlock;
     }
     iopagev[0] = lp->page;
-    pblk_write(lp->params, lp->pblk, 1, iopagev, lbatpage_flush_endio, lp);
-    lp->state = CACHE_STATE_CLEAN;
-
-out:
+    pblk_write(lp->params, lp->pblk, 1, iopagev);
     mutex_unlock(&lp->lock);

+    return ret;
+
+unlock:
+    unlock_page(lp->page);
+    mutex_unlock(&lp->lock);
+
     return ret;
 }

-void
-lbatpage_reset(struct lbatpage* lp, u64 pblk)
-{
-    lock_page(lp->page);
-    if (lp->pblk != pblk) {
-        lp->pblk = pblk;
-        lp->state = CACHE_STATE_UNCACHED;
-    }
-}
-
-int
+static int
 lbatpage_read(struct lbatpage* lp)
 {
     int ret = 0;
     struct page* pagev[1];

-    mutex_lock(&lp->lock);
-    if (lp->state != CACHE_STATE_UNCACHED) {
-        goto out;
-    }
     pagev[0] = lp->page;
     ret = pblk_read_wait(lp->params, lp->pblk, 1, pagev);
-    if (ret) {
-        printk(KERN_ERR "%s: failed, pblk=%lu\n", __func__, (unsigned long)lp->pblk);
-        goto out;
-    }
-    lp->state = CACHE_STATE_CLEAN;

-out:
-    mutex_unlock(&lp->lock);
+    return ret;
+}
+
+static int
+lbatpage_reset(struct lbatpage* lp, u64 pblk)
+{
+    int ret = 0;
+
+    lock_page(lp->page);
+    if (lp->pblk != pblk) {
+        lp->pblk = pblk;
+        ret = lbatpage_read(lp);
+    }
+
+    if (ret) {
+        unlock_page(lp->page);
+        lp->pblk = PBLK_NONE;
+    }
+
     return ret;
 }

@@ -143,17 +135,16 @@ u8*
 lbatpage_get_buf(struct lbatpage* lp, bool rw)
 {
     mutex_lock(&lp->lock);
-    BUG_ON(lp->state == CACHE_STATE_UNCACHED);
     if (rw) {
-        lp->state = CACHE_STATE_DIRTY;
+        SetPageDirty(lp->page);
     }

     return lp->buf;
 }

 void
 lbatpage_put_buf(struct lbatpage* lp)
 {
-    BUG_ON(lp->state == CACHE_STATE_UNCACHED);
     mutex_unlock(&lp->lock);
 }

@@ -268,7 +259,7 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
     for (n = 0; n < lpc->len; ++n) {
         lp = lpc->cache[n];
         mutex_lock(&lp->reflock);
-        if (lp->ref == 0 && lp->state != CACHE_STATE_ERROR) {
+        if (lp->ref == 0 && !lbatpage_error(lp)) {
             goto found;
         }
         mutex_unlock(&lp->reflock);

@@ -283,7 +274,11 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
     mutex_lock(&lp->reflock);

 found:
-    lbatpage_reset(lp, pblk);
+    if (lbatpage_reset(lp, pblk) != 0) {
+        mutex_unlock(&lp->reflock);
+        lp = NULL;
+        goto out;
+    }
     lp->ref = 1;
     mutex_unlock(&lp->reflock);

@@ -35,7 +35,6 @@ struct lbatview {
     unsigned int ref;

     struct mutex lock;
-    enum cache_state state;
     struct cbd_params* params;
     struct pbatcache* pbatcache;
     struct pbat* pbat;

@@ -54,7 +53,6 @@ lbatview_ctr(struct lbatview* lv,
     mutex_init(&lv->reflock);
     lv->ref = 0;
     mutex_init(&lv->lock);
-    lv->state = CACHE_STATE_UNCACHED;
     lv->params = params;
     lv->pbatcache = pbatcache;
     lv->pbat = NULL;

@@ -67,13 +65,12 @@ lbatview_ctr(struct lbatview* lv,
 static void
 lbatview_dtr(struct lbatview* lv)
 {
-    if (pbatcache_put(lv->pbatcache, lv->pbat) != 0) {
-        printk(KERN_ERR "%s: pbatcache_put failed\n", __func__);
-    }
+    pbatcache_put(lv->pbatcache, lv->pbat);
     lv->pbat = NULL;
-    lbatpagecache_put(lv->lpc, lv->pages[0]);
     lbatpagecache_put(lv->lpc, lv->pages[1]);
-    lv->pages[0] = lv->pages[1] = NULL;
+    lv->pages[1] = NULL;
+    lbatpagecache_put(lv->lpc, lv->pages[0]);
+    lv->pages[0] = NULL;
     lv->lpc = NULL;
 }

@@ -81,94 +78,63 @@ static int
 lbatview_flush(struct lbatview* lv)
 {
     int ret = 0;
+    int err;

     mutex_lock(&lv->lock);
-    if (lv->state == CACHE_STATE_ERROR) {
-        ret = -EIO;
-        goto out;
+    if (lv->pages[1]) {
+        err = lbatpagecache_put(lv->lpc, lv->pages[1]);
+        if (err) {
+            ret = err;
+        }
+        lv->pages[1] = NULL;
     }
     if (lv->pages[0]) {
-        ret = lbatpagecache_put(lv->lpc, lv->pages[0]);
+        err = lbatpagecache_put(lv->lpc, lv->pages[0]);
+        if (err) {
+            ret = err;
+        }
         lv->pages[0] = NULL;
-        if (ret) {
-            lv->state = CACHE_STATE_ERROR;
-            goto out;
-        }
     }
-    if (lv->pages[1]) {
-        ret = lbatpagecache_put(lv->lpc, lv->pages[1]);
-        lv->pages[1] = NULL;
-        if (ret) {
-            lv->state = CACHE_STATE_ERROR;
-            goto out;
-        }
-    }
-    ret = pbatcache_put(lv->pbatcache, lv->pbat);
+    err = pbatcache_put(lv->pbatcache, lv->pbat);
     lv->pbat = NULL;
-    if (ret) {
-        lv->state = CACHE_STATE_ERROR;
-        goto out;
+    if (err) {
+        ret = err;
     }
-    lv->state = CACHE_STATE_CLEAN;
-
-out:
     mutex_unlock(&lv->lock);

     return ret;
 }

-static bool
+static int
 lbatview_reset(struct lbatview* lv, u64 pblk, u32 count)
 {
-    u32 zone;
+    int ret = 0;

     if (lv->pbat) { printk(KERN_ERR "%s: pbat leak\n", __func__); }
     if (lv->pages[0]) { printk(KERN_ERR "%s: lbatpage leak\n", __func__); }
     if (lv->pages[1]) { printk(KERN_ERR "%s: lbatpage leak\n", __func__); }

-    zone = zone_for_pblk(lv->params, pblk);
-    if (count > 0) {
+    lv->pblk = pblk;
+    if (!ret && count > 0) {
         lv->pages[0] = lbatpagecache_get(lv->lpc, pblk + 0);
         if (!lv->pages[0]) {
-            return false;
+            ret = -EIO;
         }
     }
-    if (count > 1) {
+    if (!ret && count > 1) {
         lv->pages[1] = lbatpagecache_get(lv->lpc, pblk + 1);
         if (!lv->pages[1]) {
-            return false;
+            ret = -EIO;
         }
     }
-    lv->pblk = pblk;
-    lv->state = CACHE_STATE_UNCACHED;
-
-    return true;
-}
-
-int
-lbatview_read(struct lbatview* lv)
-{
-    int ret = 0;
-
-    mutex_lock(&lv->lock);
-    if (lv->state != CACHE_STATE_UNCACHED) {
-        goto out;
+    if (ret) {
+        lbatpagecache_put(lv->lpc, lv->pages[1]);
+        lv->pages[1] = NULL;
+        lbatpagecache_put(lv->lpc, lv->pages[0]);
+        lv->pages[0] = NULL;
+        lv->pblk = PBLK_NONE;
     }
-    if (lv->pages[0]) {
-        ret = lbatpage_read(lv->pages[0]);
-        if (ret) {
-            goto out;
-        }
-    }
-    if (lv->pages[1]) {
-        ret = lbatpage_read(lv->pages[1]);
-        if (ret) {
-            goto out;
-        }
-    }
-    lv->state = CACHE_STATE_CLEAN;
-
-out:
-    mutex_unlock(&lv->lock);
     return ret;
 }

@@ -187,9 +153,6 @@ lbatview_alloc_pblk(struct lbatview* lv)
             printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
             return PBLK_NONE;
         }
-        if (pbat_read(lv->pbat) != 0) {
-            return PBLK_NONE;
-        }
     }
     pblk = pbat_alloc(lv->pbat);
     if (pblk != PBLK_NONE) {

@@ -210,10 +173,6 @@ lbatview_alloc_pblk(struct lbatview* lv)
             printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
             return PBLK_NONE;
         }
-        if (pbat_read(pbat) != 0) {
-            printk(KERN_ERR "%s: pbat_read failed\n", __func__);
-            return PBLK_NONE;
-        }
         pblk = pbat_alloc(pbat);
         if (pblk != PBLK_NONE) {
             lv->pbat = pbat;

@@ -231,10 +190,6 @@ lbatview_alloc_pblk(struct lbatview* lv)
             printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
             return PBLK_NONE;
         }
-        if (pbat_read(pbat) != 0) {
-            printk(KERN_ERR "%s: pbat_read failed\n", __func__);
-            return PBLK_NONE;
-        }
         pblk = pbat_alloc(pbat);
         if (pblk != PBLK_NONE) {
             lv->pbat = pbat;

@@ -270,11 +225,6 @@ lbatview_free_pblk(struct lbatview* lv, u64 pblk)
         printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
         return -EINVAL;
     }
-    ret = pbat_read(pbat);
-    if (ret != 0) {
-        printk(KERN_ERR "%s: pbat_read failed\n", __func__);
-        return ret;
-    }
     ret = pbat_free(pbat, pblk);
     BUG_ON(ret != 0);
     if (lv->pbat && pbat_zone(lv->pbat) != zone && pblk_zone == zone) {

@@ -317,18 +267,6 @@ lbatview_rmem(struct lbatview* lv, u32 off, u32 len, void* buf)
         printk(KERN_ERR "%s: *** out of bounds\n", __func__);
         return;
     }
-    if (off < PAGE_SIZE) {
-        if (!lv->pages[0]) {
-            printk(KERN_ERR "%s *** no page0\n", __func__);
-            return;
-        }
-    }
-    if (off + len > PAGE_SIZE) {
-        if (!lv->pages[1]) {
-            printk(KERN_ERR "%s *** no page1\n", __func__);
-            return;
-        }
-    }
     if (off < PAGE_SIZE && off + len > PAGE_SIZE) {
         u32 len0 = PAGE_SIZE - off;
         u8* pagebuf0 = lbatpage_get_buf(lv->pages[0], false);

@@ -355,18 +293,6 @@ lbatview_wmem(struct lbatview* lv, u32 off, u32 len, void* buf)
         printk(KERN_ERR "%s: *** out of bounds\n", __func__);
         return;
     }
-    if (off < PAGE_SIZE) {
-        if (!lv->pages[0]) {
-            printk(KERN_ERR "%s *** no page0\n", __func__);
-            return;
-        }
-    }
-    if (off + len > PAGE_SIZE) {
-        if (!lv->pages[1]) {
-            printk(KERN_ERR "%s *** no page1\n", __func__);
-            return;
-        }
-    }
     if (off < PAGE_SIZE && off + len > PAGE_SIZE) {
         u32 len0 = PAGE_SIZE - off;
         u8* pagebuf0 = lbatpage_get_buf(lv->pages[0], true);

@@ -383,7 +309,6 @@ lbatview_wmem(struct lbatview* lv, u32 off, u32 len, void* buf)
         memcpy(pagebuf + bufoff, buf, len);
         lbatpage_put_buf(lv->pages[bufidx]);
     }
-    lv->state = CACHE_STATE_DIRTY;
 }

 int

@@ -405,7 +330,6 @@ lbatview_elem_realloc(struct lbatview* lv, u64 lblk, u32 len)
         req_nalloc = DIV_ROUND_UP(len, PBLK_SIZE);
     }
     mutex_lock(&lv->lock);
-    BUG_ON(lv->state == CACHE_STATE_UNCACHED);
     elem_off = lbatview_elem_off(lv, lblk);
     elem_lelen = __cpu_to_le32(len);
     lbatview_wmem(lv, elem_off, lba_elem_len_bytes(lv->params), &elem_lelen);

@@ -452,7 +376,6 @@ lbatview_elem_len(struct lbatview* lv, u64 lblk)
     u32 elem_lelen;

     mutex_lock(&lv->lock);
-    BUG_ON(lv->state == CACHE_STATE_UNCACHED);
     off = lbatview_elem_off(lv, lblk);
     elem_lelen = 0;
     lbatview_rmem(lv, off, lba_elem_len_bytes(lv->params), &elem_lelen);

@@ -470,7 +393,6 @@ lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx)
     u32 pblk_zone;

     mutex_lock(&lv->lock);
-    BUG_ON(lv->state == CACHE_STATE_UNCACHED);
     off = lbatview_elem_off(lv, lblk) +
           lba_elem_len_bytes(lv->params) +
           idx * lba_elem_pblk_bytes(lv->params);

@@ -642,7 +564,7 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
     for (n = 0; n < lvc->len; ++n) {
         lv = lvc->cache[n];
         mutex_lock(&lv->reflock);
-        if (lv->ref == 0 && lv->state != CACHE_STATE_ERROR) {
+        if (lv->ref == 0) {
             goto found;
         }
         mutex_unlock(&lv->reflock);

@@ -657,9 +579,8 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
     mutex_lock(&lv->reflock);

 found:
-    if (!lbatview_reset(lv, pblk, count)) {
+    if (lbatview_reset(lv, pblk, count) != 0) {
         mutex_unlock(&lv->reflock);
-        printk(KERN_ERR "%s: lbatview_reset failed\n", __func__);
         lv = NULL;
         goto out;
     }

@@ -36,13 +36,13 @@ struct lbd {
     unsigned int ref;

     struct mutex lock;
-    enum cache_state state;
     struct cbd_params* params;
     struct lbatviewcache* lvc;
     struct lbatview* lv;
     void* percpu;
     struct page* pages;
     u8* buf;
+    u32 c_len;
 };

 /*

@@ -142,7 +142,7 @@ lblk_compress_lz4(struct lbd* lbd)
 }

 static bool
-lblk_decompress_lz4(struct lbd* lbd, u32 clen)
+lblk_decompress_lz4(struct lbd* lbd)
 {
     int ret;
     int cpu;

@@ -157,7 +157,7 @@ lblk_decompress_lz4(struct lbd* lbd, u32 clen)
     }
     ret = LZ4_decompress_safe(lbd->buf,
                               state->buf,
-                              clen,
+                              lbd->c_len,
                               dlen);
     if (ret != dlen) {
         put_cpu();

@@ -175,6 +175,7 @@ static size_t
 lblk_compress_zlib(struct lbd* lbd)
 {
     int ret;
+    int clen;
     int cpu;
     struct lblk_compress_state* state;
     z_stream* stream;

@@ -197,14 +198,15 @@ lblk_compress_zlib(struct lbd* lbd)
         put_cpu();
         return 0;
     }
-    memcpy(lbd->buf, state->buf, stream->total_out);
+    clen = stream->total_out;
+    memcpy(lbd->buf, state->buf, clen);
     put_cpu();

-    return stream->total_out;
+    return (size_t)clen;
 }

 static bool
-lblk_decompress_zlib(struct lbd* lbd, u32 clen)
+lblk_decompress_zlib(struct lbd* lbd)
 {
     int ret;
     int cpu;

@@ -222,7 +224,7 @@ lblk_decompress_zlib(struct lbd* lbd, u32 clen)
     ret = zlib_inflateReset(stream);
     BUG_ON(ret != Z_OK);
     stream->next_in = lbd->buf;
-    stream->avail_in = clen;
+    stream->avail_in = lbd->c_len;
     stream->next_out = state->buf;
     stream->avail_out = dlen;
     ret = zlib_inflate(stream, Z_SYNC_FLUSH);

@@ -267,20 +269,18 @@ lblk_compress(struct lbd* lbd)

 /*
  * Decompress dc->lz4_cbuf of size clen into dc->lblk
- *
- * Returns 0 for success, <0 for failure.
  */
-static int
-lblk_decompress(struct lbd* lbd, u32 clen)
+static bool
+lblk_decompress(struct lbd* lbd)
 {
 #ifdef COMPRESS_HAVE_LZ4
     if (lbd->params->algorithm == CBD_ALG_LZ4) {
-        return lblk_decompress_lz4(lbd, clen);
+        return lblk_decompress_lz4(lbd);
     }
 #endif
 #ifdef COMPRESS_HAVE_ZLIB
     if (lbd->params->algorithm == CBD_ALG_ZLIB) {
-        return lblk_decompress_zlib(lbd, clen);
+        return lblk_decompress_zlib(lbd);
     }
 #endif
     return false;

@@ -297,7 +297,6 @@ lbd_ctr(struct lbd* lbd,
     mutex_init(&lbd->reflock);
     lbd->ref = 0;
     mutex_init(&lbd->lock);
-    lbd->state = CACHE_STATE_UNCACHED;
     lbd->params = params;
     lbd->lvc = lvc;
     lbd->lv = NULL;

@@ -307,6 +306,7 @@ lbd_ctr(struct lbd* lbd,
         return false;
     }
     lbd->buf = page_address(lbd->pages);
+    lbd->c_len = 0;

     return true;
 }

@@ -317,6 +317,7 @@ lbd_dtr(struct lbd* lbd)
     if (lbatviewcache_put(lbd->lvc, lbd->lv) != 0) {
         printk(KERN_ERR "%s: lbatviewcache_put failed\n", __func__);
     }
+    lbd->c_len = 0;
     lbd->buf = NULL;
     cbd_free_pages(lbd->pages, lblk_per_pblk(lbd->params));
     lbd->pages = NULL;

@@ -325,166 +326,188 @@ lbd_dtr(struct lbd* lbd)
     lbd->lvc = NULL;
 }

-static void
-lbd_flush_endio(struct bio* bio)
+static bool
+lbd_error(struct lbd* lbd)
 {
-    struct lbd* lbd = bio->bi_private;
-    int ret;
+    u32 count = lblk_per_pblk(lbd->params);
+    u32 n;
+    u8* p;

-    ret = pblk_endio(bio);
-    if (ret) {
-        printk(KERN_ERR "%s: I/O failed\n", __func__);
-        lbd->state = CACHE_STATE_ERROR;
+    for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
+        if (PageError(virt_to_page(p))) {
+            return true;
+        }
     }
-    cbd_free_page(bio->bi_io_vec[0].bv_page);
+    return false;
 }

 static int
 lbd_flush(struct lbd* lbd)
 {
     int ret = 0;
-    u32 c_len;
-    u32 elem_len;
+    int err;
     u8* p;
     u32 n;
     u64 pblk;
-    struct page* iopagev[1];
+    u32 count = lblk_per_pblk(lbd->params);
+    struct page* iopagev[count];

     mutex_lock(&lbd->lock);
-    if (lbd->state != CACHE_STATE_DIRTY) {
-        if (lbd->state == CACHE_STATE_ERROR) {
-            ret = -EIO;
-            goto out;
-        }
-        goto clean;
+    if (!PageDirty(lbd->pages)) {
+        goto unlock;
+    }
+    if (lbd_error(lbd)) {
+        ret = -EIO;
+        goto unlock;
     }

     if (lblk_is_zeros(lbd->params, lbd)) {
-        c_len = 0;
-        elem_len = 0;
+        lbd->c_len = CBD_UNCOMPRESSED;
+        ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, 0);
+        goto unlock;
+    }
+    lbd->c_len = lblk_compress(lbd);
+    if (lbd->c_len > 0) {
+        u32 c_blkrem = lbd->c_len % PBLK_SIZE;
+        if (c_blkrem) {
+            memset(lbd->buf + lbd->c_len, 0, c_blkrem);
+        }
+        count = DIV_ROUND_UP(lbd->c_len, PBLK_SIZE);
     }
     else {
-        c_len = lblk_compress(lbd);
-        if (c_len > 0) {
-            size_t c_blkrem = c_len % PBLK_SIZE;
-            if (c_blkrem) {
-                memset(lbd->buf + c_len, 0, c_blkrem);
-            }
-            elem_len = c_len;
-        }
-        else {
-            c_len = PBLK_SIZE * lblk_per_pblk(lbd->params);
-            elem_len = CBD_UNCOMPRESSED;
-        }
+        lbd->c_len = CBD_UNCOMPRESSED;
+        count = lblk_per_pblk(lbd->params);
     }
-    ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, elem_len);
+    ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, lbd->c_len);
     if (ret) {
-        goto out;
+        goto unlock;
     }
-    p = lbd->buf;
-    for (n = 0; n * PBLK_SIZE < c_len; ++n, p += PBLK_SIZE) {
+    for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
         pblk = lbatview_elem_pblk(lbd->lv, lbd->lblk, n);
-        if (pblk == PBLK_NONE) {
-            ret = -EIO;
-            goto out;
-        }
-        iopagev[0] = cbd_alloc_page();
-        if (!iopagev[0]) {
-            printk(KERN_ERR "%s: out of memory\n", __func__);
-            ret = -ENOMEM;
-            goto out;
-        }
-        memcpy(page_address(iopagev[0]), p, PBLK_SIZE);
-        pblk_write(lbd->params, pblk, 1, iopagev, lbd_flush_endio, lbd);
+        BUG_ON(pblk == PBLK_NONE);
+        iopagev[0] = virt_to_page(p);
+        pblk_write(lbd->params, pblk, 1, iopagev);
     }
+    while (n < lblk_per_pblk(lbd->params)) {
+        unlock_page(virt_to_page(p));
+        ++n;
+        p += PBLK_SIZE;
+    }
+    goto out;

-clean:
-    ret = lbatviewcache_put(lbd->lvc, lbd->lv);
-    lbd->lv = NULL;
-    if (ret) {
-        lbd->state = CACHE_STATE_ERROR;
-        goto out;
+unlock:
+    count = lblk_per_pblk(lbd->params);
+    for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
+        unlock_page(virt_to_page(p));
     }
-    lbd->state = CACHE_STATE_CLEAN;

 out:
+    err = lbatviewcache_put(lbd->lvc, lbd->lv);
+    lbd->lv = NULL;
+    if (err) {
+        ret = err;
+    }
     mutex_unlock(&lbd->lock);

     return ret;
 }

-static bool
-lbd_reset(struct lbd* lbd, u64 lblk)
-{
-    if (lbd->lv) { printk(KERN_ERR "%s: lbatview leak\n", __func__); }
-
-    lbd->lv = lbatviewcache_get(lbd->lvc, lblk);
-    if (!lbd->lv) {
-        printk(KERN_ERR "%s: lbatviewcache_get failed\n", __func__);
-        return false;
-    }
-    lbd->lblk = lblk;
-    lbd->state = CACHE_STATE_UNCACHED;
-
-    return true;
-}
-
-int
+static int
 lbd_read(struct lbd* lbd)
 {
     int ret = 0;
-    u32 c_len;
+    u32 count;
+    u32 n;
+    u8* p;
     u64 pblk;
     struct page* iopagev[1];

-    mutex_lock(&lbd->lock);
-    if (lbd->state != CACHE_STATE_UNCACHED) {
-        goto out;
+    /* XXX: can't happen because lbdcache will not use a page with an error */
+    if (PageError(lbd->pages)) {
+        return -EIO;
     }
-    ret = lbatview_read(lbd->lv);
-    if (ret) {
-        goto out;
-    }
-    c_len = lbatview_elem_len(lbd->lv, lbd->lblk);
-    if (c_len == 0) {
+    lbd->c_len = lbatview_elem_len(lbd->lv, lbd->lblk);
+    if (lbd->c_len == 0) {
         memset(lbd->buf, 0, PBLK_SIZE * lblk_per_pblk(lbd->params));
     }
     else {
-        bool is_compressed = true;
-        u32 d_len = PBLK_SIZE * lblk_per_pblk(lbd->params);
-        u32 n;
-        u8* p;
-
-        if (c_len == CBD_UNCOMPRESSED) {
-            is_compressed = false;
-            c_len = d_len;
-        }
-        p = lbd->buf;
-        for (n = 0; n * PBLK_SIZE < c_len; ++n, p += PBLK_SIZE) {
+        count = (lbd->c_len == CBD_UNCOMPRESSED) ?
+                lblk_per_pblk(lbd->params) :
+                DIV_ROUND_UP(lbd->c_len, PBLK_SIZE);
+        for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
             pblk = lbatview_elem_pblk(lbd->lv, lbd->lblk, n);
             if (pblk == PBLK_NONE) {
                 ret = -EIO;
                 goto out;
             }
-            /* XXX: check pblk not in metadata? */
             iopagev[0] = virt_to_page(p);
+            /* XXX: Issue non-blocking reads? */
             ret = pblk_read_wait(lbd->params, pblk, 1, iopagev);
             if (ret) {
                 goto out;
             }
         }
-        if (is_compressed) {
-            if (!lblk_decompress(lbd, c_len)) {
+        if (lbd->c_len != CBD_UNCOMPRESSED) {
+            if (!lblk_decompress(lbd)) {
                 printk(KERN_ERR " decompress failed\n");
                 ret = -EIO;
                 goto out;
             }
         }
     }
-    lbd->state = CACHE_STATE_CLEAN;
+    lbd->c_len = CBD_UNCOMPRESSED;

 out:
-    mutex_unlock(&lbd->lock);
+    return ret;
+}
+
+static int
+lbd_reset(struct lbd* lbd, u64 lblk)
+{
+    int ret = 0;
+    u32 count = lblk_per_pblk(lbd->params);
+    u32 n;
+    u8* p;
+
+    if (lbd->lv) { printk(KERN_ERR "%s: lbatview leak\n", __func__); }
+
+    for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
+        lock_page(virt_to_page(p));
+    }
+    lbd->lv = lbatviewcache_get(lbd->lvc, lblk);
+    if (!lbd->lv) {
+        printk(KERN_ERR "%s: lbatviewcache_get failed\n", __func__);
+        ret = -EIO;
+        goto out;
+    }
+    if (lbd->lblk != lblk) {
+        lbd->lblk = lblk;
+        ret = lbd_read(lbd);
+        if (ret) {
+            printk(KERN_ERR "%s: lbd_read failed\n", __func__);
+        }
+    }
+    else {
+        if (lbd->c_len != CBD_UNCOMPRESSED) {
+            if (!lblk_decompress(lbd)) {
+                printk(KERN_ERR "%s: lblk_decompress failed\n", __func__);
+                ret = -EIO;
+            }
+            lbd->c_len = CBD_UNCOMPRESSED;
+        }
+    }
+
+out:
+    if (ret) {
+        lbatviewcache_put(lbd->lvc, lbd->lv);
+        lbd->lv = NULL;
+        for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
+            unlock_page(virt_to_page(p));
+        }
+        lbd->lblk = LBLK_NONE;
+    }
+
     return ret;
 }

@@ -497,7 +520,6 @@ lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf)
         return;
     }
     mutex_lock(&lbd->lock);
-    BUG_ON(lbd->state == CACHE_STATE_UNCACHED);
     memcpy(buf, lbd->buf + off, len);
     mutex_unlock(&lbd->lock);
 }

@@ -511,9 +533,8 @@ lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf)
         return;
     }
     mutex_lock(&lbd->lock);
-    BUG_ON(lbd->state == CACHE_STATE_UNCACHED);
     memcpy(lbd->buf + off, buf, len);
-    lbd->state = CACHE_STATE_DIRTY;
+    SetPageDirty(lbd->pages);
     mutex_unlock(&lbd->lock);
 }

@@ -728,7 +749,7 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
     for (n = 0; n < lc->len; ++n) {
         lbd = lc->cache[n];
         mutex_lock(&lbd->reflock);
-        if (lbd->ref == 0 && lbd->state != CACHE_STATE_ERROR) {
+        if (lbd->ref == 0 && !lbd_error(lbd)) {
             goto found;
         }
         mutex_unlock(&lbd->reflock);

@@ -745,9 +766,8 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
     mutex_lock(&lbd->reflock);

 found:
-    if (!lbd_reset(lbd, lblk)) {
+    if (lbd_reset(lbd, lblk) != 0) {
         mutex_unlock(&lbd->reflock);
-        printk(KERN_ERR "%s: lbd_reset failed\n", __func__);
         lbd = NULL;
         goto out;
     }

@@ -35,7 +35,6 @@ struct pbat {
     unsigned int ref;

     struct mutex lock;
-    enum cache_state state;
     struct cbd_params* params;
     struct page* pages;
     u8* buf;

@@ -50,7 +49,6 @@ pbat_ctr(struct pbat* pbat,
     mutex_init(&pbat->reflock);
     pbat->ref = 0;
     mutex_init(&pbat->lock);
-    pbat->state = CACHE_STATE_UNCACHED;
     pbat->params = params;
     pbat->pages = cbd_alloc_pages(pbat_len(params));
     if (!pbat->pages) {

@@ -65,23 +63,16 @@ pbat_ctr(struct pbat* pbat,
 static void
 pbat_dtr(struct pbat* pbat)
 {
+    lock_page(pbat->pages);
     pbat->buf = NULL;
     cbd_free_pages(pbat->pages, pbat_len(pbat->params));
     pbat->pages = NULL;
 }

-static void
-pbat_flush_endio(struct bio* bio)
+static bool
+pbat_error(struct pbat* pbat)
 {
-    struct pbat* pbat = bio->bi_private;
-    int ret;
-
-    ret = pblk_endio(bio);
-    if (ret) {
-        printk(KERN_ERR "%s: I/O failed\n", __func__);
-        pbat->state = CACHE_STATE_ERROR;
-    }
-    unlock_page(pbat->pages);
+    return PageError(pbat->pages);
 }

 static int

@@ -95,37 +86,31 @@ pbat_flush(struct pbat* pbat)
     u8* p;

     mutex_lock(&pbat->lock);
-    if (pbat->state != CACHE_STATE_DIRTY) {
-        if (pbat->state == CACHE_STATE_ERROR) {
-            ret = -EIO;
-        }
-        unlock_page(pbat->pages);
-        goto out;
+    if (!PageDirty(pbat->pages)) {
+        goto unlock;
+    }
+    if (pbat_error(pbat)) {
+        ret = -EIO;
+        goto unlock;
     }
     pblk = pbat_off(pbat->params, pbat->zone);
     p = pbat->buf;
     for (n = 0; n < count; ++n, p += PBLK_SIZE) {
         iopagev[n] = virt_to_page(p);
     }
-    pblk_write(pbat->params, pblk, count, iopagev, pbat_flush_endio, pbat);
-    pbat->state = CACHE_STATE_CLEAN;
-
-out:
+    pblk_write(pbat->params, pblk, count, iopagev);
     mutex_unlock(&pbat->lock);

+    return ret;
+
+unlock:
+    unlock_page(pbat->pages);
+    mutex_unlock(&pbat->lock);
+
     return ret;
 }

-static void
-pbat_reset(struct pbat* pbat, u32 zone)
-{
-    lock_page(pbat->pages);
-    if (pbat->zone != zone) {
-        pbat->zone = zone;
-        pbat->state = CACHE_STATE_UNCACHED;
-    }
-}
-
-int
+static int
 pbat_read(struct pbat* pbat)
 {
     int ret = 0;

@@ -135,23 +120,35 @@ pbat_read(struct pbat* pbat)
     u32 n;
     u8* p;

-    mutex_lock(&pbat->lock);
-    if (pbat->state != CACHE_STATE_UNCACHED) {
-        goto out;
+    /* XXX: can't happen because pbatcache will not use a page with an error */
+    if (PageError(pbat->pages)) {
+        return -EIO;
     }
     pblk = pbat_off(pbat->params, pbat->zone);
-    p = pbat->buf;
-    for (n = 0; n < count; ++n, p += PBLK_SIZE) {
+    for (n = 0, p = pbat->buf; n < count; ++n, p += PBLK_SIZE) {
         iopagev[n] = virt_to_page(p);
     }
     ret = pblk_read_wait(pbat->params, pblk, count, iopagev);
-    if (ret) {
-        goto out;
-    }
-    pbat->state = CACHE_STATE_CLEAN;
-
-out:
-    mutex_unlock(&pbat->lock);
+    return ret;
+}
+
+static int
+pbat_reset(struct pbat* pbat, u32 zone)
+{
+    int ret = 0;
+
+    lock_page(pbat->pages);
+    if (pbat->zone != zone) {
+        pbat->zone = zone;
+        ret = pbat_read(pbat);
+    }
+
+    if (ret) {
+        unlock_page(pbat->pages);
+        pbat->zone = ZONE_NONE;
+    }
+
     return ret;
 }

@@ -169,14 +166,13 @@ pbat_alloc(struct pbat* pbat)
     u64 pblk;

     mutex_lock(&pbat->lock);
-    BUG_ON(pbat->state == CACHE_STATE_UNCACHED);
     idx = cbd_bitmap_alloc(pbat->buf, pblk_count);
     if (idx == pblk_count) {
         pblk = PBLK_NONE;
         goto out;
     }
     pblk = idx + zone_data_off(pbat->params, pbat->zone);
-    pbat->state = CACHE_STATE_DIRTY;
+    SetPageDirty(pbat->pages);

 out:
     mutex_unlock(&pbat->lock);

@@ -199,9 +195,8 @@ pbat_free(struct pbat* pbat, u64 pblk)
     idx = pblk - zone_data_off(pbat->params, zone);
     BUG_ON(idx >= zone_pblk_count);
     mutex_lock(&pbat->lock);
-    BUG_ON(pbat->state == CACHE_STATE_UNCACHED);
     cbd_bitmap_free(pbat->buf, idx);
-    pbat->state = CACHE_STATE_DIRTY;
+    SetPageDirty(pbat->pages);
     mutex_unlock(&pbat->lock);

     return 0;

@@ -319,7 +314,7 @@ pbatcache_get(struct pbatcache* pc, u32 zone)
     for (n = 0; n < pc->len; ++n) {
         pbat = pc->cache[n];
         mutex_lock(&pbat->reflock);
-        if (pbat->ref == 0 && pbat->state != CACHE_STATE_ERROR) {
+        if (pbat->ref == 0 && !pbat_error(pbat)) {
             goto found;
         }
         mutex_unlock(&pbat->reflock);

@@ -334,7 +329,11 @@ pbatcache_get(struct pbatcache* pc, u32 zone)
     mutex_lock(&pbat->reflock);

 found:
-    pbat_reset(pbat, zone);
+    if (pbat_reset(pbat, zone) != 0) {
+        mutex_unlock(&pbat->reflock);
+        pbat = NULL;
+        goto out;
+    }
     pbat->ref = 1;
     mutex_unlock(&pbat->reflock);

@@ -180,10 +180,30 @@ pblk_read(struct cbd_params* params,
     return ret;
 }

+void
+pblk_endio(struct bio* bio)
+{
+    u32 n;
+    struct page* page;
+
+    BUG_ON(!bio);
+    for (n = 0; n < bio->bi_max_vecs; ++n) {
+        page = bio->bi_io_vec[n].bv_page;
+        unlock_page(page);
+        ClearPageDirty(page);
+    }
+    if (bio->bi_status != BLK_STS_OK) {
+        for (n = 0; n < bio->bi_max_vecs; ++n) {
+            page = bio->bi_io_vec[n].bv_page;
+            SetPageError(page);
+        }
+    }
+    bio_put(bio);
+}
+
 void
 pblk_write(struct cbd_params* params,
-           u64 pblk, u32 count, struct page** pagev,
-           pblk_endio_t endio, void* endio_priv)
+           u64 pblk, u32 count, struct page** pagev)
 {
     struct bio* bio;

@@ -192,30 +212,19 @@ pblk_write(struct cbd_params* params,
         printk(KERN_ERR "%s: out of memory\n", __func__);
         return;
     }
-    bio->bi_end_io = endio;
-    bio->bi_private = endio_priv;
+    bio->bi_end_io = pblk_endio;

     if (pblk < CBD_HEADER_BLOCKS) {
         printk(KERN_ERR "%s: *** Attempt to write header\n", __func__);
         dump_stack();
         bio->bi_status = BLK_STS_IOERR;
-        endio(bio);
+        pblk_endio(bio);
         return;
     }

     submit_bio(bio);
 }

-int
-pblk_endio(struct bio* bio)
-{
-    int ret;
-
-    ret = blk_status_to_errno(bio->bi_status);
-    bio_put(bio);
-    return ret;
-}
-
 static inline u32
 cshift (u32 x, uint n)
 {

@@ -451,14 +451,6 @@ lba_put(const struct cbd_params* params,
 #define COMPRESS_HAVE_ZLIB 1
 #endif

-enum cache_state {
-    CACHE_STATE_UNCACHED,
-    CACHE_STATE_CLEAN,
-    CACHE_STATE_DIRTY,
-    CACHE_STATE_ERROR,
-    CACHE_STATE_MAX
-};
-
 typedef void (*pblk_endio_t)(struct bio*);

 /* Single page allocator */

@@ -484,9 +476,7 @@ int pblk_read(struct cbd_params* params,
               u64 pblk, u32 count, struct page** pagev,
               pblk_endio_t endio, void* endio_priv);
 void pblk_write(struct cbd_params* params,
-               u64 pblk, u32 count, struct page** pagev,
-               pblk_endio_t endio, void* endio_priv);
-int pblk_endio(struct bio* bio);
+               u64 pblk, u32 count, struct page** pagev);

 /* Debug stuff */
 typedef unsigned char byte;

@@ -503,7 +493,6 @@ void md5_final(struct md5* ctx, byte* buf);


 struct pbat;
-int pbat_read(struct pbat* pbat);
 u32 pbat_zone(struct pbat* pbat);
 u64 pbat_alloc(struct pbat* pbat);
 int pbat_free(struct pbat* pbat, u64 pblk);

@@ -519,7 +508,6 @@ int pbatcache_put(struct pbatcache* pbatcache, struct pbat* pbat);


 struct lbatpage;
-int lbatpage_read(struct lbatpage* lp);
 u8* lbatpage_get_buf(struct lbatpage* lp, bool rw);
 void lbatpage_put_buf(struct lbatpage* lp);

@@ -533,7 +521,6 @@ struct lbatpage*
 int lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lpi);

 struct lbatview;
-int lbatview_read(struct lbatview* lv);
 int lbatview_elem_realloc(struct lbatview* lv, u64 lblk, u32 len);
 u32 lbatview_elem_len(struct lbatview* lv, u64 lblk);
 u64 lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx);

@@ -548,7 +535,6 @@ struct lbatview*
 int lbatviewcache_put(struct lbatviewcache* lvc, struct lbatview* lbv);

 struct lbd;
-int lbd_read(struct lbd* lbd);
 void lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf);
 void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);