First really working version

This commit is contained in:
Tom Marshall 2019-11-01 14:41:11 -07:00
parent 9a543670aa
commit ee7eacd4a6
8 changed files with 305 additions and 372 deletions

13
TODO
View File

@ -23,8 +23,18 @@ Cache object sizing:
TODO:
- Consistency in lbatview_elem_realloc().
If alloc fails, roll back.
- Allocate lbd pages using allocv and vmap them.
Same for pbat, when pbat_len() > 1.
Can we vmap compression workspace?
- In *cache_get, call *_reset outside cache lock.
- Implement vectorized lbatview_elem_pblk().
- Dirty flag in compress header.
- Implement stats.
- Keep working lbd, flush on timer.
Need per-cpu lbd.
- Move back to a module-based build system.
- Make compression algorithm and speed/level selectable.
- Create utilities for:
- Resizing a compressed device.
- Checking and repairing a compressed device.
@ -32,4 +42,3 @@ TODO:
- Compressed device must be large enough.
- Backing device must be large enough.
- Remove workqueue.
- (?) Function ptrs for reading and writing lblk_alloc.

View File

@ -213,12 +213,6 @@ compress_lbdcache_swap(struct compress* c, u64 lblk, struct lbd* oldlbd)
lbdcache_put(c->lc, oldlbd);
return NULL;
}
if (lbd_read(lbd) != 0) {
printk(KERN_ERR "%s: lbd_read failed\n", __func__);
lbdcache_put(c->lc, lbd);
lbdcache_put(c->lc, oldlbd);
return NULL;
}
if (lbdcache_put(c->lc, oldlbd) != 0) {
printk(KERN_ERR "%s: failed to put oldlbd\n", __func__);
lbdcache_put(c->lc, lbd);

View File

@ -35,7 +35,6 @@ struct lbatpage {
unsigned int ref;
struct mutex lock;
enum cache_state state;
struct cbd_params* params;
struct page* page;
u8* buf;
@ -49,7 +48,6 @@ lbatpage_ctr(struct lbatpage* lp, struct cbd_params* params)
mutex_init(&lp->reflock);
lp->ref = 0;
mutex_init(&lp->lock);
lp->state = CACHE_STATE_UNCACHED;
lp->params = params;
lp->page = cbd_alloc_page();
if (!lp->page) {
@ -69,18 +67,10 @@ lbatpage_dtr(struct lbatpage* lp)
lp->page = NULL;
}
static void
lbatpage_flush_endio(struct bio* bio)
static bool
lbatpage_error(struct lbatpage* lp)
{
struct lbatpage* lp = bio->bi_private;
int ret;
ret = pblk_endio(bio);
if (ret) {
printk(KERN_ERR "%s: I/O failed\n", __func__);
lp->state = CACHE_STATE_ERROR;
}
unlock_page(lp->page);
return PageError(lp->page);
}
static int
@ -90,52 +80,54 @@ lbatpage_flush(struct lbatpage* lp)
struct page* iopagev[1];
mutex_lock(&lp->lock);
if (lp->state != CACHE_STATE_DIRTY) {
if (lp->state == CACHE_STATE_ERROR) {
ret = -EIO;
}
unlock_page(lp->page);
goto out;
if (!PageDirty(lp->page)) {
goto unlock;
}
if (lbatpage_error(lp)) {
ret = -EIO;
goto unlock;
}
iopagev[0] = lp->page;
pblk_write(lp->params, lp->pblk, 1, iopagev, lbatpage_flush_endio, lp);
lp->state = CACHE_STATE_CLEAN;
out:
pblk_write(lp->params, lp->pblk, 1, iopagev);
mutex_unlock(&lp->lock);
return ret;
unlock:
unlock_page(lp->page);
mutex_unlock(&lp->lock);
return ret;
}
void
lbatpage_reset(struct lbatpage* lp, u64 pblk)
{
lock_page(lp->page);
if (lp->pblk != pblk) {
lp->pblk = pblk;
lp->state = CACHE_STATE_UNCACHED;
}
}
int
static int
lbatpage_read(struct lbatpage* lp)
{
int ret = 0;
struct page* pagev[1];
mutex_lock(&lp->lock);
if (lp->state != CACHE_STATE_UNCACHED) {
goto out;
}
pagev[0] = lp->page;
ret = pblk_read_wait(lp->params, lp->pblk, 1, pagev);
if (ret) {
printk(KERN_ERR "%s: failed, pblk=%lu\n", __func__, (unsigned long)lp->pblk);
goto out;
}
lp->state = CACHE_STATE_CLEAN;
out:
mutex_unlock(&lp->lock);
return ret;
}
static int
lbatpage_reset(struct lbatpage* lp, u64 pblk)
{
int ret = 0;
lock_page(lp->page);
if (lp->pblk != pblk) {
lp->pblk = pblk;
ret = lbatpage_read(lp);
}
if (ret) {
unlock_page(lp->page);
lp->pblk = PBLK_NONE;
}
return ret;
}
@ -143,17 +135,16 @@ u8*
lbatpage_get_buf(struct lbatpage* lp, bool rw)
{
mutex_lock(&lp->lock);
BUG_ON(lp->state == CACHE_STATE_UNCACHED);
if (rw) {
lp->state = CACHE_STATE_DIRTY;
SetPageDirty(lp->page);
}
return lp->buf;
}
void
lbatpage_put_buf(struct lbatpage* lp)
{
BUG_ON(lp->state == CACHE_STATE_UNCACHED);
mutex_unlock(&lp->lock);
}
@ -268,7 +259,7 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
for (n = 0; n < lpc->len; ++n) {
lp = lpc->cache[n];
mutex_lock(&lp->reflock);
if (lp->ref == 0 && lp->state != CACHE_STATE_ERROR) {
if (lp->ref == 0 && !lbatpage_error(lp)) {
goto found;
}
mutex_unlock(&lp->reflock);
@ -283,7 +274,11 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
mutex_lock(&lp->reflock);
found:
lbatpage_reset(lp, pblk);
if (lbatpage_reset(lp, pblk) != 0) {
mutex_unlock(&lp->reflock);
lp = NULL;
goto out;
}
lp->ref = 1;
mutex_unlock(&lp->reflock);

View File

@ -35,7 +35,6 @@ struct lbatview {
unsigned int ref;
struct mutex lock;
enum cache_state state;
struct cbd_params* params;
struct pbatcache* pbatcache;
struct pbat* pbat;
@ -54,7 +53,6 @@ lbatview_ctr(struct lbatview* lv,
mutex_init(&lv->reflock);
lv->ref = 0;
mutex_init(&lv->lock);
lv->state = CACHE_STATE_UNCACHED;
lv->params = params;
lv->pbatcache = pbatcache;
lv->pbat = NULL;
@ -67,13 +65,12 @@ lbatview_ctr(struct lbatview* lv,
static void
lbatview_dtr(struct lbatview* lv)
{
if (pbatcache_put(lv->pbatcache, lv->pbat) != 0) {
printk(KERN_ERR "%s: pbatcache_put failed\n", __func__);
}
pbatcache_put(lv->pbatcache, lv->pbat);
lv->pbat = NULL;
lbatpagecache_put(lv->lpc, lv->pages[0]);
lbatpagecache_put(lv->lpc, lv->pages[1]);
lv->pages[0] = lv->pages[1] = NULL;
lv->pages[1] = NULL;
lbatpagecache_put(lv->lpc, lv->pages[0]);
lv->pages[0] = NULL;
lv->lpc = NULL;
}
@ -81,94 +78,63 @@ static int
lbatview_flush(struct lbatview* lv)
{
int ret = 0;
int err;
mutex_lock(&lv->lock);
if (lv->state == CACHE_STATE_ERROR) {
ret = -EIO;
goto out;
if (lv->pages[1]) {
err = lbatpagecache_put(lv->lpc, lv->pages[1]);
if (err) {
ret = err;
}
lv->pages[1] = NULL;
}
if (lv->pages[0]) {
ret = lbatpagecache_put(lv->lpc, lv->pages[0]);
err = lbatpagecache_put(lv->lpc, lv->pages[0]);
if (err) {
ret = err;
}
lv->pages[0] = NULL;
if (ret) {
lv->state = CACHE_STATE_ERROR;
goto out;
}
}
if (lv->pages[1]) {
ret = lbatpagecache_put(lv->lpc, lv->pages[1]);
lv->pages[1] = NULL;
if (ret) {
lv->state = CACHE_STATE_ERROR;
goto out;
}
}
ret = pbatcache_put(lv->pbatcache, lv->pbat);
err = pbatcache_put(lv->pbatcache, lv->pbat);
lv->pbat = NULL;
if (ret) {
lv->state = CACHE_STATE_ERROR;
goto out;
if (err) {
ret = err;
}
lv->state = CACHE_STATE_CLEAN;
out:
mutex_unlock(&lv->lock);
return ret;
}
static bool
static int
lbatview_reset(struct lbatview* lv, u64 pblk, u32 count)
{
u32 zone;
int ret = 0;
if (lv->pbat) { printk(KERN_ERR "%s: pbat leak\n", __func__); }
if (lv->pages[0]) { printk(KERN_ERR "%s: lbatpage leak\n", __func__); }
if (lv->pages[1]) { printk(KERN_ERR "%s: lbatpage leak\n", __func__); }
zone = zone_for_pblk(lv->params, pblk);
if (count > 0) {
lv->pblk = pblk;
if (!ret && count > 0) {
lv->pages[0] = lbatpagecache_get(lv->lpc, pblk + 0);
if (!lv->pages[0]) {
return false;
ret = -EIO;
}
}
if (count > 1) {
if (!ret && count > 1) {
lv->pages[1] = lbatpagecache_get(lv->lpc, pblk + 1);
if (!lv->pages[1]) {
return false;
ret = -EIO;
}
}
lv->pblk = pblk;
lv->state = CACHE_STATE_UNCACHED;
return true;
}
int
lbatview_read(struct lbatview* lv)
{
int ret = 0;
mutex_lock(&lv->lock);
if (lv->state != CACHE_STATE_UNCACHED) {
goto out;
if (ret) {
lbatpagecache_put(lv->lpc, lv->pages[1]);
lv->pages[1] = NULL;
lbatpagecache_put(lv->lpc, lv->pages[0]);
lv->pages[0] = NULL;
lv->pblk = PBLK_NONE;
}
if (lv->pages[0]) {
ret = lbatpage_read(lv->pages[0]);
if (ret) {
goto out;
}
}
if (lv->pages[1]) {
ret = lbatpage_read(lv->pages[1]);
if (ret) {
goto out;
}
}
lv->state = CACHE_STATE_CLEAN;
out:
mutex_unlock(&lv->lock);
return ret;
}
@ -187,9 +153,6 @@ lbatview_alloc_pblk(struct lbatview* lv)
printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
return PBLK_NONE;
}
if (pbat_read(lv->pbat) != 0) {
return PBLK_NONE;
}
}
pblk = pbat_alloc(lv->pbat);
if (pblk != PBLK_NONE) {
@ -210,10 +173,6 @@ lbatview_alloc_pblk(struct lbatview* lv)
printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
return PBLK_NONE;
}
if (pbat_read(pbat) != 0) {
printk(KERN_ERR "%s: pbat_read failed\n", __func__);
return PBLK_NONE;
}
pblk = pbat_alloc(pbat);
if (pblk != PBLK_NONE) {
lv->pbat = pbat;
@ -231,10 +190,6 @@ lbatview_alloc_pblk(struct lbatview* lv)
printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
return PBLK_NONE;
}
if (pbat_read(pbat) != 0) {
printk(KERN_ERR "%s: pbat_read failed\n", __func__);
return PBLK_NONE;
}
pblk = pbat_alloc(pbat);
if (pblk != PBLK_NONE) {
lv->pbat = pbat;
@ -270,11 +225,6 @@ lbatview_free_pblk(struct lbatview* lv, u64 pblk)
printk(KERN_ERR "%s: pbatcache_get failed\n", __func__);
return -EINVAL;
}
ret = pbat_read(pbat);
if (ret != 0) {
printk(KERN_ERR "%s: pbat_read failed\n", __func__);
return ret;
}
ret = pbat_free(pbat, pblk);
BUG_ON(ret != 0);
if (lv->pbat && pbat_zone(lv->pbat) != zone && pblk_zone == zone) {
@ -317,18 +267,6 @@ lbatview_rmem(struct lbatview* lv, u32 off, u32 len, void* buf)
printk(KERN_ERR "%s: *** out of bounds\n", __func__);
return;
}
if (off < PAGE_SIZE) {
if (!lv->pages[0]) {
printk(KERN_ERR "%s *** no page0\n", __func__);
return;
}
}
if (off + len > PAGE_SIZE) {
if (!lv->pages[1]) {
printk(KERN_ERR "%s *** no page1\n", __func__);
return;
}
}
if (off < PAGE_SIZE && off + len > PAGE_SIZE) {
u32 len0 = PAGE_SIZE - off;
u8* pagebuf0 = lbatpage_get_buf(lv->pages[0], false);
@ -355,18 +293,6 @@ lbatview_wmem(struct lbatview* lv, u32 off, u32 len, void* buf)
printk(KERN_ERR "%s: *** out of bounds\n", __func__);
return;
}
if (off < PAGE_SIZE) {
if (!lv->pages[0]) {
printk(KERN_ERR "%s *** no page0\n", __func__);
return;
}
}
if (off + len > PAGE_SIZE) {
if (!lv->pages[1]) {
printk(KERN_ERR "%s *** no page1\n", __func__);
return;
}
}
if (off < PAGE_SIZE && off + len > PAGE_SIZE) {
u32 len0 = PAGE_SIZE - off;
u8* pagebuf0 = lbatpage_get_buf(lv->pages[0], true);
@ -383,7 +309,6 @@ lbatview_wmem(struct lbatview* lv, u32 off, u32 len, void* buf)
memcpy(pagebuf + bufoff, buf, len);
lbatpage_put_buf(lv->pages[bufidx]);
}
lv->state = CACHE_STATE_DIRTY;
}
int
@ -405,7 +330,6 @@ lbatview_elem_realloc(struct lbatview* lv, u64 lblk, u32 len)
req_nalloc = DIV_ROUND_UP(len, PBLK_SIZE);
}
mutex_lock(&lv->lock);
BUG_ON(lv->state == CACHE_STATE_UNCACHED);
elem_off = lbatview_elem_off(lv, lblk);
elem_lelen = __cpu_to_le32(len);
lbatview_wmem(lv, elem_off, lba_elem_len_bytes(lv->params), &elem_lelen);
@ -452,7 +376,6 @@ lbatview_elem_len(struct lbatview* lv, u64 lblk)
u32 elem_lelen;
mutex_lock(&lv->lock);
BUG_ON(lv->state == CACHE_STATE_UNCACHED);
off = lbatview_elem_off(lv, lblk);
elem_lelen = 0;
lbatview_rmem(lv, off, lba_elem_len_bytes(lv->params), &elem_lelen);
@ -470,7 +393,6 @@ lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx)
u32 pblk_zone;
mutex_lock(&lv->lock);
BUG_ON(lv->state == CACHE_STATE_UNCACHED);
off = lbatview_elem_off(lv, lblk) +
lba_elem_len_bytes(lv->params) +
idx * lba_elem_pblk_bytes(lv->params);
@ -642,7 +564,7 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
for (n = 0; n < lvc->len; ++n) {
lv = lvc->cache[n];
mutex_lock(&lv->reflock);
if (lv->ref == 0 && lv->state != CACHE_STATE_ERROR) {
if (lv->ref == 0) {
goto found;
}
mutex_unlock(&lv->reflock);
@ -657,9 +579,8 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
mutex_lock(&lv->reflock);
found:
if (!lbatview_reset(lv, pblk, count)) {
if (lbatview_reset(lv, pblk, count) != 0) {
mutex_unlock(&lv->reflock);
printk(KERN_ERR "%s: lbatview_reset failed\n", __func__);
lv = NULL;
goto out;
}

View File

@ -36,13 +36,13 @@ struct lbd {
unsigned int ref;
struct mutex lock;
enum cache_state state;
struct cbd_params* params;
struct lbatviewcache* lvc;
struct lbatview* lv;
void* percpu;
struct page* pages;
u8* buf;
u32 c_len;
};
/*
@ -142,7 +142,7 @@ lblk_compress_lz4(struct lbd* lbd)
}
static bool
lblk_decompress_lz4(struct lbd* lbd, u32 clen)
lblk_decompress_lz4(struct lbd* lbd)
{
int ret;
int cpu;
@ -157,7 +157,7 @@ lblk_decompress_lz4(struct lbd* lbd, u32 clen)
}
ret = LZ4_decompress_safe(lbd->buf,
state->buf,
clen,
lbd->c_len,
dlen);
if (ret != dlen) {
put_cpu();
@ -175,6 +175,7 @@ static size_t
lblk_compress_zlib(struct lbd* lbd)
{
int ret;
int clen;
int cpu;
struct lblk_compress_state* state;
z_stream* stream;
@ -197,14 +198,15 @@ lblk_compress_zlib(struct lbd* lbd)
put_cpu();
return 0;
}
memcpy(lbd->buf, state->buf, stream->total_out);
clen = stream->total_out;
memcpy(lbd->buf, state->buf, clen);
put_cpu();
return stream->total_out;
return (size_t)clen;
}
static bool
lblk_decompress_zlib(struct lbd* lbd, u32 clen)
lblk_decompress_zlib(struct lbd* lbd)
{
int ret;
int cpu;
@ -222,7 +224,7 @@ lblk_decompress_zlib(struct lbd* lbd, u32 clen)
ret = zlib_inflateReset(stream);
BUG_ON(ret != Z_OK);
stream->next_in = lbd->buf;
stream->avail_in = clen;
stream->avail_in = lbd->c_len;
stream->next_out = state->buf;
stream->avail_out = dlen;
ret = zlib_inflate(stream, Z_SYNC_FLUSH);
@ -267,20 +269,18 @@ lblk_compress(struct lbd* lbd)
/*
* Decompress dc->lz4_cbuf of size clen into dc->lblk
*
* Returns 0 for success, <0 for failure.
*/
static int
lblk_decompress(struct lbd* lbd, u32 clen)
static bool
lblk_decompress(struct lbd* lbd)
{
#ifdef COMPRESS_HAVE_LZ4
if (lbd->params->algorithm == CBD_ALG_LZ4) {
return lblk_decompress_lz4(lbd, clen);
return lblk_decompress_lz4(lbd);
}
#endif
#ifdef COMPRESS_HAVE_ZLIB
if (lbd->params->algorithm == CBD_ALG_ZLIB) {
return lblk_decompress_zlib(lbd, clen);
return lblk_decompress_zlib(lbd);
}
#endif
return false;
@ -297,7 +297,6 @@ lbd_ctr(struct lbd* lbd,
mutex_init(&lbd->reflock);
lbd->ref = 0;
mutex_init(&lbd->lock);
lbd->state = CACHE_STATE_UNCACHED;
lbd->params = params;
lbd->lvc = lvc;
lbd->lv = NULL;
@ -307,6 +306,7 @@ lbd_ctr(struct lbd* lbd,
return false;
}
lbd->buf = page_address(lbd->pages);
lbd->c_len = 0;
return true;
}
@ -317,6 +317,7 @@ lbd_dtr(struct lbd* lbd)
if (lbatviewcache_put(lbd->lvc, lbd->lv) != 0) {
printk(KERN_ERR "%s: lbatviewcache_put failed\n", __func__);
}
lbd->c_len = 0;
lbd->buf = NULL;
cbd_free_pages(lbd->pages, lblk_per_pblk(lbd->params));
lbd->pages = NULL;
@ -325,166 +326,188 @@ lbd_dtr(struct lbd* lbd)
lbd->lvc = NULL;
}
static void
lbd_flush_endio(struct bio* bio)
static bool
lbd_error(struct lbd* lbd)
{
struct lbd* lbd = bio->bi_private;
int ret;
u32 count = lblk_per_pblk(lbd->params);
u32 n;
u8* p;
ret = pblk_endio(bio);
if (ret) {
printk(KERN_ERR "%s: I/O failed\n", __func__);
lbd->state = CACHE_STATE_ERROR;
for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
if (PageError(virt_to_page(p))) {
return true;
}
}
cbd_free_page(bio->bi_io_vec[0].bv_page);
return false;
}
static int
lbd_flush(struct lbd* lbd)
{
int ret = 0;
u32 c_len;
u32 elem_len;
int err;
u8* p;
u32 n;
u64 pblk;
struct page* iopagev[1];
u32 count = lblk_per_pblk(lbd->params);
struct page* iopagev[count];
mutex_lock(&lbd->lock);
if (lbd->state != CACHE_STATE_DIRTY) {
if (lbd->state == CACHE_STATE_ERROR) {
ret = -EIO;
goto out;
}
goto clean;
if (!PageDirty(lbd->pages)) {
goto unlock;
}
if (lbd_error(lbd)) {
ret = -EIO;
goto unlock;
}
if (lblk_is_zeros(lbd->params, lbd)) {
c_len = 0;
elem_len = 0;
lbd->c_len = CBD_UNCOMPRESSED;
ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, 0);
goto unlock;
}
lbd->c_len = lblk_compress(lbd);
if (lbd->c_len > 0) {
u32 c_blkrem = lbd->c_len % PBLK_SIZE;
if (c_blkrem) {
memset(lbd->buf + lbd->c_len, 0, c_blkrem);
}
count = DIV_ROUND_UP(lbd->c_len, PBLK_SIZE);
}
else {
c_len = lblk_compress(lbd);
if (c_len > 0) {
size_t c_blkrem = c_len % PBLK_SIZE;
if (c_blkrem) {
memset(lbd->buf + c_len, 0, c_blkrem);
}
elem_len = c_len;
}
else {
c_len = PBLK_SIZE * lblk_per_pblk(lbd->params);
elem_len = CBD_UNCOMPRESSED;
}
lbd->c_len = CBD_UNCOMPRESSED;
count = lblk_per_pblk(lbd->params);
}
ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, elem_len);
ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, lbd->c_len);
if (ret) {
goto out;
goto unlock;
}
p = lbd->buf;
for (n = 0; n * PBLK_SIZE < c_len; ++n, p += PBLK_SIZE) {
for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
pblk = lbatview_elem_pblk(lbd->lv, lbd->lblk, n);
if (pblk == PBLK_NONE) {
ret = -EIO;
goto out;
}
iopagev[0] = cbd_alloc_page();
if (!iopagev[0]) {
printk(KERN_ERR "%s: out of memory\n", __func__);
ret = -ENOMEM;
goto out;
}
memcpy(page_address(iopagev[0]), p, PBLK_SIZE);
pblk_write(lbd->params, pblk, 1, iopagev, lbd_flush_endio, lbd);
BUG_ON(pblk == PBLK_NONE);
iopagev[0] = virt_to_page(p);
pblk_write(lbd->params, pblk, 1, iopagev);
}
while (n < lblk_per_pblk(lbd->params)) {
unlock_page(virt_to_page(p));
++n;
p += PBLK_SIZE;
}
goto out;
clean:
ret = lbatviewcache_put(lbd->lvc, lbd->lv);
lbd->lv = NULL;
if (ret) {
lbd->state = CACHE_STATE_ERROR;
goto out;
unlock:
count = lblk_per_pblk(lbd->params);
for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
unlock_page(virt_to_page(p));
}
lbd->state = CACHE_STATE_CLEAN;
out:
err = lbatviewcache_put(lbd->lvc, lbd->lv);
lbd->lv = NULL;
if (err) {
ret = err;
}
mutex_unlock(&lbd->lock);
return ret;
}
static bool
lbd_reset(struct lbd* lbd, u64 lblk)
{
if (lbd->lv) { printk(KERN_ERR "%s: lbatview leak\n", __func__); }
lbd->lv = lbatviewcache_get(lbd->lvc, lblk);
if (!lbd->lv) {
printk(KERN_ERR "%s: lbatviewcache_get failed\n", __func__);
return false;
}
lbd->lblk = lblk;
lbd->state = CACHE_STATE_UNCACHED;
return true;
}
int
static int
lbd_read(struct lbd* lbd)
{
int ret = 0;
u32 c_len;
u32 count;
u32 n;
u8* p;
u64 pblk;
struct page* iopagev[1];
struct page* iopagev[1];
mutex_lock(&lbd->lock);
if (lbd->state != CACHE_STATE_UNCACHED) {
goto out;
/* XXX: can't happen because lbdcache will not use a page with an error */
if (PageError(lbd->pages)) {
return -EIO;
}
ret = lbatview_read(lbd->lv);
if (ret) {
goto out;
}
c_len = lbatview_elem_len(lbd->lv, lbd->lblk);
if (c_len == 0) {
lbd->c_len = lbatview_elem_len(lbd->lv, lbd->lblk);
if (lbd->c_len == 0) {
memset(lbd->buf, 0, PBLK_SIZE * lblk_per_pblk(lbd->params));
}
else {
bool is_compressed = true;
u32 d_len = PBLK_SIZE * lblk_per_pblk(lbd->params);
u32 n;
u8* p;
if (c_len == CBD_UNCOMPRESSED) {
is_compressed = false;
c_len = d_len;
}
p = lbd->buf;
for (n = 0; n * PBLK_SIZE < c_len; ++n, p += PBLK_SIZE) {
count = (lbd->c_len == CBD_UNCOMPRESSED) ?
lblk_per_pblk(lbd->params) :
DIV_ROUND_UP(lbd->c_len, PBLK_SIZE);
for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
pblk = lbatview_elem_pblk(lbd->lv, lbd->lblk, n);
if (pblk == PBLK_NONE) {
ret = -EIO;
goto out;
}
/* XXX: check pblk not in metadata? */
iopagev[0] = virt_to_page(p);
/* XXX: Issue non-blocking reads? */
ret = pblk_read_wait(lbd->params, pblk, 1, iopagev);
if (ret) {
goto out;
}
}
if (is_compressed) {
if (!lblk_decompress(lbd, c_len)) {
if (lbd->c_len != CBD_UNCOMPRESSED) {
if (!lblk_decompress(lbd)) {
printk(KERN_ERR " decompress failed\n");
ret = -EIO;
goto out;
}
}
}
lbd->state = CACHE_STATE_CLEAN;
lbd->c_len = CBD_UNCOMPRESSED;
out:
mutex_unlock(&lbd->lock);
return ret;
}
static int
lbd_reset(struct lbd* lbd, u64 lblk)
{
int ret = 0;
u32 count = lblk_per_pblk(lbd->params);
u32 n;
u8* p;
if (lbd->lv) { printk(KERN_ERR "%s: lbatview leak\n", __func__); }
for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
lock_page(virt_to_page(p));
}
lbd->lv = lbatviewcache_get(lbd->lvc, lblk);
if (!lbd->lv) {
printk(KERN_ERR "%s: lbatviewcache_get failed\n", __func__);
ret = -EIO;
goto out;
}
if (lbd->lblk != lblk) {
lbd->lblk = lblk;
ret = lbd_read(lbd);
if (ret) {
printk(KERN_ERR "%s: lbd_read failed\n", __func__);
}
}
else {
if (lbd->c_len != CBD_UNCOMPRESSED) {
if (!lblk_decompress(lbd)) {
printk(KERN_ERR "%s: lblk_decompress failed\n", __func__);
ret = -EIO;
}
lbd->c_len = CBD_UNCOMPRESSED;
}
}
out:
if (ret) {
lbatviewcache_put(lbd->lvc, lbd->lv);
lbd->lv = NULL;
for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
unlock_page(virt_to_page(p));
}
lbd->lblk = LBLK_NONE;
}
return ret;
}
@ -497,7 +520,6 @@ lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf)
return;
}
mutex_lock(&lbd->lock);
BUG_ON(lbd->state == CACHE_STATE_UNCACHED);
memcpy(buf, lbd->buf + off, len);
mutex_unlock(&lbd->lock);
}
@ -511,9 +533,8 @@ lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf)
return;
}
mutex_lock(&lbd->lock);
BUG_ON(lbd->state == CACHE_STATE_UNCACHED);
memcpy(lbd->buf + off, buf, len);
lbd->state = CACHE_STATE_DIRTY;
SetPageDirty(lbd->pages);
mutex_unlock(&lbd->lock);
}
@ -728,7 +749,7 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
for (n = 0; n < lc->len; ++n) {
lbd = lc->cache[n];
mutex_lock(&lbd->reflock);
if (lbd->ref == 0 && lbd->state != CACHE_STATE_ERROR) {
if (lbd->ref == 0 && !lbd_error(lbd)) {
goto found;
}
mutex_unlock(&lbd->reflock);
@ -745,9 +766,8 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
mutex_lock(&lbd->reflock);
found:
if (!lbd_reset(lbd, lblk)) {
if (lbd_reset(lbd, lblk) != 0) {
mutex_unlock(&lbd->reflock);
printk(KERN_ERR "%s: lbd_reset failed\n", __func__);
lbd = NULL;
goto out;
}

View File

@ -35,7 +35,6 @@ struct pbat {
unsigned int ref;
struct mutex lock;
enum cache_state state;
struct cbd_params* params;
struct page* pages;
u8* buf;
@ -50,7 +49,6 @@ pbat_ctr(struct pbat* pbat,
mutex_init(&pbat->reflock);
pbat->ref = 0;
mutex_init(&pbat->lock);
pbat->state = CACHE_STATE_UNCACHED;
pbat->params = params;
pbat->pages = cbd_alloc_pages(pbat_len(params));
if (!pbat->pages) {
@ -65,23 +63,16 @@ pbat_ctr(struct pbat* pbat,
static void
pbat_dtr(struct pbat* pbat)
{
lock_page(pbat->pages);
pbat->buf = NULL;
cbd_free_pages(pbat->pages, pbat_len(pbat->params));
pbat->pages = NULL;
}
static void
pbat_flush_endio(struct bio* bio)
static bool
pbat_error(struct pbat* pbat)
{
struct pbat* pbat = bio->bi_private;
int ret;
ret = pblk_endio(bio);
if (ret) {
printk(KERN_ERR "%s: I/O failed\n", __func__);
pbat->state = CACHE_STATE_ERROR;
}
unlock_page(pbat->pages);
return PageError(pbat->pages);
}
static int
@ -95,37 +86,31 @@ pbat_flush(struct pbat* pbat)
u8* p;
mutex_lock(&pbat->lock);
if (pbat->state != CACHE_STATE_DIRTY) {
if (pbat->state == CACHE_STATE_ERROR) {
ret = -EIO;
}
unlock_page(pbat->pages);
goto out;
if (!PageDirty(pbat->pages)) {
goto unlock;
}
if (pbat_error(pbat)) {
ret = -EIO;
goto unlock;
}
pblk = pbat_off(pbat->params, pbat->zone);
p = pbat->buf;
for (n = 0; n < count; ++n, p += PBLK_SIZE) {
iopagev[n] = virt_to_page(p);
}
pblk_write(pbat->params, pblk, count, iopagev, pbat_flush_endio, pbat);
pbat->state = CACHE_STATE_CLEAN;
out:
pblk_write(pbat->params, pblk, count, iopagev);
mutex_unlock(&pbat->lock);
return ret;
unlock:
unlock_page(pbat->pages);
mutex_unlock(&pbat->lock);
return ret;
}
static void
pbat_reset(struct pbat* pbat, u32 zone)
{
lock_page(pbat->pages);
if (pbat->zone != zone) {
pbat->zone = zone;
pbat->state = CACHE_STATE_UNCACHED;
}
}
int
static int
pbat_read(struct pbat* pbat)
{
int ret = 0;
@ -135,23 +120,35 @@ pbat_read(struct pbat* pbat)
u32 n;
u8* p;
mutex_lock(&pbat->lock);
if (pbat->state != CACHE_STATE_UNCACHED) {
goto out;
/* XXX: can't happen because pbatcache will not use a page with an error */
if (PageError(pbat->pages)) {
return -EIO;
}
pblk = pbat_off(pbat->params, pbat->zone);
p = pbat->buf;
for (n = 0; n < count; ++n, p += PBLK_SIZE) {
for (n = 0, p = pbat->buf; n < count; ++n, p += PBLK_SIZE) {
iopagev[n] = virt_to_page(p);
}
ret = pblk_read_wait(pbat->params, pblk, count, iopagev);
if (ret) {
goto out;
}
pbat->state = CACHE_STATE_CLEAN;
out:
mutex_unlock(&pbat->lock);
return ret;
}
static int
pbat_reset(struct pbat* pbat, u32 zone)
{
int ret = 0;
lock_page(pbat->pages);
if (pbat->zone != zone) {
pbat->zone = zone;
ret = pbat_read(pbat);
}
if (ret) {
unlock_page(pbat->pages);
pbat->zone = ZONE_NONE;
}
return ret;
}
@ -169,14 +166,13 @@ pbat_alloc(struct pbat* pbat)
u64 pblk;
mutex_lock(&pbat->lock);
BUG_ON(pbat->state == CACHE_STATE_UNCACHED);
idx = cbd_bitmap_alloc(pbat->buf, pblk_count);
if (idx == pblk_count) {
pblk = PBLK_NONE;
goto out;
}
pblk = idx + zone_data_off(pbat->params, pbat->zone);
pbat->state = CACHE_STATE_DIRTY;
SetPageDirty(pbat->pages);
out:
mutex_unlock(&pbat->lock);
@ -199,9 +195,8 @@ pbat_free(struct pbat* pbat, u64 pblk)
idx = pblk - zone_data_off(pbat->params, zone);
BUG_ON(idx >= zone_pblk_count);
mutex_lock(&pbat->lock);
BUG_ON(pbat->state == CACHE_STATE_UNCACHED);
cbd_bitmap_free(pbat->buf, idx);
pbat->state = CACHE_STATE_DIRTY;
SetPageDirty(pbat->pages);
mutex_unlock(&pbat->lock);
return 0;
@ -319,7 +314,7 @@ pbatcache_get(struct pbatcache* pc, u32 zone)
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->ref == 0 && pbat->state != CACHE_STATE_ERROR) {
if (pbat->ref == 0 && !pbat_error(pbat)) {
goto found;
}
mutex_unlock(&pbat->reflock);
@ -334,7 +329,11 @@ pbatcache_get(struct pbatcache* pc, u32 zone)
mutex_lock(&pbat->reflock);
found:
pbat_reset(pbat, zone);
if (pbat_reset(pbat, zone) != 0) {
mutex_unlock(&pbat->reflock);
pbat = NULL;
goto out;
}
pbat->ref = 1;
mutex_unlock(&pbat->reflock);

View File

@ -180,10 +180,30 @@ pblk_read(struct cbd_params* params,
return ret;
}
void
pblk_endio(struct bio* bio)
{
u32 n;
struct page* page;
BUG_ON(!bio);
for (n = 0; n < bio->bi_max_vecs; ++n) {
page = bio->bi_io_vec[n].bv_page;
unlock_page(page);
ClearPageDirty(page);
}
if (bio->bi_status != BLK_STS_OK) {
for (n = 0; n < bio->bi_max_vecs; ++n) {
page = bio->bi_io_vec[n].bv_page;
SetPageError(page);
}
}
bio_put(bio);
}
void
pblk_write(struct cbd_params* params,
u64 pblk, u32 count, struct page** pagev,
pblk_endio_t endio, void* endio_priv)
u64 pblk, u32 count, struct page** pagev)
{
struct bio* bio;
@ -192,30 +212,19 @@ pblk_write(struct cbd_params* params,
printk(KERN_ERR "%s: out of memory\n", __func__);
return;
}
bio->bi_end_io = endio;
bio->bi_private = endio_priv;
bio->bi_end_io = pblk_endio;
if (pblk < CBD_HEADER_BLOCKS) {
printk(KERN_ERR "%s: *** Attempt to write header\n", __func__);
dump_stack();
bio->bi_status = BLK_STS_IOERR;
endio(bio);
pblk_endio(bio);
return;
}
submit_bio(bio);
}
int
pblk_endio(struct bio* bio)
{
int ret;
ret = blk_status_to_errno(bio->bi_status);
bio_put(bio);
return ret;
}
static inline u32
cshift (u32 x, uint n)
{

View File

@ -451,14 +451,6 @@ lba_put(const struct cbd_params* params,
#define COMPRESS_HAVE_ZLIB 1
#endif
enum cache_state {
CACHE_STATE_UNCACHED,
CACHE_STATE_CLEAN,
CACHE_STATE_DIRTY,
CACHE_STATE_ERROR,
CACHE_STATE_MAX
};
typedef void (*pblk_endio_t)(struct bio*);
/* Single page allocator */
@ -484,9 +476,7 @@ int pblk_read(struct cbd_params* params,
u64 pblk, u32 count, struct page** pagev,
pblk_endio_t endio, void* endio_priv);
void pblk_write(struct cbd_params* params,
u64 pblk, u32 count, struct page** pagev,
pblk_endio_t endio, void* endio_priv);
int pblk_endio(struct bio* bio);
u64 pblk, u32 count, struct page** pagev);
/* Debug stuff */
typedef unsigned char byte;
@ -503,7 +493,6 @@ void md5_final(struct md5* ctx, byte* buf);
struct pbat;
int pbat_read(struct pbat* pbat);
u32 pbat_zone(struct pbat* pbat);
u64 pbat_alloc(struct pbat* pbat);
int pbat_free(struct pbat* pbat, u64 pblk);
@ -519,7 +508,6 @@ int pbatcache_put(struct pbatcache* pbatcache, struct pbat* pbat);
struct lbatpage;
int lbatpage_read(struct lbatpage* lp);
u8* lbatpage_get_buf(struct lbatpage* lp, bool rw);
void lbatpage_put_buf(struct lbatpage* lp);
@ -533,7 +521,6 @@ struct lbatpage*
int lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lpi);
struct lbatview;
int lbatview_read(struct lbatview* lv);
int lbatview_elem_realloc(struct lbatview* lv, u64 lblk, u32 len);
u32 lbatview_elem_len(struct lbatview* lv, u64 lblk);
u64 lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx);
@ -548,7 +535,6 @@ struct lbatview*
int lbatviewcache_put(struct lbatviewcache* lvc, struct lbatview* lbv);
struct lbd;
int lbd_read(struct lbd* lbd);
void lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf);
void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);