Use page vector and vmap for lbd

Tom Marshall 2019-11-02 07:49:09 -07:00
parent 8e1630b08c
commit 11cc8a229e
1 changed file with 35 additions and 28 deletions
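This change replaces lbd's single physically contiguous page allocation (cbd_alloc_pages() addressed via page_address()) with a vector of individually allocated pages that vmap() stitches into one virtually contiguous buffer. Individual page allocations avoid high-order allocation failures on fragmented memory. Note that virt_to_page() is not valid on vmap()ed addresses, so every per-page operation (PageDirty, lock_page, I/O submission) now goes through pagev[n] instead of walking the mapped buffer pointer. The helpers cbd_alloc_pagev()/cbd_free_pagev() are not shown in this diff; the sketch below substitutes plain alloc_page()/__free_page() to illustrate the pattern and is not the module's actual code.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch of the pattern used by lbd_ctr(): allocate
 * nr_pages individual pages into pagev, then map them into a single
 * virtually contiguous kernel buffer. Stands in for cbd_alloc_pagev()
 * followed by vmap(); not the module's actual helper code.
 */
static u8*
alloc_and_map_pagev(struct page** pagev, u32 nr_pages)
{
	u32 n;
	u8* buf;

	for (n = 0; n < nr_pages; ++n) {
		/* Single-page allocations: no physical contiguity needed. */
		pagev[n] = alloc_page(GFP_KERNEL);
		if (!pagev[n]) {
			goto err;
		}
	}
	/* One contiguous kernel VA range over scattered physical pages. */
	buf = vmap(pagev, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf) {
		goto err;
	}
	return buf;

err:
	/* Free pages 0..n-1; pagev[n] itself was never allocated. */
	while (n--) {
		__free_page(pagev[n]);
	}
	return NULL;
}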

@@ -40,7 +40,7 @@ struct lbd {
 	struct lbatviewcache* lvc;
 	struct lbatview* lv;
 	void* percpu;
-	struct page* pages;
+	struct page** pagev;
 	u8* buf;
 	u32 c_len;
 };
@@ -292,6 +292,8 @@ lbd_ctr(struct lbd* lbd,
 	struct lbatviewcache* lvc,
 	void* percpu)
 {
+	u32 nr_pages = lblk_per_pblk(params);
+
 	memset(lbd, 0, sizeof(struct lbd));
 	lbd->lblk = LBLK_NONE;
 	mutex_init(&lbd->reflock);
@@ -301,11 +303,17 @@ lbd_ctr(struct lbd* lbd,
 	lbd->lvc = lvc;
 	lbd->lv = NULL;
 	lbd->percpu = percpu;
-	lbd->pages = cbd_alloc_pages(lblk_per_pblk(lbd->params));
-	if (!lbd->pages) {
+	lbd->pagev = kzalloc(nr_pages * sizeof(struct page*), GFP_KERNEL);
+	if (!lbd->pagev) {
 		return false;
 	}
+	if (!cbd_alloc_pagev(lbd->pagev, nr_pages)) {
+		return false;
+	}
+	lbd->buf = vmap(lbd->pagev, nr_pages, VM_MAP, PAGE_KERNEL);
+	if (!lbd->buf) {
+		return false;
+	}
-	lbd->buf = page_address(lbd->pages);
 	lbd->c_len = 0;
 	return true;
 }
@@ -314,13 +322,17 @@ lbd_ctr(struct lbd* lbd,
 static void
 lbd_dtr(struct lbd* lbd)
 {
+	u32 nr_pages = lblk_per_pblk(lbd->params);
+
 	if (lbatviewcache_put(lbd->lvc, lbd->lv) != 0) {
 		printk(KERN_ERR "%s: lbatviewcache_put failed\n", __func__);
 	}
 	lbd->c_len = 0;
+	vunmap(lbd->buf);
 	lbd->buf = NULL;
-	cbd_free_pages(lbd->pages, lblk_per_pblk(lbd->params));
-	lbd->pages = NULL;
+	cbd_free_pagev(lbd->pagev, nr_pages);
+	kfree(lbd->pagev);
+	lbd->pagev = NULL;
 	lbd->percpu = NULL;
 	lbd->lv = NULL;
 	lbd->lvc = NULL;
@@ -331,10 +343,9 @@ lbd_error(struct lbd* lbd)
 {
 	u32 count = lblk_per_pblk(lbd->params);
 	u32 n;
-	u8* p;
 
-	for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
-		if (PageError(virt_to_page(p))) {
+	for (n = 0; n < count; ++n) {
+		if (PageError(lbd->pagev[n])) {
 			return true;
 		}
 	}
@@ -347,14 +358,13 @@ lbd_flush(struct lbd* lbd)
 {
 	int ret = 0;
 	int err;
-	u8* p;
 	u32 n;
 	u64 pblk;
 	u32 count = lblk_per_pblk(lbd->params);
 	struct page* iopagev[count];
 
 	mutex_lock(&lbd->lock);
-	if (!PageDirty(lbd->pages)) {
+	if (!PageDirty(lbd->pagev[0])) {
 		goto unlock;
 	}
 	if (lbd_error(lbd)) {
@@ -383,23 +393,22 @@ lbd_flush(struct lbd* lbd)
 	if (ret) {
 		goto unlock;
 	}
-	for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
+	for (n = 0; n < count; ++n) {
 		pblk = lbatview_elem_pblk(lbd->lv, lbd->lblk, n);
 		BUG_ON(pblk == PBLK_NONE);
-		iopagev[0] = virt_to_page(p);
+		iopagev[0] = lbd->pagev[n];
 		pblk_write(lbd->params, pblk, 1, iopagev);
 	}
 	while (n < lblk_per_pblk(lbd->params)) {
-		unlock_page(virt_to_page(p));
+		unlock_page(lbd->pagev[n]);
 		++n;
-		p += PBLK_SIZE;
 	}
 	goto out;
 
 unlock:
 	count = lblk_per_pblk(lbd->params);
-	for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
-		unlock_page(virt_to_page(p));
+	for (n = 0; n < count; ++n) {
+		unlock_page(lbd->pagev[n]);
 	}
 
 out:
@@ -419,12 +428,11 @@ lbd_read(struct lbd* lbd)
 	int ret = 0;
 	u32 count;
 	u32 n;
-	u8* p;
 	u64 pblk;
 	struct page* iopagev[1];
 
 	/* XXX: can't happen because lbdcache will not use a page with an error */
-	if (PageError(lbd->pages)) {
+	if (PageError(lbd->pagev[0])) {
 		return -EIO;
 	}
 	lbd->c_len = lbatview_elem_len(lbd->lv, lbd->lblk);
@@ -435,13 +443,13 @@
 	count = (lbd->c_len == CBD_UNCOMPRESSED) ?
 		lblk_per_pblk(lbd->params) :
 		DIV_ROUND_UP(lbd->c_len, PBLK_SIZE);
-	for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
+	for (n = 0; n < count; ++n) {
 		pblk = lbatview_elem_pblk(lbd->lv, lbd->lblk, n);
 		if (pblk == PBLK_NONE) {
 			ret = -EIO;
 			goto out;
 		}
-		iopagev[0] = virt_to_page(p);
+		iopagev[0] = lbd->pagev[n];
 		/* XXX: Issue non-blocking reads? */
 		ret = pblk_read_wait(lbd->params, pblk, 1, iopagev);
 		if (ret) {
@@ -468,12 +476,11 @@ lbd_reset(struct lbd* lbd, u64 lblk)
 	int ret = 0;
 	u32 count = lblk_per_pblk(lbd->params);
 	u32 n;
-	u8* p;
 
 	if (lbd->lv) { printk(KERN_ERR "%s: lbatview leak\n", __func__); }
-	for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
-		lock_page(virt_to_page(p));
+	for (n = 0; n < count; ++n) {
+		lock_page(lbd->pagev[n]);
 	}
 
 	lbd->lv = lbatviewcache_get(lbd->lvc, lblk);
 	if (!lbd->lv) {
@@ -502,8 +509,8 @@ out:
 	if (ret) {
 		lbatviewcache_put(lbd->lvc, lbd->lv);
 		lbd->lv = NULL;
-		for (n = 0, p = lbd->buf; n < count; ++n, p += PBLK_SIZE) {
-			unlock_page(virt_to_page(p));
+		for (n = 0; n < count; ++n) {
+			unlock_page(lbd->pagev[n]);
 		}
 		lbd->lblk = LBLK_NONE;
 	}
@@ -534,7 +541,7 @@ lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf)
 	}
 	mutex_lock(&lbd->lock);
 	memcpy(lbd->buf + off, buf, len);
-	SetPageDirty(lbd->pages);
+	SetPageDirty(lbd->pagev[0]);
 	mutex_unlock(&lbd->lock);
 }
 
@@ -574,7 +581,7 @@ lbdcache_realloc(struct lbdcache* lc, unsigned int len)
 	lc->len = len;
 	lc->cache = cache;
 	while (n < len) {
-		lbd = kmalloc(sizeof(struct lbd), GFP_KERNEL);
+		lbd = kzalloc(sizeof(struct lbd), GFP_KERNEL);
 		if (!lbd) {
 			return false;
 		}
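Teardown mirrors construction in reverse order: lbd_dtr() vunmap()s the buffer before cbd_free_pagev() releases the pages backing it, then kfree()s the vector itself. The kmalloc() to kzalloc() switch in lbdcache_realloc() plausibly ensures that an lbd which never reached lbd_ctr() (for example, after a partial realloc failure) holds NULL pointers rather than garbage when torn down. A matching illustrative unwind, again standing in for the project's helpers rather than reproducing them:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Illustrative unwind matching lbd_dtr(): the vmap()ed range must be
 * torn down before the pages behind it are freed. Stands in for
 * vunmap() + cbd_free_pagev() + kfree(); not the module's actual code.
 */
static void
unmap_and_free_pagev(u8* buf, struct page** pagev, u32 nr_pages)
{
	u32 n;

	/* Drop the virtual mapping first, while the pages still exist. */
	vunmap(buf);
	for (n = 0; n < nr_pages; ++n) {
		if (pagev[n]) {
			__free_page(pagev[n]);
		}
	}
	kfree(pagev);	/* kfree(NULL) is a no-op */
}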