Rename lbatpage to lbatpblk

The new name is more accurate, and the distinction will matter if the pblk size becomes variable.
Tom Marshall 2019-11-11 21:07:11 +01:00
parent ceb0eb3230
commit c4aabad212
5 changed files with 76 additions and 76 deletions
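
For context, here is a minimal sketch of the distinction the new name captures; it is not part of this commit, and struct cbd_geom, pblk_shift, and lbatpblk_alloc_buf are hypothetical names used only for illustration. An lbatpblk caches one physical block (pblk) of the logical block allocation table. Today a pblk happens to fit in a single page, but with a variable pblk size the buffer would be sized from the pblk rather than from PAGE_SIZE:

/*
 * Hypothetical sketch, not from this repository: size the LBAT block
 * buffer from the pblk size instead of assuming one page per block.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>

/* Illustrative stand-in for the real parameter block. */
struct cbd_geom {
	u32 pblk_shift;		/* log2 of the physical block size in bytes */
};

static inline u32 pblk_size(const struct cbd_geom* g)
{
	return 1u << g->pblk_shift;
}

/* Allocate a buffer covering one LBAT pblk; it may span several pages. */
static u8* lbatpblk_alloc_buf(const struct cbd_geom* g)
{
	unsigned int order = get_order(pblk_size(g));

	return (u8*)__get_free_pages(GFP_KERNEL, order);
}

The commit itself only renames identifiers; each cache entry still holds a single page, but the new name no longer bakes that assumption into the API.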

View File

@@ -87,7 +87,7 @@ BIN_SRCS := \
KMOD_SRCS := \
util.c \
pbat.c \
-lbatpage.c \
+lbatpblk.c \
lbatview.c \
lbd.c \
compress.c

View File

@@ -3,7 +3,7 @@
dm-compress-y += \
util.o \
pbat.o \
-lbatpage.o \
+lbatpblk.o \
lbatview.o \
lbd.o \
compress.o

View File

@@ -29,7 +29,7 @@
#include <linux/dm-compress.h>
-struct lbatpage {
+struct lbatpblk {
struct list_head list;
u64 pblk;
struct mutex reflock;
@@ -43,9 +43,9 @@ struct lbatpage {
};
static bool
-lbatpage_ctr(struct lbatpage* lp, struct compress_params* kparams)
+lbatpblk_ctr(struct lbatpblk* lp, struct compress_params* kparams)
{
-memset(lp, 0, sizeof(struct lbatpage));
+memset(lp, 0, sizeof(struct lbatpblk));
INIT_LIST_HEAD(&lp->list);
lp->pblk = PBLK_NONE;
mutex_init(&lp->reflock);
@@ -63,7 +63,7 @@ lbatpage_ctr(struct lbatpage* lp, struct compress_params* kparams)
}
static void
-lbatpage_dtr(struct lbatpage* lp)
+lbatpblk_dtr(struct lbatpblk* lp)
{
lp->buf = NULL;
cbd_free_page(lp->page);
@@ -71,13 +71,13 @@ lbatpage_dtr(struct lbatpage* lp)
}
static bool
-lbatpage_error(struct lbatpage* lp)
+lbatpblk_error(struct lbatpblk* lp)
{
return PageError(lp->page);
}
static int
-lbatpage_flush(struct lbatpage* lp)
+lbatpblk_flush(struct lbatpblk* lp)
{
int ret = 0;
struct page* iopagev[1];
@@ -86,7 +86,7 @@ lbatpage_flush(struct lbatpage* lp)
if (!PageDirty(lp->page)) {
goto unlock;
}
-if (lbatpage_error(lp)) {
+if (lbatpblk_error(lp)) {
ret = -EIO;
goto unlock;
}
@@ -104,7 +104,7 @@ unlock:
}
static int
-lbatpage_read(struct lbatpage* lp)
+lbatpblk_read(struct lbatpblk* lp)
{
int ret = 0;
struct page* pagev[1];
@@ -116,14 +116,14 @@ lbatpage_read(struct lbatpage* lp)
}
static int
-lbatpage_reset(struct lbatpage* lp, u64 pblk)
+lbatpblk_reset(struct lbatpblk* lp, u64 pblk)
{
int ret = 0;
lock_page(lp->page);
if (lp->pblk != pblk) {
lp->pblk = pblk;
-ret = lbatpage_read(lp);
+ret = lbatpblk_read(lp);
}
if (ret) {
@@ -135,7 +135,7 @@ lbatpage_reset(struct lbatpage* lp, u64 pblk)
}
u8*
-lbatpage_get_buf(struct lbatpage* lp, bool rw)
+lbatpblk_get_buf(struct lbatpblk* lp, bool rw)
{
mutex_lock(&lp->lock);
if (rw) {
@@ -146,42 +146,42 @@ lbatpage_get_buf(struct lbatpage* lp, bool rw)
}
void
-lbatpage_put_buf(struct lbatpage* lp)
+lbatpblk_put_buf(struct lbatpblk* lp)
{
mutex_unlock(&lp->lock);
}
-struct lbatpagecache {
+struct lbatpblkcache {
struct mutex cache_lock;
struct list_head cache_head;
unsigned int cache_len;
-struct lbatpage* cache;
+struct lbatpblk* cache;
};
size_t
-lbatpagecache_size(void)
+lbatpblkcache_size(void)
{
-return sizeof(struct lbatpagecache);
+return sizeof(struct lbatpblkcache);
}
bool
-lbatpagecache_ctr(struct lbatpagecache* lpc,
+lbatpblkcache_ctr(struct lbatpblkcache* lpc,
struct compress_params* kparams, u32 cache_pages)
{
-struct lbatpage* cache;
+struct lbatpblk* cache;
u32 cache_len;
u32 n;
-memset(lpc, 0, sizeof(struct lbatpagecache));
+memset(lpc, 0, sizeof(struct lbatpblkcache));
-/* lbatpagecache gets 15/32 of cache pages */
+/* lbatpblkcache gets 15/32 of cache pages */
cache_len = (cache_pages * 15 / 32);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
return false;
}
printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
-cache = kzalloc(cache_len * sizeof(struct lbatpage), GFP_KERNEL);
+cache = kzalloc(cache_len * sizeof(struct lbatpblk), GFP_KERNEL);
if (!cache) {
return false;
}
@@ -190,7 +190,7 @@ lbatpagecache_ctr(struct lbatpagecache* lpc,
lpc->cache_len = cache_len;
lpc->cache = cache;
for (n = 0; n < cache_len; ++n) {
-if (!lbatpage_ctr(&cache[n], kparams)) {
+if (!lbatpblk_ctr(&cache[n], kparams)) {
return false;
}
list_add_tail(&cache[n].list, &lpc->cache_head);
@@ -200,19 +200,19 @@ lbatpagecache_ctr(struct lbatpagecache* lpc,
}
void
-lbatpagecache_dtr(struct lbatpagecache* lpc)
+lbatpblkcache_dtr(struct lbatpblkcache* lpc)
{
unsigned int n;
-struct lbatpage* lp;
+struct lbatpblk* lp;
for (n = 0; n < lpc->cache_len; ++n) {
lp = &lpc->cache[n];
if (!lp) {
continue;
}
-lbatpage_dtr(lp);
+lbatpblk_dtr(lp);
if (lp->ref) {
-printk(KERN_ERR "%s: lbatpage ref leak: n=%u ref=%u\n", __func__, n, lp->ref);
+printk(KERN_ERR "%s: lbatpblk ref leak: n=%u ref=%u\n", __func__, n, lp->ref);
}
}
kfree(lpc->cache);
@@ -221,10 +221,10 @@ lbatpagecache_dtr(struct lbatpagecache* lpc)
INIT_LIST_HEAD(&lpc->cache_head);
}
-struct lbatpage*
-lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
+struct lbatpblk*
+lbatpblkcache_get(struct lbatpblkcache* lpc, u64 pblk)
{
-struct lbatpage* lp;
+struct lbatpblk* lp;
mutex_lock(&lpc->cache_lock);
list_for_each_entry(lp, &lpc->cache_head, list) {
@@ -248,7 +248,7 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
}
list_for_each_entry_reverse(lp, &lpc->cache_head, list) {
mutex_lock(&lp->reflock);
-if (lp->ref == 0 && !lbatpage_error(lp)) {
+if (lp->ref == 0 && !lbatpblk_error(lp)) {
list_move(&lp->list, &lpc->cache_head);
mutex_unlock(&lpc->cache_lock);
goto found;
@@ -260,7 +260,7 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
return NULL;
found:
-if (lbatpage_reset(lp, pblk) != 0) {
+if (lbatpblk_reset(lp, pblk) != 0) {
mutex_unlock(&lp->reflock);
return NULL;
}
@@ -271,7 +271,7 @@ found:
}
int
-lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lp)
+lbatpblkcache_put(struct lbatpblkcache* lpc, struct lbatpblk* lp)
{
int ret = 0;
@@ -280,9 +280,9 @@ lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lp)
}
mutex_lock(&lp->reflock);
if (--lp->ref == 0) {
-ret = lbatpage_flush(lp);
+ret = lbatpblk_flush(lp);
if (ret) {
-printk(KERN_ERR "%s: lbatpage_flush failed\n", __func__);
+printk(KERN_ERR "%s: lbatpblk_flush failed\n", __func__);
}
}
mutex_unlock(&lp->reflock);

View File

@@ -39,9 +39,9 @@ struct lbatview {
struct compress_params* kparams;
struct compress_stats* kstats;
struct pbatcache* pc;
-struct lbatpagecache* lpc;
+struct lbatpblkcache* lpc;
struct pbat* pbat;
-struct lbatpage* pages[2];
+struct lbatpblk* pages[2];
};
static bool
@@ -49,7 +49,7 @@ lbatview_ctr(struct lbatview* lv,
struct compress_params* kparams,
struct compress_stats* kstats,
struct pbatcache* pc,
-struct lbatpagecache* lpc)
+struct lbatpblkcache* lpc)
{
memset(lv, 0, sizeof(struct lbatview));
INIT_LIST_HEAD(&lv->list);
@@ -70,9 +70,9 @@ lbatview_ctr(struct lbatview* lv,
static void
lbatview_dtr(struct lbatview* lv)
{
-lbatpagecache_put(lv->lpc, lv->pages[1]);
+lbatpblkcache_put(lv->lpc, lv->pages[1]);
lv->pages[1] = NULL;
-lbatpagecache_put(lv->lpc, lv->pages[0]);
+lbatpblkcache_put(lv->lpc, lv->pages[0]);
lv->pages[0] = NULL;
pbatcache_put(lv->pc, lv->pbat);
lv->pbat = NULL;
@@ -88,14 +88,14 @@ lbatview_flush(struct lbatview* lv)
mutex_lock(&lv->lock);
if (lv->pages[1]) {
-err = lbatpagecache_put(lv->lpc, lv->pages[1]);
+err = lbatpblkcache_put(lv->lpc, lv->pages[1]);
if (err) {
ret = err;
}
lv->pages[1] = NULL;
}
if (lv->pages[0]) {
-err = lbatpagecache_put(lv->lpc, lv->pages[0]);
+err = lbatpblkcache_put(lv->lpc, lv->pages[0]);
if (err) {
ret = err;
}
@@ -117,26 +117,26 @@ lbatview_reset(struct lbatview* lv, u64 pblk, u32 count)
int ret = 0;
if (lv->pbat) { printk(KERN_ERR "%s: pbat leak\n", __func__); }
-if (lv->pages[0]) { printk(KERN_ERR "%s: lbatpage leak\n", __func__); }
-if (lv->pages[1]) { printk(KERN_ERR "%s: lbatpage leak\n", __func__); }
+if (lv->pages[0]) { printk(KERN_ERR "%s: lbatpblk leak\n", __func__); }
+if (lv->pages[1]) { printk(KERN_ERR "%s: lbatpblk leak\n", __func__); }
lv->pblk = pblk;
if (!ret && count > 0) {
-lv->pages[0] = lbatpagecache_get(lv->lpc, pblk + 0);
+lv->pages[0] = lbatpblkcache_get(lv->lpc, pblk + 0);
if (!lv->pages[0]) {
ret = -EIO;
}
}
if (!ret && count > 1) {
-lv->pages[1] = lbatpagecache_get(lv->lpc, pblk + 1);
+lv->pages[1] = lbatpblkcache_get(lv->lpc, pblk + 1);
if (!lv->pages[1]) {
ret = -EIO;
}
}
if (ret) {
-lbatpagecache_put(lv->lpc, lv->pages[1]);
+lbatpblkcache_put(lv->lpc, lv->pages[1]);
lv->pages[1] = NULL;
-lbatpagecache_put(lv->lpc, lv->pages[0]);
+lbatpblkcache_put(lv->lpc, lv->pages[0]);
lv->pages[0] = NULL;
lv->pblk = PBLK_NONE;
}
@@ -279,19 +279,19 @@ lbatview_rmem(struct lbatview* lv, u32 off, u32 len, void* buf)
}
if (off < PAGE_SIZE && off + len > PAGE_SIZE) {
u32 len0 = PAGE_SIZE - off;
-u8* pagebuf0 = lbatpage_get_buf(lv->pages[0], false);
-u8* pagebuf1 = lbatpage_get_buf(lv->pages[1], false);
+u8* pagebuf0 = lbatpblk_get_buf(lv->pages[0], false);
+u8* pagebuf1 = lbatpblk_get_buf(lv->pages[1], false);
memcpy(buf, pagebuf0 + off, len0);
memcpy(buf + len0, pagebuf1, len - len0);
-lbatpage_put_buf(lv->pages[1]);
-lbatpage_put_buf(lv->pages[0]);
+lbatpblk_put_buf(lv->pages[1]);
+lbatpblk_put_buf(lv->pages[0]);
}
else {
u32 bufidx = off / PAGE_SIZE;
u32 bufoff = off % PAGE_SIZE;
-u8* pagebuf = lbatpage_get_buf(lv->pages[bufidx], false);
+u8* pagebuf = lbatpblk_get_buf(lv->pages[bufidx], false);
memcpy(buf, pagebuf + bufoff, len);
-lbatpage_put_buf(lv->pages[bufidx]);
+lbatpblk_put_buf(lv->pages[bufidx]);
}
}
@@ -305,19 +305,19 @@ lbatview_wmem(struct lbatview* lv, u32 off, u32 len, void* buf)
}
if (off < PAGE_SIZE && off + len > PAGE_SIZE) {
u32 len0 = PAGE_SIZE - off;
-u8* pagebuf0 = lbatpage_get_buf(lv->pages[0], true);
-u8* pagebuf1 = lbatpage_get_buf(lv->pages[1], true);
+u8* pagebuf0 = lbatpblk_get_buf(lv->pages[0], true);
+u8* pagebuf1 = lbatpblk_get_buf(lv->pages[1], true);
memcpy(pagebuf0 + off, buf, len0);
memcpy(pagebuf1, buf + len0, len - len0);
-lbatpage_put_buf(lv->pages[1]);
-lbatpage_put_buf(lv->pages[0]);
+lbatpblk_put_buf(lv->pages[1]);
+lbatpblk_put_buf(lv->pages[0]);
}
else {
u32 bufidx = off / PAGE_SIZE;
u32 bufoff = off % PAGE_SIZE;
-u8* pagebuf = lbatpage_get_buf(lv->pages[bufidx], true);
+u8* pagebuf = lbatpblk_get_buf(lv->pages[bufidx], true);
memcpy(pagebuf + bufoff, buf, len);
-lbatpage_put_buf(lv->pages[bufidx]);
+lbatpblk_put_buf(lv->pages[bufidx]);
}
}
@@ -449,7 +449,7 @@ lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx)
struct lbatviewcache {
struct cbd_params* params;
struct pbatcache* pc;
-struct lbatpagecache* lpc;
+struct lbatpblkcache* lpc;
struct mutex cache_lock;
struct list_head cache_head;
unsigned int cache_len;
@@ -480,14 +480,14 @@ lbatviewcache_ctr(struct lbatviewcache* lvc,
if (!pbatcache_ctr(lvc->pc, kparams, cache_pages)) {
return false;
}
-lvc->lpc = kmalloc(lbatpagecache_size(), GFP_KERNEL);
+lvc->lpc = kmalloc(lbatpblkcache_size(), GFP_KERNEL);
if (!lvc->lpc) {
return false;
}
-if (!lbatpagecache_ctr(lvc->lpc, kparams, cache_pages)) {
+if (!lbatpblkcache_ctr(lvc->lpc, kparams, cache_pages)) {
return false;
}
-/* lbatviewcache gets one entry per lbatpage (XXX: 5/6?) */
+/* lbatviewcache gets one entry per lbatpblk (XXX: 5/6?) */
cache_len = (cache_pages * 15 / 32);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
@@ -532,7 +532,7 @@ lbatviewcache_dtr(struct lbatviewcache* lvc)
lvc->cache = NULL;
lvc->cache_len = 0;
INIT_LIST_HEAD(&lvc->cache_head);
-lbatpagecache_dtr(lvc->lpc);
+lbatpblkcache_dtr(lvc->lpc);
kfree(lvc->lpc);
lvc->lpc = NULL;
pbatcache_dtr(lvc->pc);

View File

@@ -558,18 +558,18 @@ struct pbat*
int pbatcache_put(struct pbatcache* pbatcache, struct pbat* pbat);
-struct lbatpage;
-u8* lbatpage_get_buf(struct lbatpage* lp, bool rw);
-void lbatpage_put_buf(struct lbatpage* lp);
+struct lbatpblk;
+u8* lbatpblk_get_buf(struct lbatpblk* lp, bool rw);
+void lbatpblk_put_buf(struct lbatpblk* lp);
-struct lbatpagecache;
-size_t lbatpagecache_size(void);
-bool lbatpagecache_ctr(struct lbatpagecache* lpc,
+struct lbatpblkcache;
+size_t lbatpblkcache_size(void);
+bool lbatpblkcache_ctr(struct lbatpblkcache* lpc,
struct compress_params* kparams, u32 cache_pages);
-void lbatpagecache_dtr(struct lbatpagecache* lpc);
-struct lbatpage*
-lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk);
-int lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lpi);
+void lbatpblkcache_dtr(struct lbatpblkcache* lpc);
+struct lbatpblk*
+lbatpblkcache_get(struct lbatpblkcache* lpc, u64 pblk);
+int lbatpblkcache_put(struct lbatpblkcache* lpc, struct lbatpblk* lpi);
struct lbatview;
int lbatview_elem_realloc(struct lbatview* lv, u64 lblk, u32 len);