Implement better caching

* Total cache size defaults to the minimum of:
   - 1/1k of system RAM.
   - 1/64k of the backing device.
   with a floor of 32 * 2 * CPUs pages. The floor ensures that we have
   at least 2*CPUs lbd objects, which are required for the top level
   compress operations (sketched below).

 * Implement LRU for all caches.

 * Track pblk full and last allocated block for better performance.

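A minimal sketch of that default sizing as wired into compress_ctr() below (kernel context; the default_cache_pages() wrapper is illustrative and not part of this commit):

#include <linux/kernel.h>	/* min(), max() */
#include <linux/mm.h>		/* totalram_pages */
#include <linux/cpumask.h>	/* num_online_cpus() */

/* Default cache_pages when no cache_pages= argument is given. */
static u32 default_cache_pages(u64 backing_nr_pblks)
{
	u32 pages = min((u32)(totalram_pages >> 10),	/* 1/1k of system RAM pages */
			(u32)(backing_nr_pblks >> 16));	/* 1/64k of backing pblks   */
	u32 floor = 32 * 2 * num_online_cpus();		/* >= 2*CPUs lbd objects    */

	return max(pages, floor);
}

For example, assuming 4 KiB pages and pblks: 8 GiB of RAM (2,097,152 pages) and a 64 GiB backing device (16,777,216 pblks) give min(2048, 256) = 256 pages, and on a 4-CPU system the floor is 32 * 2 * 4 = 256, so the cache stays at 256 pages (1 MiB).
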
The cache_pages parameter could still use better parsing (e.g. size
suffixes) and more granularity, but this is workable for now.
Tom Marshall 2019-11-06 13:18:16 -08:00
parent 3e81efb9f6
commit a2e4f303fd
6 changed files with 242 additions and 258 deletions


@@ -380,8 +380,9 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int ret;
unsigned int argn;
u32 cache_pages = 0;
struct compress *c = NULL;
u64 dev_nr_pblks;
u64 backing_nr_pblks;
printk(KERN_INFO "%s: enter: argc=%u\n", __func__, argc);
for (argn = 0; argn < argc; ++argn) {
@@ -397,6 +398,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
const char* arg = argv[argn++];
const char* val = NULL;
const char* eq = strchr(arg, '=');
int err;
if (eq) {
val = eq + 1;
}
@@ -410,6 +412,15 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
continue;
}
#endif
/* XXX: Parse suffixes */
if (!memcmp(arg, "cache_pages", 7)) {
err = kstrtouint(eq + 1, 0, &cache_pages);
if (err) {
ti->error = "Failed to parse cache_pages";
return -EINVAL;
}
continue;
}
ti->error = "Unrecognized argument";
return -EINVAL;
}
@@ -428,14 +439,25 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = c;
dev_nr_pblks = dm_target_pblk_size(ti);
backing_nr_pblks = blkdev_pblk_size(c->dev->bdev);
if (get_order(dev_nr_pblks) >= 48) {
if ((backing_nr_pblks >> 48) != 0) {
ti->error = "Device too large";
ret = -EINVAL;
goto err;
}
if (!cache_pages) {
/* Minimum of 1/1k RAM and 1/64k device size */
cache_pages = min((unsigned int)(totalram_pages >> 10),
(unsigned int)(backing_nr_pblks >> 16));
if (cache_pages < 32 * 2 * num_online_cpus()) {
cache_pages = 32 * 2 * num_online_cpus();
}
}
printk(KERN_INFO "%s: pages=%lu pblks=%lu cache_pages=%u\n",
__func__, totalram_pages, (unsigned long)backing_nr_pblks, cache_pages);
ti->per_io_data_size = ALIGN(sizeof(struct compress_io), ARCH_KMALLOC_MINALIGN);
ret = compress_read_header(c);
@@ -456,13 +478,13 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
/* XXX: validate minimum pblk using zone_off(max_zone+1) */
if (c->params.nr_pblk > dev_nr_pblks) {
if (c->params.nr_pblk > backing_nr_pblks) {
printk(KERN_ERR "%s: bad nr_pblk\n", __func__);
ret = -EINVAL;
goto err;
}
if (c->params.nr_zones > zone_for_pblk(&c->params, dev_nr_pblks)) {
if (c->params.nr_zones > zone_for_pblk(&c->params, backing_nr_pblks)) {
printk(KERN_ERR "%s: bad nr_zones\n", __func__);
ret = -EINVAL;
goto err;
@@ -476,7 +498,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = -ENOMEM;
goto err;
}
if (!lbdcache_ctr(c->lc, &c->params)) {
if (!lbdcache_ctr(c->lc, &c->params, cache_pages)) {
printk(KERN_ERR "Failed to init logical block cache\n");
ret = -ENOMEM;
goto err;

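The /* XXX: Parse suffixes */ note above and the closing remark in the summary point at the same gap: cache_pages only accepts a bare page count. One possible follow-up, not part of this commit and shown here only as a sketch, would be to parse the value with the kernel's memparse() helper; note this would redefine the value as a byte size rather than a page count:

/* Hypothetical replacement for the kstrtouint() branch above.  memparse()
 * accepts K/M/G suffixes and returns bytes, converted back to pages here. */
if (!memcmp(arg, "cache_pages", 11)) {
	u64 bytes;

	if (!val) {
		ti->error = "cache_pages requires a value";
		return -EINVAL;
	}
	bytes = memparse(val, NULL);
	cache_pages = (u32)(bytes >> PAGE_SHIFT);
	if (!cache_pages) {
		ti->error = "Failed to parse cache_pages";
		return -EINVAL;
	}
	continue;
}

With something like that, a table line could say cache_pages=16M instead of cache_pages=4096 (for 4 KiB pages).
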

@@ -30,6 +30,7 @@
#include <linux/dm-compress.h>
struct lbatpage {
struct list_head list;
u64 pblk;
struct mutex reflock;
unsigned int ref;
@@ -44,6 +45,8 @@ struct lbatpage {
static bool
lbatpage_ctr(struct lbatpage* lp, struct cbd_params* params)
{
memset(lp, 0, sizeof(struct lbatpage));
INIT_LIST_HEAD(&lp->list);
lp->pblk = PBLK_NONE;
mutex_init(&lp->reflock);
lp->ref = 0;
@@ -151,8 +154,9 @@ lbatpage_put_buf(struct lbatpage* lp)
struct lbatpagecache {
struct mutex lock;
struct cbd_params* params;
unsigned int len;
struct lbatpage** cache;
struct list_head cache_head;
unsigned int cache_len;
struct lbatpage* cache;
};
size_t
@@ -161,48 +165,40 @@ lbatpagecache_size(void)
return sizeof(struct lbatpagecache);
}
static bool
lbatpagecache_realloc(struct lbatpagecache* lpc, unsigned int len)
{
struct lbatpage** cache;
unsigned int n;
struct lbatpage* lp;
cache = kzalloc(len * sizeof(struct lbatpage*), GFP_KERNEL);
if (!cache) {
return false;
}
n = 0;
if (lpc->len) {
memcpy(cache, lpc->cache, lpc->len * sizeof(struct lbatpage*));
n = lpc->len;
kfree(lpc->cache);
}
lpc->len = len;
lpc->cache = cache;
while (n < len) {
lp = kmalloc(sizeof(struct lbatpage), GFP_KERNEL);
if (!lp) {
return false;
}
cache[n++] = lp;
if (!lbatpage_ctr(lp, lpc->params)) {
return false;
}
}
return true;
}
bool
lbatpagecache_ctr(struct lbatpagecache* lpc,
struct cbd_params* params)
struct cbd_params* params, u32 cache_pages)
{
struct lbatpage* cache;
u32 cache_len;
u32 n;
memset(lpc, 0, sizeof(struct lbatpagecache));
mutex_init(&lpc->lock);
lpc->params = params;
return lbatpagecache_realloc(lpc, 1);
/* lbatpagecache gets 15/32 of cache pages */
cache_len = (cache_pages * 15 / 32);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
return false;
}
printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
cache = kzalloc(cache_len * sizeof(struct lbatpage), GFP_KERNEL);
if (!cache) {
return false;
}
INIT_LIST_HEAD(&lpc->cache_head);
lpc->cache_len = cache_len;
lpc->cache = cache;
for (n = 0; n < cache_len; ++n) {
if (!lbatpage_ctr(&cache[n], lpc->params)) {
return false;
}
list_add_tail(&cache[n].list, &lpc->cache_head);
}
return true;
}
void
@@ -211,8 +207,8 @@ lbatpagecache_dtr(struct lbatpagecache* lpc)
unsigned int n;
struct lbatpage* lp;
for (n = 0; n < lpc->len; ++n) {
lp = lpc->cache[n];
for (n = 0; n < lpc->cache_len; ++n) {
lp = &lpc->cache[n];
if (!lp) {
continue;
}
@@ -220,25 +216,24 @@ lbatpagecache_dtr(struct lbatpagecache* lpc)
if (lp->ref) {
printk(KERN_ERR "%s: lbatpage ref leak: n=%u ref=%u\n", __func__, n, lp->ref);
}
kfree(lp);
}
kfree(lpc->cache);
lpc->cache = NULL;
lpc->len = 0;
lpc->cache_len = 0;
INIT_LIST_HEAD(&lpc->cache_head);
lpc->params = NULL;
}
struct lbatpage*
lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
{
unsigned int n;
struct lbatpage* lp;
mutex_lock(&lpc->lock);
for (n = 0; n < lpc->len; ++n) {
lp = lpc->cache[n];
list_for_each_entry(lp, &lpc->cache_head, list) {
mutex_lock(&lp->reflock);
if (lp->pblk == pblk) {
list_move(&lp->list, &lpc->cache_head);
if (lp->ref == 0) {
goto found;
}
@@ -247,32 +242,23 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
mutex_unlock(&lp->reflock);
return lp;
}
mutex_unlock(&lp->reflock);
}
for (n = 0; n < lpc->len; ++n) {
lp = lpc->cache[n];
mutex_lock(&lp->reflock);
if (lp->pblk == PBLK_NONE) {
list_move(&lp->list, &lpc->cache_head);
goto found;
}
mutex_unlock(&lp->reflock);
}
for (n = 0; n < lpc->len; ++n) {
lp = lpc->cache[n];
list_for_each_entry_reverse(lp, &lpc->cache_head, list) {
mutex_lock(&lp->reflock);
if (lp->ref == 0 && !lbatpage_error(lp)) {
list_move(&lp->list, &lpc->cache_head);
goto found;
}
mutex_unlock(&lp->reflock);
}
n = lpc->len;
if (!lbatpagecache_realloc(lpc, lpc->len * 2)) {
printk(KERN_ERR "%s: realloc failed\n", __func__);
mutex_unlock(&lpc->lock);
return NULL;
}
lp = lpc->cache[n];
mutex_lock(&lp->reflock);
printk(KERN_ERR "%s: failed to find free entry\n", __func__);
mutex_unlock(&lpc->lock);
return NULL;
found:
mutex_unlock(&lpc->lock);

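The lookup and eviction logic above is the same in all four caches (lbatpage, lbatview, lbd, pbat): entries sit on a list, a hit is moved to the head, and a miss reclaims the least recently used unreferenced entry from the tail instead of growing the cache the way the old realloc path did. A condensed sketch of that pattern (generic names and simplified refcount handling; not code from this commit):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct entry {
	struct list_head list;
	u64 key;
	struct mutex reflock;
	unsigned int ref;
};

struct cache {
	struct mutex lock;
	struct list_head head;	/* most recently used entries at the front */
};

static struct entry*
cache_get(struct cache* c, u64 key)
{
	struct entry* e;

	mutex_lock(&c->lock);
	/* Hit: move to the front so hot entries stay away from the eviction end. */
	list_for_each_entry(e, &c->head, list) {
		mutex_lock(&e->reflock);
		if (e->key == key) {
			list_move(&e->list, &c->head);
			++e->ref;
			mutex_unlock(&e->reflock);
			mutex_unlock(&c->lock);
			return e;
		}
		mutex_unlock(&e->reflock);
	}
	/* Miss: reclaim the least recently used unreferenced entry from the tail. */
	list_for_each_entry_reverse(e, &c->head, list) {
		mutex_lock(&e->reflock);
		if (e->ref == 0) {
			list_move(&e->list, &c->head);
			e->key = key;	/* the real code (re)loads the entry here */
			e->ref = 1;
			mutex_unlock(&e->reflock);
			mutex_unlock(&c->lock);
			return e;
		}
		mutex_unlock(&e->reflock);
	}
	/* Everything is pinned: fail instead of growing the cache. */
	mutex_unlock(&c->lock);
	return NULL;
}
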

@@ -30,6 +30,7 @@
#include <linux/dm-compress.h>
struct lbatview {
struct list_head list;
u64 pblk;
struct mutex reflock;
unsigned int ref;
@@ -49,6 +50,7 @@ lbatview_ctr(struct lbatview* lv,
struct lbatpagecache* lpc)
{
memset(lv, 0, sizeof(struct lbatview));
INIT_LIST_HEAD(&lv->list);
lv->pblk = PBLK_NONE;
mutex_init(&lv->reflock);
lv->ref = 0;
@@ -429,8 +431,9 @@ struct lbatviewcache {
struct cbd_params* params;
struct pbatcache* pc;
struct lbatpagecache* lpc;
unsigned int len;
struct lbatview** cache;
struct list_head cache_head;
unsigned int cache_len;
struct lbatview* cache;
};
size_t
@@ -439,43 +442,14 @@ lbatviewcache_size(void)
return sizeof(struct lbatviewcache);
}
static bool
lbatviewcache_realloc(struct lbatviewcache* lvc, unsigned int len)
{
struct lbatview** cache;
unsigned int n;
struct lbatview* lv;
cache = kzalloc(len * sizeof(struct lbatview*), GFP_KERNEL);
if (!cache) {
return false;
}
n = 0;
if (lvc->len) {
memcpy(cache, lvc->cache, lvc->len * sizeof(struct lbatview*));
n = lvc->len;
kfree(lvc->cache);
}
lvc->len = len;
lvc->cache = cache;
while (n < len) {
lv = kmalloc(sizeof(struct lbatview), GFP_KERNEL);
if (!lv) {
return false;
}
cache[n++] = lv;
if (!lbatview_ctr(lv, lvc->params, lvc->pc, lvc->lpc)) {
return false;
}
}
return true;
}
bool
lbatviewcache_ctr(struct lbatviewcache* lvc,
struct cbd_params* params)
struct cbd_params* params, u32 cache_pages)
{
struct lbatview* cache;
u32 cache_len;
u32 n;
memset(lvc, 0, sizeof(struct lbatviewcache));
mutex_init(&lvc->lock);
lvc->params = params;
@@ -483,18 +457,38 @@ lbatviewcache_ctr(struct lbatviewcache* lvc,
if (!lvc->pc) {
return false;
}
if (!pbatcache_ctr(lvc->pc, params)) {
if (!pbatcache_ctr(lvc->pc, params, cache_pages)) {
return false;
}
lvc->lpc = kmalloc(lbatpagecache_size(), GFP_KERNEL);
if (!lvc->lpc) {
return false;
}
if (!lbatpagecache_ctr(lvc->lpc, params)) {
if (!lbatpagecache_ctr(lvc->lpc, params, cache_pages)) {
return false;
}
/* lbatviewcache gets one entry per lbatpage (XXX: 5/6?) */
cache_len = (cache_pages * 15 / 32);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
return false;
}
printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
cache = kzalloc(cache_len * sizeof(struct lbatview), GFP_KERNEL);
if (!cache) {
return false;
}
INIT_LIST_HEAD(&lvc->cache_head);
lvc->cache_len = cache_len;
lvc->cache = cache;
for (n = 0; n < cache_len; ++n) {
if (!lbatview_ctr(&cache[n], lvc->params, lvc->pc, lvc->lpc)) {
return false;
}
list_add_tail(&cache[n].list, &lvc->cache_head);
}
return lbatviewcache_realloc(lvc, 1);
return true;
}
void
@@ -503,8 +497,8 @@ lbatviewcache_dtr(struct lbatviewcache* lvc)
unsigned int n;
struct lbatview* lv;
for (n = 0; n < lvc->len; ++n) {
lv = lvc->cache[n];
for (n = 0; n < lvc->cache_len; ++n) {
lv = &lvc->cache[n];
if (!lv) {
continue;
}
@@ -512,11 +506,11 @@ lbatviewcache_dtr(struct lbatviewcache* lvc)
if (lv->ref) {
printk(KERN_ERR "%s: lbatview ref leak: n=%u ref=%u\n", __func__, n, lv->ref);
}
kfree(lv);
}
kfree(lvc->cache);
lvc->cache = NULL;
lvc->len = 0;
lvc->cache_len = 0;
INIT_LIST_HEAD(&lvc->cache_head);
lbatpagecache_dtr(lvc->lpc);
kfree(lvc->lpc);
lvc->lpc = NULL;
@@ -537,7 +531,6 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
u64 pblk;
u32 count;
unsigned int n;
struct lbatview* lv;
zone = lblk / lvc->params->lblk_per_zone;
@@ -549,10 +542,10 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
count = (rel_pblk == lbat_len(lvc->params) - 1) ? 1 : 2;
mutex_lock(&lvc->lock);
for (n = 0; n < lvc->len; ++n) {
lv = lvc->cache[n];
list_for_each_entry(lv, &lvc->cache_head, list) {
mutex_lock(&lv->reflock);
if (lv->pblk == pblk) {
list_move(&lv->list, &lvc->cache_head);
if (lv->ref == 0) {
goto found;
}
@@ -561,32 +554,23 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
mutex_unlock(&lv->reflock);
return lv;
}
mutex_unlock(&lv->reflock);
}
for (n = 0; n < lvc->len; ++n) {
lv = lvc->cache[n];
mutex_lock(&lv->reflock);
if (lv->pblk == PBLK_NONE) {
list_move(&lv->list, &lvc->cache_head);
goto found;
}
mutex_unlock(&lv->reflock);
}
for (n = 0; n < lvc->len; ++n) {
lv = lvc->cache[n];
list_for_each_entry_reverse(lv, &lvc->cache_head, list) {
mutex_lock(&lv->reflock);
if (lv->ref == 0) {
list_move(&lv->list, &lvc->cache_head);
goto found;
}
mutex_unlock(&lv->reflock);
}
n = lvc->len;
if (!lbatviewcache_realloc(lvc, lvc->len * 2)) {
printk(KERN_ERR "%s: realloc failed\n", __func__);
mutex_unlock(&lvc->lock);
return NULL;
}
lv = lvc->cache[n];
mutex_lock(&lv->reflock);
printk(KERN_ERR "%s: failed to find free entry\n", __func__);
mutex_unlock(&lvc->lock);
return NULL;
found:
mutex_unlock(&lvc->lock);


@@ -31,6 +31,7 @@
#include <linux/dm-compress.h>
struct lbd {
struct list_head list;
u64 lblk;
struct mutex reflock;
unsigned int ref;
@@ -254,6 +255,7 @@ lbd_ctr(struct lbd* lbd,
u32 nr_pages = lblk_per_pblk(params);
memset(lbd, 0, sizeof(struct lbd));
INIT_LIST_HEAD(&lbd->list);
lbd->lblk = LBLK_NONE;
mutex_init(&lbd->reflock);
lbd->ref = 0;
@@ -507,8 +509,9 @@ struct lbdcache
struct cbd_params* params;
void* percpu;
struct lbatviewcache* lvc;
unsigned int len;
struct lbd** cache;
struct list_head cache_head;
unsigned int cache_len;
struct lbd* cache;
};
size_t
@@ -517,39 +520,6 @@ lbdcache_size(void)
return sizeof(struct lbdcache);
}
static bool
lbdcache_realloc(struct lbdcache* lc, unsigned int len)
{
struct lbd** cache;
unsigned int n;
struct lbd* lbd;
cache = kzalloc(len * sizeof(struct lbd*), GFP_KERNEL);
if (!cache) {
return false;
}
n = 0;
if (lc->len) {
memcpy(cache, lc->cache, lc->len * sizeof(struct lbd*));
n = lc->len;
kfree(lc->cache);
}
lc->len = len;
lc->cache = cache;
while (n < len) {
lbd = kzalloc(sizeof(struct lbd), GFP_KERNEL);
if (!lbd) {
return false;
}
cache[n++] = lbd;
if (!lbd_ctr(lbd, lc->params, lc->lvc, lc->percpu)) {
return false;
}
}
return true;
}
static bool
lbdcache_alloc_compress_state(void* percpu, const struct cbd_params* params, int cpu)
{
@@ -624,9 +594,12 @@ lbdcache_free_compress_state(void* percpu, const struct cbd_params* params, int
bool
lbdcache_ctr(struct lbdcache* lc,
struct cbd_params* params)
struct cbd_params* params, u32 cache_pages)
{
int cpu;
struct lbd* cache;
u32 cache_len;
u32 n;
memset(lc, 0, sizeof(struct lbdcache));
mutex_init(&lc->lock);
@@ -641,11 +614,32 @@ lbdcache_ctr(struct lbdcache* lc,
if (!lc->lvc) {
return false;
}
if (!lbatviewcache_ctr(lc->lvc, params)) {
if (!lbatviewcache_ctr(lc->lvc, params, cache_pages)) {
return false;
}
return lbdcache_realloc(lc, 1);
/* lbdcache gets 1/2 of cache_pages */
cache_len = (cache_pages * 1 / 2) / lblk_per_pblk(params);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
return false;
}
printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
cache = kzalloc(cache_len * sizeof(struct lbd), GFP_KERNEL);
if (!cache) {
return false;
}
INIT_LIST_HEAD(&lc->cache_head);
lc->cache_len = cache_len;
lc->cache = cache;
for (n = 0; n < cache_len; ++n) {
if (!lbd_ctr(&cache[n], params, lc->lvc, lc->percpu)) {
return false;
}
list_add_tail(&cache[n].list, &lc->cache_head);
}
return true;
}
void
@@ -655,8 +649,8 @@ lbdcache_dtr(struct lbdcache* lc)
struct lbd* lbd;
int cpu;
for (n = 0; n < lc->len; ++n) {
lbd = lc->cache[n];
for (n = 0; n < lc->cache_len; ++n) {
lbd = &lc->cache[n];
if (!lbd) {
continue;
}
@@ -664,11 +658,11 @@ lbdcache_dtr(struct lbdcache* lc)
if (lbd->ref) {
printk(KERN_ERR "%s: lbd ref leak: n=%u ref=%u\n", __func__, n, lbd->ref);
}
kfree(lbd);
}
kfree(lc->cache);
lc->cache = NULL;
lc->len = 0;
lc->cache_len = 0;
INIT_LIST_HEAD(&lc->cache_head);
lbatviewcache_dtr(lc->lvc);
kfree(lc->lvc);
lc->lvc = NULL;
@@ -683,14 +677,13 @@ lbdcache_dtr(struct lbdcache* lc)
struct lbd*
lbdcache_get(struct lbdcache* lc, u64 lblk)
{
unsigned int n;
struct lbd* lbd;
mutex_lock(&lc->lock);
for (n = 0; n < lc->len; ++n) {
lbd = lc->cache[n];
list_for_each_entry(lbd, &lc->cache_head, list) {
mutex_lock(&lbd->reflock);
if (lbd->lblk == lblk) {
list_move(&lbd->list, &lc->cache_head);
if (lbd->ref == 0) {
goto found;
}
@@ -699,32 +692,23 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
mutex_unlock(&lbd->reflock);
return lbd;
}
mutex_unlock(&lbd->reflock);
}
for (n = 0; n < lc->len; ++n) {
lbd = lc->cache[n];
mutex_lock(&lbd->reflock);
if (lbd->lblk == LBLK_NONE) {
list_move(&lbd->list, &lc->cache_head);
goto found;
}
mutex_unlock(&lbd->reflock);
}
for (n = 0; n < lc->len; ++n) {
lbd = lc->cache[n];
list_for_each_entry_reverse(lbd, &lc->cache_head, list) {
mutex_lock(&lbd->reflock);
if (lbd->ref == 0 && !lbd_error(lbd)) {
list_move(&lbd->list, &lc->cache_head);
goto found;
}
mutex_unlock(&lbd->reflock);
}
n = lc->len;
if (!lbdcache_realloc(lc, lc->len * 2)) {
printk(KERN_ERR "%s: realloc failed\n", __func__);
mutex_unlock(&lc->lock);
return NULL;
}
lbd = lc->cache[n];
mutex_lock(&lbd->reflock);
printk(KERN_ERR "%s: failed to find free entry\n", __func__);
mutex_unlock(&lc->lock);
return NULL;
found:
mutex_unlock(&lc->lock);

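Across the four caches the fixed budget is split as pbatcache 1/32, lbatpagecache 15/32 and lbdcache 1/2 of cache_pages (1/32 + 15/32 + 16/32 = 1), while lbatviewcache sizes itself to one entry per lbatpage (the same 15/32 count) rather than taking its own page share. A back-of-the-envelope check of the "2*CPUs lbd objects" claim from the summary, assuming lblk_per_pblk(params) is at most 16:

/* cache_pages       >= 32 * 2 * num_online_cpus()          (floor in compress_ctr)
 * lbd share (1/2)   >= 32 * num_online_cpus() pages
 * lbd entries       >= 32 * num_online_cpus() / lblk_per_pblk(params)
 *                   >= 2 * num_online_cpus()                for lblk_per_pblk <= 16
 */
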

@@ -30,12 +30,15 @@
#include <linux/dm-compress.h>
struct pbat {
struct list_head list;
u32 zone;
struct mutex reflock;
unsigned int ref;
struct mutex lock;
struct cbd_params* params;
bool full;
u32 last_alloc;
struct page** pagev;
u8* buf;
};
@@ -47,11 +50,14 @@ pbat_ctr(struct pbat* pbat,
u32 nr_pages = pbat_len(params);
memset(pbat, 0, sizeof(struct pbat));
INIT_LIST_HEAD(&pbat->list);
pbat->zone = ZONE_NONE;
mutex_init(&pbat->reflock);
pbat->ref = 0;
mutex_init(&pbat->lock);
pbat->params = params;
pbat->full = false;
pbat->last_alloc = 0;
pbat->pagev = kzalloc(nr_pages * sizeof(struct page*), GFP_KERNEL);
if (!pbat->pagev) {
return false;
@@ -149,6 +155,8 @@ pbat_reset(struct pbat* pbat, u32 zone)
}
if (pbat->zone != zone) {
pbat->zone = zone;
pbat->full = false;
pbat->last_alloc = 0;
ret = pbat_read(pbat);
}
@@ -176,11 +184,17 @@ pbat_alloc(struct pbat* pbat)
u64 pblk;
mutex_lock(&pbat->lock);
idx = cbd_bitmap_alloc(pbat->buf, pblk_count);
if (idx == pblk_count) {
if (pbat->full) {
pblk = PBLK_NONE;
goto out;
}
idx = cbd_bitmap_alloc(pbat->buf, pblk_count, pbat->last_alloc);
if (idx == pblk_count) {
pbat->full = true;
pblk = PBLK_NONE;
goto out;
}
pbat->last_alloc = idx;
pblk = idx + zone_data_off(pbat->params, pbat->zone);
SetPageDirty(pbat->pagev[0]);
@@ -206,6 +220,7 @@ pbat_free(struct pbat* pbat, u64 pblk)
BUG_ON(idx >= pblk_count);
mutex_lock(&pbat->lock);
cbd_bitmap_free(pbat->buf, idx);
pbat->full = false;
SetPageDirty(pbat->pagev[0]);
mutex_unlock(&pbat->lock);
@@ -215,8 +230,9 @@ pbat_free(struct pbat* pbat, u64 pblk)
struct pbatcache {
struct mutex lock;
struct cbd_params* params;
unsigned int len;
struct pbat** cache;
struct list_head cache_head;
unsigned int cache_len;
struct pbat* cache;
};
size_t
@@ -225,48 +241,40 @@ pbatcache_size(void)
return sizeof(struct pbatcache);
}
static bool
pbatcache_realloc(struct pbatcache* pc, unsigned int len)
{
struct pbat** cache;
unsigned int n;
struct pbat* pbat;
cache = kzalloc(len * sizeof(struct pbat*), GFP_KERNEL);
if (!cache) {
return false;
}
n = 0;
if (pc->len) {
memcpy(cache, pc->cache, pc->len * sizeof(struct pbat*));
n = pc->len;
kfree(pc->cache);
}
pc->len = len;
pc->cache = cache;
while (n < len) {
pbat = kmalloc(sizeof(struct pbat), GFP_KERNEL);
if (!pbat) {
return false;
}
cache[n++] = pbat;
if (!pbat_ctr(pbat, pc->params)) {
return false;
}
}
return true;
}
bool
pbatcache_ctr(struct pbatcache* pc,
struct cbd_params* params)
struct cbd_params* params, u32 cache_pages)
{
struct pbat* cache;
u32 cache_len;
u32 n;
memset(pc, 0, sizeof(struct pbatcache));
mutex_init(&pc->lock);
pc->params = params;
return pbatcache_realloc(pc, 1);
/* pbatcache gets 1/32 of cache_pages */
cache_len = (cache_pages * 1 / 32) / pbat_len(params);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
return false;
}
printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
cache = kzalloc(cache_len * sizeof(struct pbat), GFP_KERNEL);
if (!cache) {
return false;
}
INIT_LIST_HEAD(&pc->cache_head);
pc->cache_len = cache_len;
pc->cache = cache;
for (n = 0; n < cache_len; ++n) {
if (!pbat_ctr(&cache[n], pc->params)) {
return false;
}
list_add_tail(&cache[n].list, &pc->cache_head);
}
return true;
}
void
@@ -275,8 +283,8 @@ pbatcache_dtr(struct pbatcache* pc)
unsigned int n;
struct pbat* pbat;
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
for (n = 0; n < pc->cache_len; ++n) {
pbat = &pc->cache[n];
if (!pbat) {
continue;
}
@@ -284,25 +292,24 @@ pbatcache_dtr(struct pbatcache* pc)
if (pbat->ref) {
printk(KERN_ERR "%s: pbat ref leak: n=%u ref=%u\n", __func__, n, pbat->ref);
}
kfree(pbat);
}
kfree(pc->cache);
pc->cache = NULL;
pc->len = 0;
pc->cache_len = 0;
INIT_LIST_HEAD(&pc->cache_head);
pc->params = NULL;
}
struct pbat*
pbatcache_get(struct pbatcache* pc, u32 zone)
{
unsigned int n;
struct pbat* pbat;
mutex_lock(&pc->lock);
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
list_for_each_entry(pbat, &pc->cache_head, list) {
mutex_lock(&pbat->reflock);
if (pbat->zone == zone) {
list_move(&pbat->list, &pc->cache_head);
if (pbat->ref == 0) {
goto found;
}
@@ -311,33 +318,23 @@ pbatcache_get(struct pbatcache* pc, u32 zone)
mutex_unlock(&pbat->reflock);
return pbat;
}
mutex_unlock(&pbat->reflock);
}
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->zone == ZONE_NONE) {
list_move(&pbat->list, &pc->cache_head);
goto found;
}
mutex_unlock(&pbat->reflock);
}
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
list_for_each_entry_reverse(pbat, &pc->cache_head, list) {
mutex_lock(&pbat->reflock);
if (pbat->ref == 0 && !pbat_error(pbat)) {
list_move(&pbat->list, &pc->cache_head);
goto found;
}
mutex_unlock(&pbat->reflock);
}
n = pc->len;
if (!pbatcache_realloc(pc, pc->len * 2)) {
printk(KERN_ERR "%s: realloc failed\n", __func__);
mutex_unlock(&pc->lock);
return NULL;
}
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
printk(KERN_ERR "%s: failed to find free entry\n", __func__);
mutex_unlock(&pc->lock);
return NULL;
found:
mutex_unlock(&pc->lock);


@@ -167,22 +167,33 @@ put64_le(u8** raw, u64 val)
/* XXX: Use kernel bit functions */
static inline u32
cbd_bitmap_alloc(u8* buf, u32 bitsize)
cbd_bitmap_alloc(u8* buf, u32 bitsize, u32 hint)
{
u32 off = 0;
u32 bit = 0;
u32 off;
u32 bit;
for (off = 0; off < bitsize / BITS_PER_BYTE; ++off) {
for (off = hint / BITS_PER_BYTE; off < bitsize / BITS_PER_BYTE; ++off) {
if (buf[off] != 0xff) {
bit = 0;
while (buf[off] & (1 << bit)) {
++bit;
}
buf[off] |= (1 << bit);
break;
return off * BITS_PER_BYTE + bit;
}
}
for (off = 0; off < hint / BITS_PER_BYTE; ++off) {
if (buf[off] != 0xff) {
bit = 0;
while (buf[off] & (1 << bit)) {
++bit;
}
buf[off] |= (1 << bit);
return off * BITS_PER_BYTE + bit;
}
}
return off * BITS_PER_BYTE + bit;
return bitsize;
}
/* XXX: Use kernel bit functions */
@@ -498,7 +509,7 @@ int pbat_free(struct pbat* pbat, u64 pblk);
struct pbatcache;
size_t pbatcache_size(void);
bool pbatcache_ctr(struct pbatcache* pbatcache,
struct cbd_params* params);
struct cbd_params* params, u32 cache_pages);
void pbatcache_dtr(struct pbatcache* pbatcache);
struct pbat*
pbatcache_get(struct pbatcache* pbatcache, u32 zone);
@@ -512,7 +523,7 @@ void lbatpage_put_buf(struct lbatpage* lp);
struct lbatpagecache;
size_t lbatpagecache_size(void);
bool lbatpagecache_ctr(struct lbatpagecache* lpc,
struct cbd_params* params);
struct cbd_params* params, u32 cache_pages);
void lbatpagecache_dtr(struct lbatpagecache* lpc);
struct lbatpage*
lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk);
@@ -526,7 +537,7 @@ u64 lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx);
struct lbatviewcache;
size_t lbatviewcache_size(void);
bool lbatviewcache_ctr(struct lbatviewcache* lvc,
struct cbd_params* params);
struct cbd_params* params, u32 cache_pages);
void lbatviewcache_dtr(struct lbatviewcache* lvc);
struct lbatview*
lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk);
@@ -540,7 +551,7 @@ void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);
struct lbdcache;
size_t lbdcache_size(void);
bool lbdcache_ctr(struct lbdcache* lc,
struct cbd_params* params);
struct cbd_params* params, u32 cache_pages);
void lbdcache_dtr(struct lbdcache* lc);
struct lbd*
lbdcache_get(struct lbdcache* lc, u64 lblk);
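
The hint parameter added to cbd_bitmap_alloc() searches from the hinted byte to the end of the bitmap and then wraps around to the beginning, so pbat_alloc() can resume near last_alloc without losing any free bits earlier in the zone. A small standalone harness (a userspace restatement of the same search, for illustration only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Userspace restatement of the wrap-around search in cbd_bitmap_alloc(). */
static uint32_t bitmap_alloc(uint8_t* buf, uint32_t bitsize, uint32_t hint)
{
	uint32_t off, bit;

	for (off = hint / BITS_PER_BYTE; off < bitsize / BITS_PER_BYTE; ++off) {
		if (buf[off] != 0xff) {
			bit = 0;
			while (buf[off] & (1 << bit)) ++bit;
			buf[off] |= (1 << bit);
			return off * BITS_PER_BYTE + bit;
		}
	}
	for (off = 0; off < hint / BITS_PER_BYTE; ++off) {
		if (buf[off] != 0xff) {
			bit = 0;
			while (buf[off] & (1 << bit)) ++bit;
			buf[off] |= (1 << bit);
			return off * BITS_PER_BYTE + bit;
		}
	}
	return bitsize;	/* no free bit: the caller treats this as "zone full" */
}

int main(void)
{
	uint8_t map[4];
	uint32_t idx;

	memset(map, 0, sizeof(map));
	map[2] = 0xff;	/* bits 16..23 in use */
	map[3] = 0xff;	/* bits 24..31 in use */

	idx = bitmap_alloc(map, 32, 20);	/* hint lands in the busy tail...     */
	printf("allocated bit %u\n", idx);	/* ...so the search wraps and prints 0 */
	return 0;
}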