Implement better caching
* Total cache size is the minimum of:
  - 1/1k of system RAM.
  - 1/64k of the backing device.
  with a floor of 32 * 2 * CPUs pages. The floor ensures we have at least
  2*CPUs lbd objects, which the top-level compress operations require.
* Implement LRU for all caches.
* Track pbat full status and the last allocated pblk for better performance.

Still could use better parsing for the cache_pages parameter and more
granularity, but this is workable for now.
parent 3e81efb9f6
commit a2e4f303fd
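The sizing rule in the first bullet is the minimum of two scaled quantities with a floor, exactly as compress_ctr() computes it below. As a standalone illustration (compute_cache_pages and the plain C types are mine, not part of the patch):

	#include <stdint.h>

	/* compute_cache_pages() is an illustrative name, not a symbol from the
	 * patch; it restates the arithmetic compress_ctr() performs below. */
	static uint32_t compute_cache_pages(uint64_t ram_pages, uint64_t backing_pblks,
	                                    uint32_t ncpus)
	{
	    uint64_t by_ram = ram_pages >> 10;     /* 1/1k of system RAM, in pages */
	    uint64_t by_dev = backing_pblks >> 16; /* 1/64k of the backing device  */
	    uint64_t pages  = by_ram < by_dev ? by_ram : by_dev;
	    uint64_t floor  = 32ull * 2 * ncpus;   /* >= 2*CPUs lbd objects        */

	    return (uint32_t)(pages < floor ? floor : pages);
	}

For example, with roughly 2M pages of RAM (8 GiB), a backing device of ~256M pblks and 8 CPUs, this gives min(2048, 4096) = 2048 pages, well above the 512-page floor.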
@@ -380,8 +380,9 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	int ret;
 	unsigned int argn;
+	u32 cache_pages = 0;
 	struct compress *c = NULL;
-	u64 dev_nr_pblks;
+	u64 backing_nr_pblks;
 
 	printk(KERN_INFO "%s: enter: argc=%u\n", __func__, argc);
 	for (argn = 0; argn < argc; ++argn) {
@@ -397,6 +398,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		const char* arg = argv[argn++];
 		const char* val = NULL;
 		const char* eq = strchr(arg, '=');
+		int err;
 		if (eq) {
 			val = eq + 1;
 		}
@@ -410,6 +412,15 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			continue;
 		}
 #endif
+		/* XXX: Parse suffixes */
+		if (!memcmp(arg, "cache_pages", 7)) {
+			err = kstrtouint(eq + 1, 0, &cache_pages);
+			if (err) {
+				ti->error = "Failed to parse cache_pages";
+				return -EINVAL;
+			}
+			continue;
+		}
 		ti->error = "Unrecognized argument";
 		return -EINVAL;
 	}
@@ -428,14 +439,25 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->private = c;
 
-	dev_nr_pblks = dm_target_pblk_size(ti);
+	backing_nr_pblks = blkdev_pblk_size(c->dev->bdev);
 
-	if (get_order(dev_nr_pblks) >= 48) {
+	if ((backing_nr_pblks >> 48) != 0) {
 		ti->error = "Device too large";
 		ret = -EINVAL;
 		goto err;
 	}
 
+	if (!cache_pages) {
+		/* Minimum of 1/1k RAM and 1/64k device size */
+		cache_pages = min((unsigned int)(totalram_pages >> 10),
+				  (unsigned int)(backing_nr_pblks >> 16));
+		if (cache_pages < 32 * 2 * num_online_cpus()) {
+			cache_pages = 32 * 2 * num_online_cpus();
+		}
+	}
+	printk(KERN_INFO "%s: pages=%lu pblks=%lu cache_pages=%u\n",
+	       __func__, totalram_pages, (unsigned long)backing_nr_pblks, cache_pages);
+
 	ti->per_io_data_size = ALIGN(sizeof(struct compress_io), ARCH_KMALLOC_MINALIGN);
 
 	ret = compress_read_header(c);
@@ -456,13 +478,13 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 
 	/* XXX: validate minumum pblk using zone_off(max_zone+1) */
-	if (c->params.nr_pblk > dev_nr_pblks) {
+	if (c->params.nr_pblk > backing_nr_pblks) {
 		printk(KERN_ERR "%s: bad nr_pblk\n", __func__);
 		ret = -EINVAL;
 		goto err;
 	}
 
-	if (c->params.nr_zones > zone_for_pblk(&c->params, dev_nr_pblks)) {
+	if (c->params.nr_zones > zone_for_pblk(&c->params, backing_nr_pblks)) {
 		printk(KERN_ERR "%s: bad nr_zones\n", __func__);
 		ret = -EINVAL;
 		goto err;
@@ -476,7 +498,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ret = -ENOMEM;
 		goto err;
 	}
-	if (!lbdcache_ctr(c->lc, &c->params)) {
+	if (!lbdcache_ctr(c->lc, &c->params, cache_pages)) {
 		printk(KERN_ERR "Failed to init logical block cache\n");
 		ret = -ENOMEM;
 		goto err;
@@ -30,6 +30,7 @@
 #include <linux/dm-compress.h>
 
 struct lbatpage {
+	struct list_head list;
 	u64 pblk;
 	struct mutex reflock;
 	unsigned int ref;
@@ -44,6 +45,8 @@ struct lbatpage {
 static bool
 lbatpage_ctr(struct lbatpage* lp, struct cbd_params* params)
 {
+	memset(lp, 0, sizeof(struct lbatpage));
+	INIT_LIST_HEAD(&lp->list);
 	lp->pblk = PBLK_NONE;
 	mutex_init(&lp->reflock);
 	lp->ref = 0;
@@ -151,8 +154,9 @@ lbatpage_put_buf(struct lbatpage* lp)
 struct lbatpagecache {
 	struct mutex lock;
 	struct cbd_params* params;
-	unsigned int len;
-	struct lbatpage** cache;
+	struct list_head cache_head;
+	unsigned int cache_len;
+	struct lbatpage* cache;
 };
 
 size_t
@@ -161,48 +165,40 @@ lbatpagecache_size(void)
 	return sizeof(struct lbatpagecache);
 }
 
-static bool
-lbatpagecache_realloc(struct lbatpagecache* lpc, unsigned int len)
-{
-	struct lbatpage** cache;
-	unsigned int n;
-	struct lbatpage* lp;
-
-	cache = kzalloc(len * sizeof(struct lbatpage*), GFP_KERNEL);
-	if (!cache) {
-		return false;
-	}
-	n = 0;
-	if (lpc->len) {
-		memcpy(cache, lpc->cache, lpc->len * sizeof(struct lbatpage*));
-		n = lpc->len;
-		kfree(lpc->cache);
-	}
-	lpc->len = len;
-	lpc->cache = cache;
-	while (n < len) {
-		lp = kmalloc(sizeof(struct lbatpage), GFP_KERNEL);
-		if (!lp) {
-			return false;
-		}
-		cache[n++] = lp;
-		if (!lbatpage_ctr(lp, lpc->params)) {
-			return false;
-		}
-	}
-
-	return true;
-}
-
 bool
 lbatpagecache_ctr(struct lbatpagecache* lpc,
-                  struct cbd_params* params)
+                  struct cbd_params* params, u32 cache_pages)
 {
+	struct lbatpage* cache;
+	u32 cache_len;
+	u32 n;
+
 	memset(lpc, 0, sizeof(struct lbatpagecache));
 	mutex_init(&lpc->lock);
 	lpc->params = params;
 
-	return lbatpagecache_realloc(lpc, 1);
+	/* lbatpagecache gets 15/32 of cache pages */
+	cache_len = (cache_pages * 15 / 32);
+	if (!cache_len) {
+		printk(KERN_ERR "%s: Cache too small\n", __func__);
+		return false;
+	}
+	printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
+	cache = kzalloc(cache_len * sizeof(struct lbatpage), GFP_KERNEL);
+	if (!cache) {
+		return false;
+	}
+	INIT_LIST_HEAD(&lpc->cache_head);
+	lpc->cache_len = cache_len;
+	lpc->cache = cache;
+	for (n = 0; n < cache_len; ++n) {
+		if (!lbatpage_ctr(&cache[n], lpc->params)) {
+			return false;
+		}
+		list_add_tail(&cache[n].list, &lpc->cache_head);
+	}
+
+	return true;
 }
 
 void
@@ -211,8 +207,8 @@ lbatpagecache_dtr(struct lbatpagecache* lpc)
 	unsigned int n;
 	struct lbatpage* lp;
 
-	for (n = 0; n < lpc->len; ++n) {
-		lp = lpc->cache[n];
+	for (n = 0; n < lpc->cache_len; ++n) {
+		lp = &lpc->cache[n];
 		if (!lp) {
 			continue;
 		}
@@ -220,25 +216,24 @@ lbatpagecache_dtr(struct lbatpagecache* lpc)
 		if (lp->ref) {
 			printk(KERN_ERR "%s: lbatpage ref leak: n=%u ref=%u\n", __func__, n, lp->ref);
 		}
-		kfree(lp);
 	}
 	kfree(lpc->cache);
 	lpc->cache = NULL;
-	lpc->len = 0;
+	lpc->cache_len = 0;
+	INIT_LIST_HEAD(&lpc->cache_head);
 	lpc->params = NULL;
 }
 
 struct lbatpage*
 lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
 {
-	unsigned int n;
 	struct lbatpage* lp;
 
 	mutex_lock(&lpc->lock);
-	for (n = 0; n < lpc->len; ++n) {
-		lp = lpc->cache[n];
+	list_for_each_entry(lp, &lpc->cache_head, list) {
 		mutex_lock(&lp->reflock);
 		if (lp->pblk == pblk) {
+			list_move(&lp->list, &lpc->cache_head);
 			if (lp->ref == 0) {
 				goto found;
 			}
@@ -247,32 +242,23 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
 			mutex_unlock(&lp->reflock);
 			return lp;
 		}
-		mutex_unlock(&lp->reflock);
-	}
-	for (n = 0; n < lpc->len; ++n) {
-		lp = lpc->cache[n];
-		mutex_lock(&lp->reflock);
 		if (lp->pblk == PBLK_NONE) {
+			list_move(&lp->list, &lpc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&lp->reflock);
 	}
-	for (n = 0; n < lpc->len; ++n) {
-		lp = lpc->cache[n];
+	list_for_each_entry_reverse(lp, &lpc->cache_head, list) {
 		mutex_lock(&lp->reflock);
 		if (lp->ref == 0 && !lbatpage_error(lp)) {
+			list_move(&lp->list, &lpc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&lp->reflock);
 	}
-	n = lpc->len;
-	if (!lbatpagecache_realloc(lpc, lpc->len * 2)) {
-		printk(KERN_ERR "%s: realloc failed\n", __func__);
-		mutex_unlock(&lpc->lock);
-		return NULL;
-	}
-	lp = lpc->cache[n];
-	mutex_lock(&lp->reflock);
+	printk(KERN_ERR "%s: failed to find free entry\n", __func__);
+	mutex_unlock(&lpc->lock);
+	return NULL;
 
 found:
 	mutex_unlock(&lpc->lock);
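The lookup path above (lbatpagecache_get) is the LRU named in the commit message: a hit is moved to the head of cache_head, a miss first tries an unused entry and then recycles from the tail, the least recently used slot. The lbatview, lbd and pbat caches below repeat the same pattern. A minimal userspace sketch of the policy (illustrative names, no locking or error handling, not code from the patch):

	#include <stddef.h>
	#include <stdint.h>

	struct entry {
	    struct entry *prev, *next;
	    uint64_t key;
	    unsigned int ref;
	};

	struct lru {
	    struct entry head;  /* sentinel: head.next = MRU, head.prev = LRU */
	};

	static void lru_init(struct lru *l)
	{
	    l->head.prev = l->head.next = &l->head;
	}

	static void lru_unlink(struct entry *e)
	{
	    e->prev->next = e->next;
	    e->next->prev = e->prev;
	}

	static void lru_push_front(struct lru *l, struct entry *e)
	{
	    e->next = l->head.next;
	    e->prev = &l->head;
	    e->next->prev = e;
	    l->head.next = e;
	}

	/* Hit: move to front.  Miss: recycle the coldest idle entry from the tail. */
	static struct entry *lru_get(struct lru *l, uint64_t key)
	{
	    struct entry *e;

	    for (e = l->head.next; e != &l->head; e = e->next)
	        if (e->key == key) {
	            lru_unlink(e);
	            lru_push_front(l, e);
	            return e;
	        }
	    for (e = l->head.prev; e != &l->head; e = e->prev)
	        if (e->ref == 0) {
	            e->key = key;
	            lru_unlink(e);
	            lru_push_front(l, e);
	            return e;
	        }
	    return NULL;  /* every entry busy: the patch logs an error and fails */
	}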
@@ -30,6 +30,7 @@
 #include <linux/dm-compress.h>
 
 struct lbatview {
+	struct list_head list;
 	u64 pblk;
 	struct mutex reflock;
 	unsigned int ref;
@@ -49,6 +50,7 @@ lbatview_ctr(struct lbatview* lv,
              struct lbatpagecache* lpc)
 {
 	memset(lv, 0, sizeof(struct lbatview));
+	INIT_LIST_HEAD(&lv->list);
 	lv->pblk = PBLK_NONE;
 	mutex_init(&lv->reflock);
 	lv->ref = 0;
@@ -429,8 +431,9 @@ struct lbatviewcache {
 	struct cbd_params* params;
 	struct pbatcache* pc;
 	struct lbatpagecache* lpc;
-	unsigned int len;
-	struct lbatview** cache;
+	struct list_head cache_head;
+	unsigned int cache_len;
+	struct lbatview* cache;
 };
 
 size_t
@@ -439,43 +442,14 @@ lbatviewcache_size(void)
 	return sizeof(struct lbatviewcache);
 }
 
-static bool
-lbatviewcache_realloc(struct lbatviewcache* lvc, unsigned int len)
-{
-	struct lbatview** cache;
-	unsigned int n;
-	struct lbatview* lv;
-
-	cache = kzalloc(len * sizeof(struct lbatview*), GFP_KERNEL);
-	if (!cache) {
-		return false;
-	}
-	n = 0;
-	if (lvc->len) {
-		memcpy(cache, lvc->cache, lvc->len * sizeof(struct lbatview*));
-		n = lvc->len;
-		kfree(lvc->cache);
-	}
-	lvc->len = len;
-	lvc->cache = cache;
-	while (n < len) {
-		lv = kmalloc(sizeof(struct lbatview), GFP_KERNEL);
-		if (!lv) {
-			return false;
-		}
-		cache[n++] = lv;
-		if (!lbatview_ctr(lv, lvc->params, lvc->pc, lvc->lpc)) {
-			return false;
-		}
-	}
-
-	return true;
-}
-
 bool
 lbatviewcache_ctr(struct lbatviewcache* lvc,
-                  struct cbd_params* params)
+                  struct cbd_params* params, u32 cache_pages)
 {
+	struct lbatview* cache;
+	u32 cache_len;
+	u32 n;
+
 	memset(lvc, 0, sizeof(struct lbatviewcache));
 	mutex_init(&lvc->lock);
 	lvc->params = params;
@@ -483,18 +457,38 @@ lbatviewcache_ctr(struct lbatviewcache* lvc,
 	if (!lvc->pc) {
 		return false;
 	}
-	if (!pbatcache_ctr(lvc->pc, params)) {
+	if (!pbatcache_ctr(lvc->pc, params, cache_pages)) {
 		return false;
 	}
 	lvc->lpc = kmalloc(lbatpagecache_size(), GFP_KERNEL);
 	if (!lvc->lpc) {
 		return false;
 	}
-	if (!lbatpagecache_ctr(lvc->lpc, params)) {
+	if (!lbatpagecache_ctr(lvc->lpc, params, cache_pages)) {
 		return false;
 	}
+	/* lbatviewcache gets one entry per lbatpage (XXX: 5/6?) */
+	cache_len = (cache_pages * 15 / 32);
+	if (!cache_len) {
+		printk(KERN_ERR "%s: Cache too small\n", __func__);
+		return false;
+	}
+	printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
+	cache = kzalloc(cache_len * sizeof(struct lbatview), GFP_KERNEL);
+	if (!cache) {
+		return false;
+	}
+	INIT_LIST_HEAD(&lvc->cache_head);
+	lvc->cache_len = cache_len;
+	lvc->cache = cache;
+	for (n = 0; n < cache_len; ++n) {
+		if (!lbatview_ctr(&cache[n], lvc->params, lvc->pc, lvc->lpc)) {
+			return false;
+		}
+		list_add_tail(&cache[n].list, &lvc->cache_head);
+	}
 
-	return lbatviewcache_realloc(lvc, 1);
+	return true;
 }
 
 void
@@ -503,8 +497,8 @@ lbatviewcache_dtr(struct lbatviewcache* lvc)
 	unsigned int n;
 	struct lbatview* lv;
 
-	for (n = 0; n < lvc->len; ++n) {
-		lv = lvc->cache[n];
+	for (n = 0; n < lvc->cache_len; ++n) {
+		lv = &lvc->cache[n];
 		if (!lv) {
 			continue;
 		}
@@ -512,11 +506,11 @@ lbatviewcache_dtr(struct lbatviewcache* lvc)
 		if (lv->ref) {
 			printk(KERN_ERR "%s: lbatview ref leak: n=%u ref=%u\n", __func__, n, lv->ref);
 		}
-		kfree(lv);
 	}
 	kfree(lvc->cache);
 	lvc->cache = NULL;
-	lvc->len = 0;
+	lvc->cache_len = 0;
+	INIT_LIST_HEAD(&lvc->cache_head);
 	lbatpagecache_dtr(lvc->lpc);
 	kfree(lvc->lpc);
 	lvc->lpc = NULL;
@@ -537,7 +531,6 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
 	u64 pblk;
 	u32 count;
 
-	unsigned int n;
 	struct lbatview* lv;
 
 	zone = lblk / lvc->params->lblk_per_zone;
@@ -549,10 +542,10 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
 	count = (rel_pblk == lbat_len(lvc->params) - 1) ? 1 : 2;
 
 	mutex_lock(&lvc->lock);
-	for (n = 0; n < lvc->len; ++n) {
-		lv = lvc->cache[n];
+	list_for_each_entry(lv, &lvc->cache_head, list) {
 		mutex_lock(&lv->reflock);
 		if (lv->pblk == pblk) {
+			list_move(&lv->list, &lvc->cache_head);
 			if (lv->ref == 0) {
 				goto found;
 			}
@@ -561,32 +554,23 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
 			mutex_unlock(&lv->reflock);
 			return lv;
 		}
-		mutex_unlock(&lv->reflock);
-	}
-	for (n = 0; n < lvc->len; ++n) {
-		lv = lvc->cache[n];
-		mutex_lock(&lv->reflock);
 		if (lv->pblk == PBLK_NONE) {
+			list_move(&lv->list, &lvc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&lv->reflock);
 	}
-	for (n = 0; n < lvc->len; ++n) {
-		lv = lvc->cache[n];
+	list_for_each_entry_reverse(lv, &lvc->cache_head, list) {
 		mutex_lock(&lv->reflock);
 		if (lv->ref == 0) {
+			list_move(&lv->list, &lvc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&lv->reflock);
 	}
-	n = lvc->len;
-	if (!lbatviewcache_realloc(lvc, lvc->len * 2)) {
-		printk(KERN_ERR "%s: realloc failed\n", __func__);
-		mutex_unlock(&lvc->lock);
-		return NULL;
-	}
-	lv = lvc->cache[n];
-	mutex_lock(&lv->reflock);
+	printk(KERN_ERR "%s: failed to find free entry\n", __func__);
+	mutex_unlock(&lvc->lock);
+	return NULL;
 
 found:
 	mutex_unlock(&lvc->lock);
@@ -31,6 +31,7 @@
 #include <linux/dm-compress.h>
 
 struct lbd {
+	struct list_head list;
 	u64 lblk;
 	struct mutex reflock;
 	unsigned int ref;
@@ -254,6 +255,7 @@ lbd_ctr(struct lbd* lbd,
 	u32 nr_pages = lblk_per_pblk(params);
 
 	memset(lbd, 0, sizeof(struct lbd));
+	INIT_LIST_HEAD(&lbd->list);
 	lbd->lblk = LBLK_NONE;
 	mutex_init(&lbd->reflock);
 	lbd->ref = 0;
@@ -507,8 +509,9 @@ struct lbdcache
 	struct cbd_params* params;
 	void* percpu;
 	struct lbatviewcache* lvc;
-	unsigned int len;
-	struct lbd** cache;
+	struct list_head cache_head;
+	unsigned int cache_len;
+	struct lbd* cache;
 };
 
 size_t
@@ -517,39 +520,6 @@ lbdcache_size(void)
 	return sizeof(struct lbdcache);
 }
 
-static bool
-lbdcache_realloc(struct lbdcache* lc, unsigned int len)
-{
-	struct lbd** cache;
-	unsigned int n;
-	struct lbd* lbd;
-
-	cache = kzalloc(len * sizeof(struct lbd*), GFP_KERNEL);
-	if (!cache) {
-		return false;
-	}
-	n = 0;
-	if (lc->len) {
-		memcpy(cache, lc->cache, lc->len * sizeof(struct lbd*));
-		n = lc->len;
-		kfree(lc->cache);
-	}
-	lc->len = len;
-	lc->cache = cache;
-	while (n < len) {
-		lbd = kzalloc(sizeof(struct lbd), GFP_KERNEL);
-		if (!lbd) {
-			return false;
-		}
-		cache[n++] = lbd;
-		if (!lbd_ctr(lbd, lc->params, lc->lvc, lc->percpu)) {
-			return false;
-		}
-	}
-
-	return true;
-}
-
 static bool
 lbdcache_alloc_compress_state(void* percpu, const struct cbd_params* params, int cpu)
 {
@@ -624,9 +594,12 @@ lbdcache_free_compress_state(void* percpu, const struct cbd_params* params, int
 
 bool
 lbdcache_ctr(struct lbdcache* lc,
-             struct cbd_params* params)
+             struct cbd_params* params, u32 cache_pages)
 {
 	int cpu;
+	struct lbd* cache;
+	u32 cache_len;
+	u32 n;
 
 	memset(lc, 0, sizeof(struct lbdcache));
 	mutex_init(&lc->lock);
@@ -641,11 +614,32 @@ lbdcache_ctr(struct lbdcache* lc,
 	if (!lc->lvc) {
 		return false;
 	}
-	if (!lbatviewcache_ctr(lc->lvc, params)) {
+	if (!lbatviewcache_ctr(lc->lvc, params, cache_pages)) {
 		return false;
 	}
 
-	return lbdcache_realloc(lc, 1);
+	/* lbdcache gets 1/2 of cache_pages */
+	cache_len = (cache_pages * 1 / 2) / lblk_per_pblk(params);
+	if (!cache_len) {
+		printk(KERN_ERR "%s: Cache too small\n", __func__);
+		return false;
+	}
+	printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
+	cache = kzalloc(cache_len * sizeof(struct lbd), GFP_KERNEL);
+	if (!cache) {
+		return false;
+	}
+	INIT_LIST_HEAD(&lc->cache_head);
+	lc->cache_len = cache_len;
+	lc->cache = cache;
+	for (n = 0; n < cache_len; ++n) {
+		if (!lbd_ctr(&cache[n], params, lc->lvc, lc->percpu)) {
+			return false;
+		}
+		list_add_tail(&cache[n].list, &lc->cache_head);
+	}
+
+	return true;
 }
 
 void
@@ -655,8 +649,8 @@ lbdcache_dtr(struct lbdcache* lc)
 	struct lbd* lbd;
 	int cpu;
 
-	for (n = 0; n < lc->len; ++n) {
-		lbd = lc->cache[n];
+	for (n = 0; n < lc->cache_len; ++n) {
+		lbd = &lc->cache[n];
 		if (!lbd) {
 			continue;
 		}
@@ -664,11 +658,11 @@ lbdcache_dtr(struct lbdcache* lc)
 		if (lbd->ref) {
 			printk(KERN_ERR "%s: lbd ref leak: n=%u ref=%u\n", __func__, n, lbd->ref);
 		}
-		kfree(lbd);
 	}
 	kfree(lc->cache);
 	lc->cache = NULL;
-	lc->len = 0;
+	lc->cache_len = 0;
+	INIT_LIST_HEAD(&lc->cache_head);
 	lbatviewcache_dtr(lc->lvc);
 	kfree(lc->lvc);
 	lc->lvc = NULL;
@@ -683,14 +677,13 @@ lbdcache_dtr(struct lbdcache* lc)
 struct lbd*
 lbdcache_get(struct lbdcache* lc, u64 lblk)
 {
-	unsigned int n;
 	struct lbd* lbd;
 
 	mutex_lock(&lc->lock);
-	for (n = 0; n < lc->len; ++n) {
-		lbd = lc->cache[n];
+	list_for_each_entry(lbd, &lc->cache_head, list) {
 		mutex_lock(&lbd->reflock);
 		if (lbd->lblk == lblk) {
+			list_move(&lbd->list, &lc->cache_head);
 			if (lbd->ref == 0) {
 				goto found;
 			}
@@ -699,32 +692,23 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
 			mutex_unlock(&lbd->reflock);
 			return lbd;
 		}
-		mutex_unlock(&lbd->reflock);
-	}
-	for (n = 0; n < lc->len; ++n) {
-		lbd = lc->cache[n];
-		mutex_lock(&lbd->reflock);
 		if (lbd->lblk == LBLK_NONE) {
+			list_move(&lbd->list, &lc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&lbd->reflock);
 	}
-	for (n = 0; n < lc->len; ++n) {
-		lbd = lc->cache[n];
+	list_for_each_entry_reverse(lbd, &lc->cache_head, list) {
 		mutex_lock(&lbd->reflock);
 		if (lbd->ref == 0 && !lbd_error(lbd)) {
+			list_move(&lbd->list, &lc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&lbd->reflock);
 	}
-	n = lc->len;
-	if (!lbdcache_realloc(lc, lc->len * 2)) {
-		printk(KERN_ERR "%s: realloc failed\n", __func__);
-		mutex_unlock(&lc->lock);
-		return NULL;
-	}
-	lbd = lc->cache[n];
-	mutex_lock(&lbd->reflock);
+	printk(KERN_ERR "%s: failed to find free entry\n", __func__);
+	mutex_unlock(&lc->lock);
+	return NULL;
 
 found:
 	mutex_unlock(&lc->lock);
@@ -30,12 +30,15 @@
 #include <linux/dm-compress.h>
 
 struct pbat {
+	struct list_head list;
 	u32 zone;
 	struct mutex reflock;
 	unsigned int ref;
 
 	struct mutex lock;
 	struct cbd_params* params;
+	bool full;
+	u32 last_alloc;
 	struct page** pagev;
 	u8* buf;
 };
@@ -47,11 +50,14 @@ pbat_ctr(struct pbat* pbat,
 	u32 nr_pages = pbat_len(params);
 
 	memset(pbat, 0, sizeof(struct pbat));
+	INIT_LIST_HEAD(&pbat->list);
 	pbat->zone = ZONE_NONE;
 	mutex_init(&pbat->reflock);
 	pbat->ref = 0;
 	mutex_init(&pbat->lock);
 	pbat->params = params;
+	pbat->full = false;
+	pbat->last_alloc = 0;
 	pbat->pagev = kzalloc(nr_pages * sizeof(struct page*), GFP_KERNEL);
 	if (!pbat->pagev) {
 		return false;
@@ -149,6 +155,8 @@ pbat_reset(struct pbat* pbat, u32 zone)
 	}
 	if (pbat->zone != zone) {
 		pbat->zone = zone;
+		pbat->full = false;
+		pbat->last_alloc = 0;
 		ret = pbat_read(pbat);
 	}
 
@@ -176,11 +184,17 @@ pbat_alloc(struct pbat* pbat)
 	u64 pblk;
 
 	mutex_lock(&pbat->lock);
-	idx = cbd_bitmap_alloc(pbat->buf, pblk_count);
-	if (idx == pblk_count) {
+	if (pbat->full) {
 		pblk = PBLK_NONE;
 		goto out;
 	}
+	idx = cbd_bitmap_alloc(pbat->buf, pblk_count, pbat->last_alloc);
+	if (idx == pblk_count) {
+		pbat->full = true;
+		pblk = PBLK_NONE;
+		goto out;
+	}
+	pbat->last_alloc = idx;
 	pblk = idx + zone_data_off(pbat->params, pbat->zone);
 	SetPageDirty(pbat->pagev[0]);
 
@@ -206,6 +220,7 @@ pbat_free(struct pbat* pbat, u64 pblk)
 	BUG_ON(idx >= pblk_count);
 	mutex_lock(&pbat->lock);
 	cbd_bitmap_free(pbat->buf, idx);
+	pbat->full = false;
 	SetPageDirty(pbat->pagev[0]);
 	mutex_unlock(&pbat->lock);
 
@@ -215,8 +230,9 @@ pbat_free(struct pbat* pbat, u64 pblk)
 struct pbatcache {
 	struct mutex lock;
 	struct cbd_params* params;
-	unsigned int len;
-	struct pbat** cache;
+	struct list_head cache_head;
+	unsigned int cache_len;
+	struct pbat* cache;
 };
 
 size_t
@@ -225,48 +241,40 @@ pbatcache_size(void)
 	return sizeof(struct pbatcache);
 }
 
-static bool
-pbatcache_realloc(struct pbatcache* pc, unsigned int len)
-{
-	struct pbat** cache;
-	unsigned int n;
-	struct pbat* pbat;
-
-	cache = kzalloc(len * sizeof(struct pbat*), GFP_KERNEL);
-	if (!cache) {
-		return false;
-	}
-	n = 0;
-	if (pc->len) {
-		memcpy(cache, pc->cache, pc->len * sizeof(struct pbat*));
-		n = pc->len;
-		kfree(pc->cache);
-	}
-	pc->len = len;
-	pc->cache = cache;
-	while (n < len) {
-		pbat = kmalloc(sizeof(struct pbat), GFP_KERNEL);
-		if (!pbat) {
-			return false;
-		}
-		cache[n++] = pbat;
-		if (!pbat_ctr(pbat, pc->params)) {
-			return false;
-		}
-	}
-
-	return true;
-}
-
 bool
 pbatcache_ctr(struct pbatcache* pc,
-              struct cbd_params* params)
+              struct cbd_params* params, u32 cache_pages)
 {
+	struct pbat* cache;
+	u32 cache_len;
+	u32 n;
+
 	memset(pc, 0, sizeof(struct pbatcache));
 	mutex_init(&pc->lock);
 	pc->params = params;
 
-	return pbatcache_realloc(pc, 1);
+	/* pbatcache gets 1/32 of cache_pages */
+	cache_len = (cache_pages * 1 / 32) / pbat_len(params);
+	if (!cache_len) {
+		printk(KERN_ERR "%s: Cache too small\n", __func__);
+		return false;
+	}
+	printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
+	cache = kzalloc(cache_len * sizeof(struct pbat), GFP_KERNEL);
+	if (!cache) {
+		return false;
+	}
+	INIT_LIST_HEAD(&pc->cache_head);
+	pc->cache_len = cache_len;
+	pc->cache = cache;
+	for (n = 0; n < cache_len; ++n) {
+		if (!pbat_ctr(&cache[n], pc->params)) {
+			return false;
+		}
+		list_add_tail(&cache[n].list, &pc->cache_head);
+	}
+
+	return true;
 }
 
 void
@@ -275,8 +283,8 @@ pbatcache_dtr(struct pbatcache* pc)
 	unsigned int n;
 	struct pbat* pbat;
 
-	for (n = 0; n < pc->len; ++n) {
-		pbat = pc->cache[n];
+	for (n = 0; n < pc->cache_len; ++n) {
+		pbat = &pc->cache[n];
 		if (!pbat) {
 			continue;
 		}
@@ -284,25 +292,24 @@ pbatcache_dtr(struct pbatcache* pc)
 		if (pbat->ref) {
 			printk(KERN_ERR "%s: pbat ref leak: n=%u ref=%u\n", __func__, n, pbat->ref);
 		}
-		kfree(pbat);
 	}
 	kfree(pc->cache);
 	pc->cache = NULL;
-	pc->len = 0;
+	pc->cache_len = 0;
+	INIT_LIST_HEAD(&pc->cache_head);
 	pc->params = NULL;
 }
 
 struct pbat*
 pbatcache_get(struct pbatcache* pc, u32 zone)
 {
-	unsigned int n;
 	struct pbat* pbat;
 
	mutex_lock(&pc->lock);
-	for (n = 0; n < pc->len; ++n) {
-		pbat = pc->cache[n];
+	list_for_each_entry(pbat, &pc->cache_head, list) {
 		mutex_lock(&pbat->reflock);
 		if (pbat->zone == zone) {
+			list_move(&pbat->list, &pc->cache_head);
 			if (pbat->ref == 0) {
 				goto found;
 			}
@@ -311,33 +318,23 @@ pbatcache_get(struct pbatcache* pc, u32 zone)
 			mutex_unlock(&pbat->reflock);
 			return pbat;
 		}
-		mutex_unlock(&pbat->reflock);
-	}
-
-	for (n = 0; n < pc->len; ++n) {
-		pbat = pc->cache[n];
-		mutex_lock(&pbat->reflock);
 		if (pbat->zone == ZONE_NONE) {
+			list_move(&pbat->list, &pc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&pbat->reflock);
 	}
-	for (n = 0; n < pc->len; ++n) {
-		pbat = pc->cache[n];
+	list_for_each_entry_reverse(pbat, &pc->cache_head, list) {
 		mutex_lock(&pbat->reflock);
 		if (pbat->ref == 0 && !pbat_error(pbat)) {
+			list_move(&pbat->list, &pc->cache_head);
 			goto found;
 		}
 		mutex_unlock(&pbat->reflock);
 	}
-	n = pc->len;
-	if (!pbatcache_realloc(pc, pc->len * 2)) {
-		printk(KERN_ERR "%s: realloc failed\n", __func__);
-		mutex_unlock(&pc->lock);
-		return NULL;
-	}
-	pbat = pc->cache[n];
-	mutex_lock(&pbat->reflock);
+	printk(KERN_ERR "%s: failed to find free entry\n", __func__);
+	mutex_unlock(&pc->lock);
+	return NULL;
 
 found:
 	mutex_unlock(&pc->lock);
@@ -167,22 +167,33 @@ put64_le(u8** raw, u64 val)
 
 /* XXX: Use kernel bit functions */
 static inline u32
-cbd_bitmap_alloc(u8* buf, u32 bitsize)
+cbd_bitmap_alloc(u8* buf, u32 bitsize, u32 hint)
 {
-	u32 off = 0;
-	u32 bit = 0;
+	u32 off;
+	u32 bit;
 
-	for (off = 0; off < bitsize / BITS_PER_BYTE; ++off) {
+	for (off = hint / BITS_PER_BYTE; off < bitsize / BITS_PER_BYTE; ++off) {
 		if (buf[off] != 0xff) {
+			bit = 0;
 			while (buf[off] & (1 << bit)) {
 				++bit;
 			}
 			buf[off] |= (1 << bit);
-			break;
+			return off * BITS_PER_BYTE + bit;
+		}
+	}
+	for (off = 0; off < hint / BITS_PER_BYTE; ++off) {
+		if (buf[off] != 0xff) {
+			bit = 0;
+			while (buf[off] & (1 << bit)) {
+				++bit;
+			}
+			buf[off] |= (1 << bit);
+			return off * BITS_PER_BYTE + bit;
 		}
 	}
 
-	return off * BITS_PER_BYTE + bit;
+	return bitsize;
 }
 
 /* XXX: Use kernel bit functions */
@@ -498,7 +509,7 @@ int pbat_free(struct pbat* pbat, u64 pblk);
 struct pbatcache;
 size_t pbatcache_size(void);
 bool pbatcache_ctr(struct pbatcache* pbatcache,
-                   struct cbd_params* params);
+                   struct cbd_params* params, u32 cache_pages);
 void pbatcache_dtr(struct pbatcache* pbatcache);
 struct pbat*
 pbatcache_get(struct pbatcache* pbatcache, u32 zone);
@@ -512,7 +523,7 @@ void lbatpage_put_buf(struct lbatpage* lp);
 struct lbatpagecache;
 size_t lbatpagecache_size(void);
 bool lbatpagecache_ctr(struct lbatpagecache* lpc,
-                       struct cbd_params* params);
+                       struct cbd_params* params, u32 cache_pages);
 void lbatpagecache_dtr(struct lbatpagecache* lpc);
 struct lbatpage*
 lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk);
@@ -526,7 +537,7 @@ u64 lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx);
 struct lbatviewcache;
 size_t lbatviewcache_size(void);
 bool lbatviewcache_ctr(struct lbatviewcache* lvc,
-                       struct cbd_params* params);
+                       struct cbd_params* params, u32 cache_pages);
 void lbatviewcache_dtr(struct lbatviewcache* lvc);
 struct lbatview*
 lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk);
@@ -540,7 +551,7 @@ void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);
 struct lbdcache;
 size_t lbdcache_size(void);
 bool lbdcache_ctr(struct lbdcache* lc,
-                  struct cbd_params* params);
+                  struct cbd_params* params, u32 cache_pages);
 void lbdcache_dtr(struct lbdcache* lc);
 struct lbd*
 lbdcache_get(struct lbdcache* lc, u64 lblk);
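The "XXX: Use kernel bit functions" note kept on cbd_bitmap_alloc() points at the kernel's own bitmap helpers. A possible follow-up, not part of this commit, could look roughly like the sketch below; it assumes the bitmap can be treated as a native unsigned long array, which the current u8-based on-disk layout does not guarantee on big-endian machines:

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* Hypothetical variant using find_next_zero_bit(); same contract as
	 * cbd_bitmap_alloc(): returns bitsize when the bitmap is full. */
	static inline u32
	cbd_bitmap_alloc_bitops(unsigned long *map, u32 bitsize, u32 hint)
	{
		u32 bit = find_next_zero_bit(map, bitsize, hint);

		if (bit >= bitsize) {
			/* Nothing free at or after the hint: wrap to bit 0. */
			bit = find_next_zero_bit(map, hint, 0);
			if (bit >= hint)
				return bitsize;	/* completely full */
		}
		__set_bit(bit, map);
		return bit;
	}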