Flush lbd on a timer in lbdcache instead of the main module
parent 68956f70e8
commit bd51b7cb89
@@ -57,10 +57,6 @@ struct compress
         struct lbdcache* lc;

         struct workqueue_struct* io_workq;
-
-        struct mutex lbd_lock;
-        void* __percpu lbd_percpu;
-        struct work_struct lbd_work;
 };

 static inline u64
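The three fields removed here were the main module's eager-flush state: a per-CPU stash of referenced lbd pointers guarded by lbd_lock, plus a plain work item that pushed them back to the cache. The cache now keeps a delayed work item instead, which differs from a work_struct only in when the handler runs. A minimal sketch of the two (the demo_* names are illustrative, not from this codebase):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct work_struct eager_work;  /* runs as soon as a worker thread is free */
static struct delayed_work timed_work; /* runs after a timer expires */

static void demo_handler(struct work_struct* work)
{
        /* shared handler; a delayed_work embeds a work_struct */
}

static void demo_arm(void)
{
        INIT_WORK(&eager_work, demo_handler);
        INIT_DELAYED_WORK(&timed_work, demo_handler);

        schedule_work(&eager_work);                  /* immediate */
        schedule_delayed_work(&timed_work, HZ / 10); /* ~100 ms later */
}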
@@ -199,55 +195,6 @@ compress_write_header(struct compress* c)
         return ret;
 }

-static void
-compress_flush(struct work_struct* work)
-{
-        struct compress* c = container_of(work, struct compress, lbd_work);
-        int cpu;
-        struct lbd** lbdp;
-
-        mutex_lock(&c->lbd_lock);
-        for (cpu = 0; cpu < num_online_cpus(); ++cpu) {
-                lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
-                lbdcache_put(c->lc, *lbdp, &c->stats); /* XXX: check error */
-                *lbdp = NULL;
-        }
-        mutex_unlock(&c->lbd_lock);
-}
-
-static struct lbd*
-compress_get_lbd(struct compress* c)
-{
-        int cpu;
-        struct lbd** lbdp;
-        struct lbd* lbd;
-
-        mutex_lock(&c->lbd_lock);
-        cpu = get_cpu();
-        lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
-        lbd = *lbdp;
-        *lbdp = NULL;
-        put_cpu();
-        mutex_unlock(&c->lbd_lock);
-
-        return lbd;
-}
-
-static void
-compress_put_lbd(struct compress* c, struct lbd* lbd)
-{
-        int cpu;
-        struct lbd** lbdp;
-
-        mutex_lock(&c->lbd_lock);
-        cpu = get_cpu();
-        lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
-        lbdcache_put(c->lc, *lbdp, &c->stats); /* XXX: check error */
-        *lbdp = lbd;
-        put_cpu();
-        mutex_unlock(&c->lbd_lock);
-}
-
 static int
 compress_read(struct compress *c, struct bio *bio)
 {
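The deleted helpers cached one referenced lbd per CPU so that consecutive bios touching the same logical block could skip a cache lookup, then flushed all stashes from a work item. Their core idiom, swapping a per-CPU slot with preemption disabled, is sketched below under the same assumptions (demo_* names are illustrative; error handling trimmed):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

static void* __percpu* demo_slots; /* one stashed pointer per CPU */

static int demo_init(void)
{
        demo_slots = alloc_percpu(void*);
        return demo_slots ? 0 : -ENOMEM;
}

/* Take whatever this CPU last stashed, leaving the slot empty. */
static void* demo_take(void)
{
        void** slot;
        void* val;

        slot = per_cpu_ptr(demo_slots, get_cpu()); /* get_cpu() disables preemption */
        val = *slot;
        *slot = NULL;
        put_cpu();
        return val;
}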
@@ -255,34 +202,22 @@ compress_read(struct compress *c, struct bio *bio)
         struct bio_vec bv;
         struct bvec_iter iter;
         u32 lblk_per_sector = lblk_per_pblk(&c->params) * PBLK_PER_SECTOR;
-        u64 last_lblk = LBLK_NONE;

-        lbd = compress_get_lbd(c);
-        if (lbd) {
-                last_lblk = lbd_lblk(lbd);
-        }
         bio_for_each_segment(bv, bio, iter) {
                 u64 lblk = iter.bi_sector / lblk_per_sector;
                 u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
                 unsigned long flags;
                 char* data;

-                if (lblk != last_lblk) {
-                        struct lbd* newlbd = lbdcache_get(c->lc, lblk);
-                        lbdcache_put(c->lc, lbd, &c->stats);
-                        if (!newlbd) {
-                                return -EIO;
-                        }
-                        lbd = newlbd;
-                        last_lblk = lblk;
-                }
-
+                lbd = lbdcache_get(c->lc, lblk);
+                if (!lbd) {
+                        return -EIO;
+                }
                 data = bvec_kmap_irq(&bv, &flags);
                 lbd_data_read(lbd, lblk_off, bv.bv_len, data);
                 bvec_kunmap_irq(data, &flags);
+                lbdcache_put(c->lc, lbd);
         }
-        compress_put_lbd(c, lbd);
-        schedule_work(&c->lbd_work);

         return 0;
 }
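Note that lblk_per_sector, despite its name, holds the number of 512-byte sectors per logical block, so the division and remainder above map a segment's starting sector to a block index and byte offset. A standalone check of the arithmetic with assumed example values (8 sectors per block, segment at sector 21):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512u

int main(void)
{
        uint32_t lblk_per_sector = 8; /* e.g. 4 KiB logical block / 512 B sectors */
        uint64_t bi_sector = 21;      /* segment start, in sectors */

        uint64_t lblk = bi_sector / lblk_per_sector;
        uint32_t lblk_off = (uint32_t)(bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;

        /* prints: lblk=2 off=2560 */
        printf("lblk=%llu off=%u\n", (unsigned long long)lblk, lblk_off);
        return 0;
}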
@@ -294,34 +229,22 @@ compress_write(struct compress *c, struct bio *bio)
         struct bio_vec bv;
         struct bvec_iter iter;
         u32 lblk_per_sector = lblk_per_pblk(&c->params) * PBLK_PER_SECTOR;
-        u64 last_lblk = LBLK_NONE;

-        lbd = compress_get_lbd(c);
-        if (lbd) {
-                last_lblk = lbd_lblk(lbd);
-        }
         bio_for_each_segment(bv, bio, iter) {
                 u64 lblk = iter.bi_sector / lblk_per_sector;
                 u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
                 unsigned long flags;
                 char* data;

-                if (lblk != last_lblk) {
-                        struct lbd* newlbd = lbdcache_get(c->lc, lblk);
-                        lbdcache_put(c->lc, lbd, &c->stats);
-                        if (!newlbd) {
-                                return -EIO;
-                        }
-                        lbd = newlbd;
-                        last_lblk = lblk;
-                }
-
+                lbd = lbdcache_get(c->lc, lblk);
+                if (!lbd) {
+                        return -EIO;
+                }
                 data = bvec_kmap_irq(&bv, &flags);
                 lbd_data_write(lbd, lblk_off, bv.bv_len, data);
                 bvec_kunmap_irq(data, &flags);
+                lbdcache_put(c->lc, lbd);
         }
-        compress_put_lbd(c, lbd);
-        schedule_work(&c->lbd_work);

         return 0;
 }
@@ -498,7 +421,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 ret = -ENOMEM;
                 goto err;
         }
-        if (!lbdcache_ctr(c->lc, &c->params, cache_pages)) {
+        if (!lbdcache_ctr(c->lc, &c->params, &c->stats, cache_pages)) {
                 printk(KERN_ERR "Failed to init logical block cache\n");
                 ret = -ENOMEM;
                 goto err;
@@ -511,10 +434,6 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                 goto err;
         }

-        mutex_init(&c->lbd_lock);
-        c->lbd_percpu = alloc_percpu(void*);
-        INIT_WORK(&c->lbd_work, compress_flush);
-
         printk(KERN_INFO "%s: success\n", __func__);

         return 0;
@@ -530,7 +449,6 @@ compress_dtr(struct dm_target *ti)
 {
         int ret;
         struct compress *c;
-        int cpu;

         printk(KERN_INFO "%s: enter\n", __func__);

@@ -542,12 +460,6 @@ compress_dtr(struct dm_target *ti)
                         printk(KERN_INFO "Warning: failed to write header\n");
                 }
         }
-        cancel_work_sync(&c->lbd_work);
-        for (cpu = 0; cpu < num_online_cpus(); ++cpu) {
-                struct lbd** lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
-                lbdcache_put(c->lc, *lbdp, &c->stats);
-        }
-        free_percpu(c->lbd_percpu);
         lbdcache_dtr(c->lc);
         kfree(c->lc);
         if (c->io_workq) {
@@ -31,7 +31,9 @@
 #include <linux/dm-compress.h>

 struct lbd {
-        struct list_head list;
+        struct list_head lru_list;
+        struct list_head flush_list;
+        unsigned long flush_jiffies;
         u64 lblk;
         struct mutex reflock;
         unsigned int ref;
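An lbd can now sit on two lists at once: lru_list tracks its position in the cache's recency order, while flush_list tracks membership in the pending-writeback queue, with flush_jiffies as the deadline. Either embedded list_head recovers the containing object via list_entry()/container_of(); a minimal sketch (demo_* names are illustrative):

#include <linux/list.h>
#include <linux/types.h>

struct demo_node {
        struct list_head lru_list;   /* recency order */
        struct list_head flush_list; /* pending-writeback queue */
        unsigned long flush_jiffies; /* writeback deadline */
        u64 lblk;
};

static u64 demo_first_pending(struct list_head* flush_head)
{
        /* Recover the object from its embedded flush_list member,
         * independent of whatever other lists it is linked on. */
        struct demo_node* n =
                list_first_entry(flush_head, struct demo_node, flush_list);
        return n->lblk;
}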
@@ -255,7 +257,9 @@ lbd_ctr(struct lbd* lbd,
         u32 nr_pages = lblk_per_pblk(params);

         memset(lbd, 0, sizeof(struct lbd));
-        INIT_LIST_HEAD(&lbd->list);
+        INIT_LIST_HEAD(&lbd->lru_list);
+        INIT_LIST_HEAD(&lbd->flush_list);
+        lbd->flush_jiffies = 0;
         lbd->lblk = LBLK_NONE;
         mutex_init(&lbd->reflock);
         lbd->ref = 0;
@@ -507,13 +511,18 @@ struct lbdcache
 {
         struct mutex lock;
         struct cbd_params* params;
+        struct cbd_stats* stats;
         void* percpu;
         struct lbatviewcache* lvc;
         struct list_head cache_head;
         unsigned int cache_len;
         struct lbd* cache;
+        struct delayed_work flush_dwork;
+        struct list_head flush_head;
 };

+static void lbdcache_flush(struct work_struct*);
+
 size_t
 lbdcache_size(void)
 {
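Because every entry is queued with the same fixed delay, flush_head stays sorted by flush_jiffies, so the flush worker can stop at the first entry whose deadline has not arrived. The handler finds the cache by chaining container_of() through the embedded work member; a minimal sketch of that wiring (demo_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_cache {
        struct delayed_work flush_dwork;
};

static void demo_flush(struct work_struct* work)
{
        /* The work_struct lives inside the delayed_work, which lives
         * inside the cache, hence the flush_dwork.work member chain. */
        struct demo_cache* dc =
                container_of(work, struct demo_cache, flush_dwork.work);
        (void)dc; /* walk dc's flush queue here */
}

static void demo_setup(struct demo_cache* dc)
{
        INIT_DELAYED_WORK(&dc->flush_dwork, demo_flush);
}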
@@ -594,7 +603,8 @@ lbdcache_free_compress_state(void* percpu, const struct cbd_params* params, int

 bool
 lbdcache_ctr(struct lbdcache* lc,
-             struct cbd_params* params, u32 cache_pages)
+             struct cbd_params* params, struct cbd_stats* stats,
+             u32 cache_pages)
 {
         int cpu;
         struct lbd* cache;
@@ -604,6 +614,7 @@ lbdcache_ctr(struct lbdcache* lc,
         memset(lc, 0, sizeof(struct lbdcache));
         mutex_init(&lc->lock);
         lc->params = params;
+        lc->stats = stats;
         lc->percpu = alloc_percpu(void*);
         for (cpu = 0; cpu < num_online_cpus(); ++cpu) {
                 if (!lbdcache_alloc_compress_state(lc->percpu, params, cpu)) {
@@ -636,8 +647,10 @@ lbdcache_ctr(struct lbdcache* lc,
                 if (!lbd_ctr(&cache[n], params, lc->lvc, lc->percpu)) {
                         return false;
                 }
-                list_add_tail(&cache[n].list, &lc->cache_head);
+                list_add_tail(&cache[n].lru_list, &lc->cache_head);
         }
+        INIT_DELAYED_WORK(&lc->flush_dwork, lbdcache_flush);
+        INIT_LIST_HEAD(&lc->flush_head);

         return true;
 }
@@ -645,10 +658,19 @@ lbdcache_ctr(struct lbdcache* lc,
 void
 lbdcache_dtr(struct lbdcache* lc)
 {
+        int ret;
         unsigned int n;
         struct lbd* lbd;
         int cpu;

+        cancel_delayed_work_sync(&lc->flush_dwork);
+        flush_delayed_work(&lc->flush_dwork);
+        list_for_each_entry(lbd, &lc->flush_head, flush_list) {
+                ret = lbd_flush(lbd, lc->stats);
+                if (ret) {
+                        printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
+                }
+        }
         for (n = 0; n < lc->cache_len; ++n) {
                 lbd = &lc->cache[n];
                 if (!lbd) {
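Teardown ordering matters here: the timer must be stopped before the pending queue is drained, or the flush handler could run concurrently with the destructor. cancel_delayed_work_sync() also waits for an already-running handler; after it returns, the flush_delayed_work() call above is effectively a no-op, since nothing is pending anymore. A condensed sketch of the ordering (demo_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/list.h>

struct demo_cache {
        struct delayed_work flush_dwork;
        struct list_head flush_head;
};

static void demo_teardown(struct demo_cache* dc)
{
        /* Stop the timer and wait out any running handler first... */
        cancel_delayed_work_sync(&dc->flush_dwork);

        /* ...then drain whatever is still queued, race-free. */
        while (!list_empty(&dc->flush_head)) {
                struct list_head* item = dc->flush_head.next;
                list_del_init(item);
                /* write back / release the entry here */
        }
}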
@@ -674,38 +696,92 @@ lbdcache_dtr(struct lbdcache* lc)
         lc->params = NULL;
 }

+static void
+lbdcache_flush(struct work_struct* work)
+{
+        struct lbdcache* lc = container_of(work, struct lbdcache, flush_dwork.work);
+        unsigned long now = jiffies;
+        int ret;
+        struct lbd* lbd;
+
+        mutex_lock(&lc->lock);
+        while (!list_empty(&lc->flush_head)) {
+                lbd = list_first_entry(&lc->flush_head, struct lbd, flush_list);
+                mutex_lock(&lbd->reflock);
+                BUG_ON(lbd->ref != 1);
+                if (lbd->flush_jiffies > now) {
+                        mutex_unlock(&lbd->reflock);
+                        break;
+                }
+                list_del_init(&lbd->flush_list);
+                lbd->ref = 0;
+                ret = lbd_flush(lbd, lc->stats);
+                if (ret) {
+                        printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
+                }
+                mutex_unlock(&lbd->reflock);
+        }
+        if (!list_empty(&lc->flush_head)) {
+                schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
+        }
+        mutex_unlock(&lc->lock);
+}
+
 struct lbd*
 lbdcache_get(struct lbdcache* lc, u64 lblk)
 {
         struct lbd* lbd;

         mutex_lock(&lc->lock);
-        list_for_each_entry(lbd, &lc->cache_head, list) {
+        list_for_each_entry(lbd, &lc->cache_head, lru_list) {
                 mutex_lock(&lbd->reflock);
                 if (lbd->lblk == lblk) {
-                        list_move(&lbd->list, &lc->cache_head);
+                        list_move(&lbd->lru_list, &lc->cache_head);
                         if (lbd->ref == 0) {
                                 goto found;
                         }
+                        if (lbd->ref == 1 && !list_empty(&lc->flush_head)) {
+                                struct lbd* entry;
+                                list_for_each_entry(entry, &lc->flush_head, flush_list) {
+                                        if (entry == lbd) {
+                                                list_del_init(&lbd->flush_list);
+                                                lbd->ref = 0;
+                                                break;
+                                        }
+                                }
+                        }
                         mutex_unlock(&lc->lock);
                         ++lbd->ref;
                         mutex_unlock(&lbd->reflock);
                         return lbd;
                 }
                 if (lbd->lblk == LBLK_NONE) {
-                        list_move(&lbd->list, &lc->cache_head);
+                        list_move(&lbd->lru_list, &lc->cache_head);
                         goto found;
                 }
                 mutex_unlock(&lbd->reflock);
         }
-        list_for_each_entry_reverse(lbd, &lc->cache_head, list) {
+        list_for_each_entry_reverse(lbd, &lc->cache_head, lru_list) {
                 mutex_lock(&lbd->reflock);
                 if (lbd->ref == 0 && !lbd_error(lbd)) {
-                        list_move(&lbd->list, &lc->cache_head);
+                        list_move(&lbd->lru_list, &lc->cache_head);
                         goto found;
                 }
                 mutex_unlock(&lbd->reflock);
         }
+        if (!list_empty(&lc->flush_head)) {
+                int ret;
+                lbd = list_first_entry(&lc->flush_head, struct lbd, flush_list);
+                mutex_lock(&lbd->reflock);
+                BUG_ON(lbd->ref != 1);
+                list_del_init(&lbd->flush_list);
+                lbd->ref = 0;
+                ret = lbd_flush(lbd, lc->stats);
+                if (ret) {
+                        printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
+                }
+                goto found;
+        }
+        printk(KERN_ERR "%s: failed to find free entry\n", __func__);
         mutex_unlock(&lc->lock);
         return NULL;
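One caveat: the lbd->flush_jiffies > now test compares raw jiffies, which inverts when the counter wraps. Kernel convention for deadline tests is time_after()/time_before() from <linux/jiffies.h>, which are wrap-safe; a sketch of the same check written that way (a suggested variant, not what the commit does):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Wrap-safe: true iff the writeback deadline is still in the future. */
static bool demo_not_yet_due(unsigned long flush_jiffies)
{
        return time_after(flush_jiffies, jiffies);
}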
@@ -723,21 +799,26 @@ found:
 }

 int
-lbdcache_put(struct lbdcache* lc, struct lbd* lbd, struct cbd_stats* stats)
+lbdcache_put(struct lbdcache* lc, struct lbd* lbd)
 {
         int ret = 0;

         if (!lbd) {
                 return 0;
         }
+        mutex_lock(&lc->lock);
         mutex_lock(&lbd->reflock);
         if (--lbd->ref == 0) {
-                ret = lbd_flush(lbd, stats);
-                if (ret) {
-                        printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
+                lbd->flush_jiffies = jiffies + COMPRESS_FLUSH_DELAY;
+                lbd->ref = 1;
+                list_add_tail(&lbd->flush_list, &lc->flush_head);
+                /* This is racy, but it does not matter. */
+                if (!delayed_work_pending(&lc->flush_dwork)) {
+                        schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
                 }
         }
         mutex_unlock(&lbd->reflock);
+        mutex_unlock(&lc->lock);

         return ret;
 }
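lbdcache_put() no longer writes the block back itself. When the caller drops the last reference, the lbd keeps ref == 1 on behalf of the flush timer, gets a deadline one COMPRESS_FLUSH_DELAY in the future, and joins flush_head; the delayed work is armed only if idle. The race noted in the comment is benign because schedule_delayed_work() on already-pending work does nothing. A condensed sketch of the hand-off, locking elided (demo_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/jiffies.h>

#define DEMO_FLUSH_DELAY (HZ / 10)

struct demo_entry {
        struct list_head flush_list;
        unsigned long flush_jiffies;
        unsigned int ref;
};

struct demo_cache {
        struct list_head flush_head;
        struct delayed_work flush_dwork;
};

/* Caller drops its reference; the flush timer inherits one. */
static void demo_defer_release(struct demo_cache* dc, struct demo_entry* e)
{
        if (--e->ref == 0) {
                e->flush_jiffies = jiffies + DEMO_FLUSH_DELAY;
                e->ref = 1; /* held by the timer until flushed */
                list_add_tail(&e->flush_list, &dc->flush_head);
                if (!delayed_work_pending(&dc->flush_dwork))
                        schedule_delayed_work(&dc->flush_dwork, DEMO_FLUSH_DELAY);
        }
}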
@@ -506,6 +506,8 @@ lba_put(const struct cbd_params* params,
 #define COMPRESS_HAVE_ZLIB 1
 #endif

+#define COMPRESS_FLUSH_DELAY (HZ / 10)
+
 typedef void (*pblk_endio_t)(struct bio*);

 /* Single page allocator */
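HZ is the number of timer ticks per second, so HZ / 10 is roughly 100 ms whatever tick rate the kernel was built with. The same interval can be written self-documentingly with msecs_to_jiffies(); an equivalent form (assuming the 100 ms intent):

#include <linux/jiffies.h>

#define DEMO_FLUSH_DELAY (HZ / 10) /* ~100 ms, in ticks */

static unsigned long demo_flush_delay(void)
{
        return msecs_to_jiffies(100); /* same interval, intent spelled out */
}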
@@ -578,11 +580,12 @@ void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);

 struct lbdcache;
 size_t lbdcache_size(void);
 bool lbdcache_ctr(struct lbdcache* lc,
-                  struct cbd_params* params, u32 cache_pages);
+                  struct cbd_params* params, struct cbd_stats* stats,
+                  u32 cache_pages);
 void lbdcache_dtr(struct lbdcache* lc);
 struct lbd*
 lbdcache_get(struct lbdcache* lc, u64 lblk);
-int lbdcache_put(struct lbdcache* lc, struct lbd* lbd, struct cbd_stats* stats);
+int lbdcache_put(struct lbdcache* lc, struct lbd* lbd);

 #endif
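With the stats pointer captured once at construction, callers now pair every successful lbdcache_get() with a two-argument lbdcache_put(). A sketch of the resulting calling convention against the prototypes above (error handling trimmed; the demo_* wrapper is illustrative):

#include <linux/errno.h>
#include <linux/types.h>

static int demo_touch_block(struct lbdcache* lc, u64 lblk)
{
        struct lbd* lbd = lbdcache_get(lc, lblk);

        if (!lbd)
                return -EIO;
        /* ... lbd_data_read()/lbd_data_write() on the block ... */
        return lbdcache_put(lc, lbd); /* queues the timed writeback */
}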