Defer releasing lbd until all I/O ops are done

This greatly improves performance: instead of releasing an lbd as soon as
each bio completes, park the most recently used lbd in a per-CPU slot and
release it later from a deferred work item, so consecutive bios touching the
same logical block reuse it without a cache put/get round trip.
Tom Marshall 2019-11-04 14:37:32 -08:00
parent d7fb50911b
commit 3e81efb9f6
3 changed files with 87 additions and 25 deletions
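
The core idea is a small deferred-release pattern: at the end of an I/O, the
driver parks its lbd reference in a per-CPU slot instead of dropping it, and a
scheduled work item flushes the slots later. Below is a minimal userspace
sketch of that pattern, with a single slot and a pthread mutex standing in for
the kernel's per-CPU data and workqueue machinery; every name in it is
illustrative rather than taken from the driver.

#include <stddef.h>
#include <pthread.h>

/* Stand-in for struct lbd; refs models the cache reference count. */
struct obj {
	int refs;
};

static struct obj* slot;	/* stand-in for one per-CPU slot */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for lbdcache_put(): drop one reference. */
static void cache_put(struct obj* o)
{
	if (o)
		o->refs--;	/* a real cache would write back/free at zero */
}

/* Like compress_get_lbd(): take whatever the previous I/O parked. */
static struct obj* get_parked(void)
{
	struct obj* o;

	pthread_mutex_lock(&slot_lock);
	o = slot;
	slot = NULL;
	pthread_mutex_unlock(&slot_lock);
	return o;
}

/* Like compress_put_lbd(): park the object instead of releasing it. */
static void park(struct obj* o)
{
	pthread_mutex_lock(&slot_lock);
	cache_put(slot);	/* drop whatever was parked before */
	slot = o;
	pthread_mutex_unlock(&slot_lock);
}

/* Like compress_flush(): the deferred work item finally releases it. */
static void flush_parked(void)
{
	pthread_mutex_lock(&slot_lock);
	cache_put(slot);
	slot = NULL;
	pthread_mutex_unlock(&slot_lock);
}

Each I/O begins with get_parked() and ends with park(); only the deferred
flush_parked() (or teardown) actually drops the reference, which is why
back-to-back I/Os on the same logical block no longer pay for a put/get
round trip.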

View File

@@ -57,6 +57,10 @@ struct compress
 	struct lbdcache* lc;
 
 	struct workqueue_struct* io_workq;
+
+	struct mutex lbd_lock;
+	void* __percpu lbd_percpu;
+	struct work_struct lbd_work;
 };
 
 static inline u64
@@ -195,37 +199,68 @@ compress_write_header(struct compress* c)
 	return ret;
 }
 
-static struct lbd*
-compress_lbdcache_swap(struct compress* c, u64 lblk, struct lbd* oldlbd)
-{
-	struct lbd* lbd;
-
-	/* Get new data before putting old data to avoid flush */
-	lbd = lbdcache_get(c->lc, lblk);
-	if (!lbd) {
-		printk(KERN_ERR "%s: lbdcache_get failed\n", __func__);
-		lbdcache_put(c->lc, oldlbd, &c->stats);
-		return NULL;
-	}
-	if (lbdcache_put(c->lc, oldlbd, &c->stats) != 0) {
-		printk(KERN_ERR "%s: failed to put oldlbd\n", __func__);
-		lbdcache_put(c->lc, lbd, &c->stats);
-		return NULL;
-	}
-
-	return lbd;
-}
+static void
+compress_flush(struct work_struct* work)
+{
+	struct compress* c = container_of(work, struct compress, lbd_work);
+	int cpu;
+	struct lbd** lbdp;
+
+	mutex_lock(&c->lbd_lock);
+	for (cpu = 0; cpu < num_online_cpus(); ++cpu) {
+		lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
+		lbdcache_put(c->lc, *lbdp, &c->stats); /* XXX: check error */
+		*lbdp = NULL;
+	}
+	mutex_unlock(&c->lbd_lock);
+}
+
+static struct lbd*
+compress_get_lbd(struct compress* c)
+{
+	int cpu;
+	struct lbd** lbdp;
+	struct lbd* lbd;
+
+	mutex_lock(&c->lbd_lock);
+	cpu = get_cpu();
+	lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
+	lbd = *lbdp;
+	*lbdp = NULL;
+	put_cpu();
+	mutex_unlock(&c->lbd_lock);
+
+	return lbd;
+}
+
+static void
+compress_put_lbd(struct compress* c, struct lbd* lbd)
+{
+	int cpu;
+	struct lbd** lbdp;
+
+	mutex_lock(&c->lbd_lock);
+	cpu = get_cpu();
+	lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
+	lbdcache_put(c->lc, *lbdp, &c->stats); /* XXX: check error */
+	*lbdp = lbd;
+	put_cpu();
+	mutex_unlock(&c->lbd_lock);
+}
 
 static int
 compress_read(struct compress *c, struct bio *bio)
 {
 	struct lbd* lbd = NULL;
 	struct bio_vec bv;
 	struct bvec_iter iter;
-	int ret;
 	u32 lblk_per_sector = lblk_per_pblk(&c->params) * PBLK_PER_SECTOR;
 	u64 last_lblk = LBLK_NONE;
 
+	lbd = compress_get_lbd(c);
+	if (lbd) {
+		last_lblk = lbd_lblk(lbd);
+	}
 	bio_for_each_segment(bv, bio, iter) {
 		u64 lblk = iter.bi_sector / lblk_per_sector;
 		u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
@@ -233,10 +268,12 @@ compress_read(struct compress *c, struct bio *bio)
 		char* data;
 
 		if (lblk != last_lblk) {
-			lbd = compress_lbdcache_swap(c, lblk, lbd);
-			if (!lbd) {
+			struct lbd* newlbd = lbdcache_get(c->lc, lblk);
+			lbdcache_put(c->lc, lbd, &c->stats);
+			if (!newlbd) {
 				return -EIO;
 			}
+			lbd = newlbd;
 			last_lblk = lblk;
 		}
@@ -244,9 +281,10 @@ compress_read(struct compress *c, struct bio *bio)
 		lbd_data_read(lbd, lblk_off, bv.bv_len, data);
 		bvec_kunmap_irq(data, &flags);
 	}
 
-	ret = lbdcache_put(c->lc, lbd, &c->stats);
+	compress_put_lbd(c, lbd);
+	schedule_work(&c->lbd_work);
 
-	return ret;
+	return 0;
 }
 
 static int
@@ -255,10 +293,13 @@ compress_write(struct compress *c, struct bio *bio)
 	struct lbd* lbd = NULL;
 	struct bio_vec bv;
 	struct bvec_iter iter;
-	int ret;
 	u32 lblk_per_sector = lblk_per_pblk(&c->params) * PBLK_PER_SECTOR;
 	u64 last_lblk = LBLK_NONE;
 
+	lbd = compress_get_lbd(c);
+	if (lbd) {
+		last_lblk = lbd_lblk(lbd);
+	}
 	bio_for_each_segment(bv, bio, iter) {
 		u64 lblk = iter.bi_sector / lblk_per_sector;
 		u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
@@ -266,10 +307,12 @@ compress_write(struct compress *c, struct bio *bio)
 		char* data;
 
 		if (lblk != last_lblk) {
-			lbd = compress_lbdcache_swap(c, lblk, lbd);
-			if (!lbd) {
+			struct lbd* newlbd = lbdcache_get(c->lc, lblk);
+			lbdcache_put(c->lc, lbd, &c->stats);
+			if (!newlbd) {
 				return -EIO;
 			}
+			lbd = newlbd;
 			last_lblk = lblk;
 		}
@@ -277,9 +320,10 @@ compress_write(struct compress *c, struct bio *bio)
 		lbd_data_write(lbd, lblk_off, bv.bv_len, data);
 		bvec_kunmap_irq(data, &flags);
 	}
 
-	ret = lbdcache_put(c->lc, lbd, &c->stats);
+	compress_put_lbd(c, lbd);
+	schedule_work(&c->lbd_work);
 
-	return ret;
+	return 0;
 }
 
 static void
@@ -445,6 +489,10 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		goto err;
 	}
 
+	mutex_init(&c->lbd_lock);
+	c->lbd_percpu = alloc_percpu(void*);
+	INIT_WORK(&c->lbd_work, compress_flush);
+
 	printk(KERN_INFO "%s: success\n", __func__);
 
 	return 0;
@@ -460,6 +508,7 @@ compress_dtr(struct dm_target *ti)
 {
 	int ret;
 	struct compress *c;
+	int cpu;
 
 	printk(KERN_INFO "%s: enter\n", __func__);
@@ -471,6 +520,12 @@ compress_dtr(struct dm_target *ti)
 			printk(KERN_INFO "Warning: failed to write header\n");
 		}
 	}
+	cancel_work_sync(&c->lbd_work);
+	for (cpu = 0; cpu < num_online_cpus(); ++cpu) {
+		struct lbd** lbdp = per_cpu_ptr(c->lbd_percpu, cpu);
+		lbdcache_put(c->lc, *lbdp, &c->stats);
+	}
+	free_percpu(c->lbd_percpu);
 	lbdcache_dtr(c->lc);
 	kfree(c->lc);
 	if (c->io_workq) {

View File

@@ -468,6 +468,12 @@ out:
 	return ret;
 }
 
+u64
+lbd_lblk(struct lbd* lbd)
+{
+	return lbd->lblk;
+}
+
 void
 lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf)
 {
@@ -639,7 +645,7 @@ lbdcache_ctr(struct lbdcache* lc,
 		return false;
 	}
 
-	return lbdcache_realloc(lc, 1024);
+	return lbdcache_realloc(lc, 1);
 }
 
 void

View File

@@ -533,6 +533,7 @@ struct lbatview*
 int lbatviewcache_put(struct lbatviewcache* lvc, struct lbatview* lbv);
 
 struct lbd;
+u64 lbd_lblk(struct lbd* lbd);
 void lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf);
 void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);