Optimize lblk handling

This commit is contained in:
Tom Marshall 2019-10-09 14:41:13 -07:00
parent a5cc0f2e81
commit 7326fe2e14
1 changed file with 66 additions and 49 deletions

View File

@ -40,6 +40,7 @@
#define USE_WORKQUEUE 1
#define ZONE_NONE (u32)(~0)
#define PBLK_NONE (u64)(~0)
#define LBLK_NONE (u64)(~0)
/* per bio private data */
@ -69,13 +70,15 @@ struct dm_compress
void* pblk_alloc;
/* Currently cached zone lblk alloc info (if any) */
u32 lblk_alloc_idx;
u64 lblk_alloc_pblk;
u32 lblk_alloc_len;
void* lblk_alloc;
u64 lblk_alloc_elem_lblk;
struct lblk_alloc_elem* lblk_alloc_elem;
/* Currently cached lblk data (if any) */
u64 lblk_num;
bool lblk_dirty;
struct lblk_alloc_elem* lblk_alloc_elem;
void* lblk;
@ -339,54 +342,75 @@ pblk_alloc_put(struct dm_compress* dc, u64 pblk)
**************************************/
static int
lblk_alloc_write(struct dm_compress* dc)
lblk_alloc_elem_write(struct dm_compress* dc)
{
int ret;
u64 pblk;
u32 zone;
u32 zone_lblk;
u32 elem_off;
u32 elem_end;
u32 rel_pblk;
u32 count;
u64 pblk;
u8* buf;
if (!dc->lblk_alloc) {
printk(KERN_ERR "%s: lblk_alloc is NULL\n", __func__);
return -EINVAL;
BUG_ON(dc->lblk_alloc_elem_lblk == LBLK_NONE);
BUG_ON(dc->lblk_alloc_pblk == PBLK_NONE);
BUG_ON(dc->lblk_alloc_len == 0);
zone = dc->lblk_alloc_elem_lblk / dc->params.lblk_per_zone;
zone_lblk = dc->lblk_alloc_elem_lblk - (zone * dc->params.lblk_per_zone);
elem_off = lblk_alloc_elem_len(&dc->params) * zone_lblk;
elem_end = elem_off + lblk_alloc_elem_len(&dc->params);
rel_pblk = elem_off / PBLK_SIZE;
count = dc->lblk_alloc_len;
pblk = dc->lblk_alloc_pblk;
buf = dc->lblk_alloc + (elem_off - rel_pblk * PBLK_SIZE);
lblk_alloc_elem_put(&dc->params, buf, dc->lblk_alloc_elem);
ret = blkdev_pblk_write(dc->dev->bdev, pblk, count, dc->lblk_alloc);
return ret;
}
BUG_ON(dc->lblk_alloc_idx == ZONE_NONE);
pblk = lblk_alloc_off(&dc->params, dc->lblk_alloc_idx);
count = lblk_alloc_len(&dc->params);
/*
 * lblk_alloc_elem_read(): load the allocation element for logical block
 * @lblk into dc->lblk_alloc_elem, reading its backing pblk window from
 * disk only when it is not already cached.  Returns 0 on success or the
 * error from blkdev_pblk_read().
 */
static int
lblk_alloc_elem_read(struct dm_compress* dc, u64 lblk)
{
int ret;
u32 zone;
u32 zone_lblk;
u32 elem_off;
u32 elem_end;
u32 rel_pblk;
u32 count;
u64 pblk;
u8* buf;
/*
 * NOTE(review): stray line — pblk/count are uninitialized here.  This
 * appears to be a leftover removed line from the pre-commit function in
 * this diff rendering; it should not exist in the post-commit file.
 */
ret = blkdev_pblk_write(dc->dev->bdev, pblk, count, dc->lblk_alloc);
/* Fast path: the requested element is already decoded and cached. */
if (dc->lblk_alloc_elem_lblk == lblk) {
return 0;
}
/* Byte range of this lblk's element within its zone's alloc area. */
zone = lblk / dc->params.lblk_per_zone;
zone_lblk = lblk - (zone * dc->params.lblk_per_zone);
elem_off = lblk_alloc_elem_len(&dc->params) * zone_lblk;
elem_end = elem_off + lblk_alloc_elem_len(&dc->params);
rel_pblk = elem_off / PBLK_SIZE;
/* Number of pblks the element straddles (1 or 2). */
count = 1 + (elem_end - 1) / PBLK_SIZE - (elem_off / PBLK_SIZE);
pblk = lblk_alloc_off(&dc->params, zone) + rel_pblk;
/* Only hit the disk if the cached window does not cover this element. */
if (dc->lblk_alloc_pblk != pblk || dc->lblk_alloc_len < count) {
ret = blkdev_pblk_read(dc->dev->bdev, pblk, count, dc->lblk_alloc);
if (ret != 0) {
return ret;
}
dc->lblk_alloc_pblk = pblk;
dc->lblk_alloc_len = count;
}
/* Decode the element from the buffer and remember which lblk it is for. */
buf = dc->lblk_alloc + (elem_off - rel_pblk * PBLK_SIZE);
lblk_alloc_elem_get(&dc->params, buf, dc->lblk_alloc_elem);
dc->lblk_alloc_elem_lblk = lblk;
return 0;
}
/*
* XXX: Another opportunity to choose speed vs. space: only allocate two
* pages for lblk_alloc_elem buffer instead of the entire lblk_alloc.
*/
/*
 * lblk_alloc_read(): pull zone @idx's entire lblk allocation area into the
 * dc->lblk_alloc cache buffer.  A no-op when that zone is already cached.
 * Returns 0 on success or the error from blkdev_pblk_read().
 */
static int
lblk_alloc_read(struct dm_compress* dc, u32 idx)
{
u64 first_pblk;
u32 nr_pblk;
int err;
/* Cache hit: requested zone is already resident. */
if (idx == dc->lblk_alloc_idx) {
return 0;
}
first_pblk = lblk_alloc_off(&dc->params, idx);
nr_pblk = lblk_alloc_len(&dc->params);
err = blkdev_pblk_read(dc->dev->bdev, first_pblk, nr_pblk, dc->lblk_alloc);
/* Only mark the cache valid once the read has actually succeeded. */
if (err == 0) {
dc->lblk_alloc_idx = idx;
}
return err;
}
/**************************************
* Logical block functions
**************************************/
@ -452,15 +476,8 @@ lblk_write(struct dm_compress* dc)
zone_lblk = dc->lblk_num - (zone * dc->params.lblk_per_zone);
elem_buf = dc->lblk_alloc + zone_lblk * lblk_alloc_elem_len(&dc->params);
/*
* We must have dc->lblk_alloc and dc->lblk_alloc_elem cached by
* the previous lblk_read().
*/
if (dc->lblk_alloc_idx != zone) {
printk(KERN_ERR "*** lblk_alloc not cached: %lu vs %lu\n", (unsigned long)dc->lblk_alloc_idx, (unsigned long)zone);
return -EIO;
}
BUG_ON(dc->lblk_alloc_idx != zone);
/* We must have a cached lblk elem */
BUG_ON(dc->lblk_alloc_elem_lblk == LBLK_NONE);
d_len = PBLK_SIZE * lblk_per_pblk(&dc->params);
#ifdef CBD_DETECT_ZERO_BLOCKS
@ -512,10 +529,9 @@ lblk_write(struct dm_compress* dc)
}
}
lblk_alloc_elem_put(&dc->params, elem_buf, dc->lblk_alloc_elem);
ret = lblk_alloc_write(dc);
ret = lblk_alloc_elem_write(dc);
if (ret != 0) {
printk(KERN_ERR " lblk_alloc_write failed\n");
printk(KERN_ERR " lblk_alloc_elem_write failed\n");
return ret;
}
ret = pblk_alloc_flush(dc);
@ -566,12 +582,11 @@ lblk_read(struct dm_compress* dc, u64 idx)
zone_lblk = idx - (zone * dc->params.lblk_per_zone);
elem_buf = dc->lblk_alloc + zone_lblk * lblk_alloc_elem_len(&dc->params);
ret = lblk_alloc_read(dc, zone);
ret = lblk_alloc_elem_read(dc, idx);
if (ret != 0) {
printk(KERN_ERR " lblk_alloc_read failed\n");
printk(KERN_ERR " lblk_alloc_elem_read failed\n");
return ret;
}
lblk_alloc_elem_get(&dc->params, elem_buf, dc->lblk_alloc_elem);
c_len = dc->lblk_alloc_elem->len;
if (c_len == 0) {
@ -671,7 +686,7 @@ compress_free_buffers(struct dm_compress* dc)
kfree(dc->lblk_alloc_elem);
dc->lblk_alloc_elem = NULL;
compress_free_pages(dc->lblk_alloc, PBLK_SIZE * lblk_alloc_len(&dc->params));
compress_free_pages(dc->lblk_alloc, PBLK_SIZE * 2);
dc->lblk_alloc = NULL;
compress_free_pages(dc->pblk_alloc, PBLK_SIZE * pblk_alloc_len(&dc->params));
@ -709,19 +724,21 @@ compress_alloc_buffers(struct dm_compress* dc)
printk(KERN_ERR "%s: Failed to alloc pblk_alloc\n", __func__);
goto out_nomem;
}
dc->lblk_alloc_idx = ZONE_NONE;
dc->lblk_alloc = compress_alloc_pages(PBLK_SIZE * lblk_alloc_len(&dc->params));
dc->lblk_alloc_pblk = PBLK_NONE;
dc->lblk_alloc_len = 0;
dc->lblk_alloc = compress_alloc_pages(PBLK_SIZE * 2);
if (!dc->lblk_alloc) {
printk(KERN_ERR "%s: Failed to alloc lblk_alloc\n", __func__);
goto out_nomem;
}
dc->lblk_num = LBLK_NONE;
dc->lblk_dirty = false;
dc->lblk_alloc_elem_lblk = LBLK_NONE;
dc->lblk_alloc_elem = kmalloc(offsetof(struct lblk_alloc_elem, pblk[lblk_per_pblk(&dc->params)]), GFP_KERNEL);
if (!dc->lblk_alloc_elem) {
printk(KERN_ERR "%s: Failed to alloc lblk_alloc_elem\n", __func__);
goto out_nomem;
}
dc->lblk_num = LBLK_NONE;
dc->lblk_dirty = false;
dc->lblk = compress_alloc_pages(PBLK_SIZE * lblk_per_pblk(&dc->params));
if (!dc->lblk) {
printk(KERN_ERR "%s: Failed to alloc lblk\n", __func__);