Implement async writes

Tom Marshall 2019-10-10 09:23:34 -07:00
parent 26152ce1ec
commit 36dec597ac
1 changed file with 39 additions and 28 deletions


@@ -56,8 +56,6 @@ struct dm_compress
struct cbd_params params;
bool io_failed; /* XXX: remove when debugging complete */
/* XXX: dm_target.off */
sector_t dm_off;
@@ -222,26 +220,28 @@ blkdev_pblk_read(struct block_device* dev, u64 pblk, u32 count, void *data)
return ret;
}
static int
static void
blkdev_pblk_write_endio(struct bio* bio)
{
void* data = page_address(bio->bi_io_vec[0].bv_page);
unsigned int count = bio->bi_max_vecs;
compress_free_pages(data, count);
bio_put(bio);
}
static void
blkdev_pblk_write(struct block_device* dev, u64 pblk, u32 count, void *data)
{
int ret;
struct bio* bio;
bio = blkdev_pblk_io_prepare(dev, REQ_OP_WRITE, pblk, count, data);
if (!bio) {
printk(KERN_ERR "%s: out of memory\n", __func__);
return -ENOMEM;
return;
}
bio->bi_end_io = blkdev_pblk_write_endio;
/* XXX: Make writes asynchronous. */
ret = submit_bio_wait(bio);
if (ret != 0) {
printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
}
bio_put(bio);
return ret;
submit_bio(bio);
}
/**************************************
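This hunk is the heart of the change: blkdev_pblk_write() now returns void, registers blkdev_pblk_write_endio() and calls submit_bio() instead of submit_bio_wait(), so the caller continues while the write is in flight and the pages attached to the bio are released only in the completion callback. A minimal restatement of that ownership hand-off, with hypothetical sketch_ names, assuming the driver helpers (blkdev_pblk_io_prepare(), compress_free_pages()) behave as the diff shows:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Completion side: the write owns its pages; release them only here. */
static void sketch_write_endio(struct bio* bio)
{
    void* data = page_address(bio->bi_io_vec[0].bv_page);
    unsigned int nr_pages = bio->bi_max_vecs;

    compress_free_pages(data, nr_pages);   /* driver helper, mirroring the hunk above */
    bio_put(bio);                          /* drop the last reference; frees the bio */
}

/* Submission side: fire and forget; no status to return to the caller. */
static void sketch_pblk_write(struct block_device* dev, u64 pblk, u32 count, void* data)
{
    struct bio* bio;

    bio = blkdev_pblk_io_prepare(dev, REQ_OP_WRITE, pblk, count, data);
    if (!bio) {
        printk(KERN_ERR "%s: out of memory\n", __func__);
        return;   /* nothing was queued; the caller still owns data */
    }
    bio->bi_end_io = sketch_write_endio;
    submit_bio(bio);   /* returns while the write is still in flight */
}

The design choice this encodes: since nobody waits, whatever memory the bio points at must be owned by the I/O itself, which is why the callers below copy their data into freshly allocated pages first.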
@@ -251,18 +251,20 @@ blkdev_pblk_write(struct block_device* dev, u64 pblk, u32 count, void *data)
static int
pblk_alloc_write(struct dm_compress* dc)
{
int ret;
u64 pblk;
u32 count;
void* pg;
BUG_ON(dc->pblk_alloc_idx == ZONE_NONE);
pblk = pblk_alloc_off(&dc->params, dc->pblk_alloc_idx);
count = pblk_alloc_len(&dc->params);
ret = blkdev_pblk_write(dc->dev->bdev, pblk, count, dc->pblk_alloc);
if (ret != 0) {
return ret;
pg = compress_alloc_pages(count * PBLK_SIZE);
if (!pg) {
return -ENOMEM;
}
memcpy(pg, dc->pblk_alloc, count * PBLK_SIZE);
blkdev_pblk_write(dc->dev->bdev, pblk, count, pg);
dc->pblk_alloc_dirty = false;
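pblk_alloc_write() shows the caller-side consequence: dc->pblk_alloc is a long-lived cache that keeps being modified after the write is issued, so the hunk snapshots it into freshly allocated pages and hands those pages, not the cache itself, to the asynchronous write, which frees them on completion. A hedged sketch of that snapshot-and-submit step:

/*
 * Sketch only: snapshot a live, cached buffer before handing it to an
 * asynchronous write.  sketch_write_snapshot() is hypothetical;
 * compress_alloc_pages() and blkdev_pblk_write() are the driver helpers
 * used in the hunk above.
 */
static int sketch_write_snapshot(struct dm_compress* dc, u64 pblk, u32 count, const void* live_buf)
{
    void* pg;

    /* Private copy: the async write frees it; the cache stays writable. */
    pg = compress_alloc_pages(count * PBLK_SIZE);
    if (!pg) {
        return -ENOMEM;
    }
    memcpy(pg, live_buf, count * PBLK_SIZE);

    blkdev_pblk_write(dc->dev->bdev, pblk, count, pg);
    return 0;
}

pblk_alloc_write() and lblk_alloc_elem_write() both follow this shape; they differ only in which cached buffer is snapshotted and how many blocks it covers.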
@@ -394,7 +396,6 @@ pblk_alloc_put(struct dm_compress* dc, u64 pblk)
static int
lblk_alloc_elem_write(struct dm_compress* dc)
{
int ret;
u32 zone;
u32 zone_lblk;
u32 elem_off;
@@ -403,6 +404,7 @@ lblk_alloc_elem_write(struct dm_compress* dc)
u32 count;
u64 pblk;
u8* buf;
void* pg;
BUG_ON(dc->lblk_alloc_elem_lblk == LBLK_NONE);
BUG_ON(dc->lblk_alloc_pblk == PBLK_NONE);
@@ -417,9 +419,15 @@ lblk_alloc_elem_write(struct dm_compress* dc)
pblk = dc->lblk_alloc_pblk;
buf = dc->lblk_alloc + (elem_off - rel_pblk * PBLK_SIZE);
lblk_alloc_elem_put(&dc->params, buf, dc->lblk_alloc_elem);
ret = blkdev_pblk_write(dc->dev->bdev, pblk, count, dc->lblk_alloc);
return ret;
pg = compress_alloc_pages(count * PBLK_SIZE);
if (!pg) {
return -ENOMEM;
}
memcpy(pg, dc->lblk_alloc, count * PBLK_SIZE);
blkdev_pblk_write(dc->dev->bdev, pblk, count, pg);
return 0;
}
static int
@@ -542,6 +550,10 @@ lblk_write(struct dm_compress* dc)
else {
c_len = lblk_compress(dc);
if (c_len > 0) {
size_t c_blkrem = c_len % PBLK_SIZE;
if (c_blkrem) {
memset(dc->lz4_cbuf + c_len, 0, PBLK_SIZE - c_blkrem);
}
c_buf = dc->lz4_cbuf;
dc->lblk_alloc_elem->len = c_len;
}
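When the block compresses, the hunk above zero-fills the remainder of the last physical block past c_len, so the copy-out loop in the next hunk never writes stale lz4_cbuf bytes to disk. The same arithmetic as a standalone sketch (sketch_zero_pad() is a hypothetical name; PBLK_SIZE is the driver's physical-block size):

/*
 * Sketch: zero the unused tail of the last physical block, so only
 * deliberate bytes are written out.  E.g. with PBLK_SIZE == 4096 and
 * c_len == 5000, the payload spans two blocks and the final
 * 4096 - (5000 % 4096) = 3192 bytes of the second block are cleared.
 */
static void sketch_zero_pad(u8* cbuf, size_t c_len)
{
    size_t c_blkrem = c_len % PBLK_SIZE;

    if (c_blkrem)
        memset(cbuf + c_len, 0, PBLK_SIZE - c_blkrem);
}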
@@ -554,6 +566,7 @@ lblk_write(struct dm_compress* dc)
for (n = 0; n < lblk_per_pblk(&dc->params); ++n) {
if (c_len > PBLK_SIZE * n) {
void* pg;
pblk = dc->lblk_alloc_elem->pblk[n];
if (!pblk) {
pblk = pblk_alloc_get(dc, zone);
@@ -563,7 +576,12 @@ lblk_write(struct dm_compress* dc)
}
dc->lblk_alloc_elem->pblk[n] = pblk;
}
blkdev_pblk_write(dc->dev->bdev, pblk, 1, c_buf);
pg = compress_alloc_pages(PBLK_SIZE);
if (!pg) {
return -ENOMEM;
}
memcpy(pg, c_buf, PBLK_SIZE);
blkdev_pblk_write(dc->dev->bdev, pblk, 1, pg);
c_buf += PBLK_SIZE;
}
else {
@@ -926,12 +944,6 @@ static void compress_io(struct dm_compress_io* io)
struct dm_compress* dc = io->dc;
struct bio* bio = io->bio;
if (dc->io_failed) {
bio->bi_status = BLK_STS_IOERR; /* XXX */
bio_endio(bio);
return;
}
mutex_lock(&dc->io_lock);
switch (bio_op(bio)) {
@@ -947,7 +959,6 @@ static void compress_io(struct dm_compress_io* io)
}
if (ret) {
printk(KERN_ERR "%s: failed, ret=%d\n", __func__, ret);
dc->io_failed = true;
}
mutex_unlock(&dc->io_lock);
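The final hunks drop the io_failed latch: compress_io() still logs errors returned by its read/write helpers, but device write failures now complete asynchronously and never reach this path. The commit's completion callback ignores bio->bi_status; if such failures were to be reported later, the callback is where they become visible. A sketch of that possibility only, not part of this commit (the sketch_ name is hypothetical; blk_status_to_errno() is a standard kernel helper):

/* Sketch only: observe asynchronous write status in the completion path. */
static void sketch_write_endio_with_status(struct bio* bio)
{
    void* data = page_address(bio->bi_io_vec[0].bv_page);
    unsigned int nr_pages = bio->bi_max_vecs;

    if (bio->bi_status)
        printk(KERN_ERR "%s: async write failed: %d\n",
               __func__, blk_status_to_errno(bio->bi_status));

    compress_free_pages(data, nr_pages);
    bio_put(bio);
}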