Rearrange compress init

Tom Marshall 2019-11-01 15:08:34 -07:00
parent ee7eacd4a6
commit 8e1630b08c
1 changed file with 60 additions and 67 deletions


@@ -56,7 +56,6 @@ struct compress
struct lbdcache* lc;
struct workqueue_struct* io_workq;
bool io_failed;
};
static inline u64
@@ -76,14 +75,13 @@ dm_target_pblk_size(struct dm_target* ti)
**************************************/
static int
compress_open(struct compress* c, u64 dev_nr_pblks)
compress_read_header(struct compress* c)
{
int err;
int ret = 0;
struct page* pblkpage;
u8 *pblkbuf;
struct page* iopagev[1];
struct cbd_header header;
u64 max_nr_zones;
pblkpage = cbd_alloc_page();
if (!pblkpage) {
@@ -94,76 +92,59 @@ compress_open(struct compress* c, u64 dev_nr_pblks)
memset(&header, 0, sizeof(header));
header.params.priv = c->dev->bdev;
err = pblk_read_wait(&header.params, 0, 1, iopagev);
if (err) {
ret = pblk_read_wait(&header.params, 0, 1, iopagev);
if (ret) {
printk(KERN_ERR "%s: failed to read header\n", __func__);
cbd_free_page(pblkpage);
return err;
goto out;
}
cbd_header_get(pblkbuf, &header);
cbd_free_page(pblkpage);
if (memcmp(header.magic, CBD_MAGIC, sizeof(header.magic)) != 0) {
printk(KERN_ERR "%s: bad magic\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
if (header.version_major != CBD_VERSION_MAJOR) {
printk(KERN_ERR "%s: bad version\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
if (header.version_minor != CBD_VERSION_MINOR) {
printk(KERN_ERR "%s: bad version\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
if (header.params.algorithm == CBD_ALG_NONE ||
header.params.algorithm >= CBD_ALG_MAX) {
printk(KERN_ERR "%s: bad algorithm\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
#ifndef COMPRESS_HAVE_LZ4
if (header.params.algorithm == CBD_ALG_LZ4) {
printk(KERN_ERR "%s: algorithm lz4 is not built into kernel\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
#endif
#ifndef COMPRESS_HAVE_ZLIB
if (header.params.algorithm == CBD_ALG_ZLIB) {
printk(KERN_ERR "%s: algorithm zlib is not built into kernel\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
#endif
if (header.params.compression < 1 || header.params.compression > 9) {
printk(KERN_ERR "%s: bad compression\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
if (header.params.lblk_shift < LBLK_SHIFT_MIN ||
header.params.lblk_shift > LBLK_SHIFT_MAX) {
printk(KERN_ERR "%s: bad lblk_shift\n", __func__);
err = -EINVAL;
ret = -EINVAL;
goto out;
}
/* XXX: validate minimum pblk using zone_off(max_zone+1) */
if (header.params.nr_pblk > dev_nr_pblks) {
printk(KERN_ERR "%s: bad nr_pblk\n", __func__);
err = -EINVAL;
goto out;
}
max_nr_zones = zone_for_pblk(&header.params, dev_nr_pblks);
if (header.params.nr_zones > max_nr_zones) {
printk(KERN_ERR "%s: bad nr_zones\n", __func__);
err = -EINVAL;
goto out;
}
/* XXX: validate lblk_per_zone */
printk(KERN_INFO "%s: parameters...\n", __func__);
printk(KERN_INFO " algorithm=%hu\n", (unsigned short)header.params.algorithm);
@@ -175,30 +156,9 @@ compress_open(struct compress* c, u64 dev_nr_pblks)
memcpy(&c->params, &header.params, sizeof(header.params));
c->lc = kmalloc(lbdcache_size(), GFP_KERNEL);
if (!c->lc) {
err = -ENOMEM;
printk(KERN_ERR "Failed to alloc lbdcache\n");
goto out;
}
if (!lbdcache_ctr(c->lc, &c->params)) {
err = -ENOMEM;
printk(KERN_ERR "Failed to init logical block cache\n");
goto out;
}
c->io_workq = alloc_workqueue("compress_io", WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
if (!c->io_workq) {
printk(KERN_ERR "%s: failed to alloc io_workq\n", __func__);
err = -ENOMEM;
goto out;
}
c->io_failed = false;
out:
/* XXX: cleanup on error */
return err;
cbd_free_page(pblkpage);
return ret;
}
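
Because the old and new lines of this hunk are interleaved above, here is a condensed sketch of how compress_read_header() reads after the change; the context hidden by the hunk (the cbd_alloc_page() failure branch, the pblkbuf/iopagev setup, the remaining -EINVAL checks, and the parameter printks) is assumed unchanged and only summarized in comments.

static int
compress_read_header(struct compress* c)
{
	int ret = 0;
	struct page* pblkpage;
	u8 *pblkbuf;
	struct page* iopagev[1];
	struct cbd_header header;

	pblkpage = cbd_alloc_page();
	/* ... -ENOMEM handling and pblkbuf/iopagev setup elided (unchanged context) ... */

	memset(&header, 0, sizeof(header));
	header.params.priv = c->dev->bdev;
	ret = pblk_read_wait(&header.params, 0, 1, iopagev);
	if (ret) {
		printk(KERN_ERR "%s: failed to read header\n", __func__);
		goto out;
	}
	cbd_header_get(pblkbuf, &header);

	/* magic, version, algorithm, compression and lblk_shift checks:
	 * each sets ret = -EINVAL and jumps to out on failure */

	memcpy(&c->params, &header.params, sizeof(header.params));

out:
	cbd_free_page(pblkpage);
	return ret;
}

The device-size checks (nr_pblk, nr_zones) and the lbdcache/io_workq setup no longer happen here; they move into the constructor below.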
static struct lbd*
@@ -340,7 +300,7 @@ compress_io_work(struct work_struct* work)
static int
compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int err;
int ret;
unsigned int argn;
struct compress *c = NULL;
u64 dev_nr_pblks;
@@ -394,22 +354,59 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (get_order(dev_nr_pblks) >= 48) {
ti->error = "Device too large";
kfree(c);
return -EINVAL;
ret = -EINVAL;
goto err;
}
ti->per_io_data_size = ALIGN(sizeof(struct compress_io), ARCH_KMALLOC_MINALIGN);
err = compress_open(c, dev_nr_pblks);
if (err) {
dm_put_device(ti, c->dev);
kfree(c);
return err;
ret = compress_read_header(c);
if (ret) {
goto err;
}
/* XXX: validate minimum pblk using zone_off(max_zone+1) */
if (c->params.nr_pblk > dev_nr_pblks) {
printk(KERN_ERR "%s: bad nr_pblk\n", __func__);
ret = -EINVAL;
goto err;
}
if (c->params.nr_zones > zone_for_pblk(&c->params, dev_nr_pblks)) {
printk(KERN_ERR "%s: bad nr_zones\n", __func__);
ret = -EINVAL;
goto err;
}
/* XXX: validate lblk_per_zone */
c->lc = kmalloc(lbdcache_size(), GFP_KERNEL);
if (!c->lc) {
printk(KERN_ERR "Failed to alloc lbdcache\n");
ret = -ENOMEM;
goto err;
}
if (!lbdcache_ctr(c->lc, &c->params)) {
printk(KERN_ERR "Failed to init logical block cache\n");
ret = -ENOMEM;
goto err;
}
c->io_workq = alloc_workqueue("compress_io", WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
if (!c->io_workq) {
printk(KERN_ERR "%s: failed to alloc io_workq\n", __func__);
ret = -ENOMEM;
goto err;
}
printk(KERN_INFO "%s: success\n", __func__);
return 0;
err:
dm_put_device(ti, c->dev);
kfree(c);
return ret;
}
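
The net result for the constructor is a single unwind label: header reading, device-size validation, and lbdcache/io_workq setup all funnel failures to err, which drops the device reference and frees the compress struct. A condensed sketch of that pattern, with the argument parsing, allocation of c, dm_get_device() call, and dev_nr_pblks computation assumed from the unchanged context above:

static int
compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int ret;
	struct compress *c = NULL;
	u64 dev_nr_pblks;

	/* ... argument parsing, allocation of c, dm_get_device(),
	 * and dev_nr_pblks computation elided (unchanged context) ... */

	ret = compress_read_header(c);
	if (ret) {
		goto err;
	}

	/* nr_pblk / nr_zones sanity checks against the device size, then
	 * lbdcache and io_workq allocation; each failure sets ret and
	 * jumps to err */

	printk(KERN_INFO "%s: success\n", __func__);
	return 0;

err:
	dm_put_device(ti, c->dev);
	kfree(c);
	return ret;
}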
static void
@@ -435,10 +432,6 @@ compress_map(struct dm_target *ti, struct bio *bio)
struct compress *c = ti->private;
struct compress_io *cio;
if (c->io_failed) {
return DM_MAPIO_KILL;
}
/* from dm-crypt.c */
if (unlikely(bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)) {
bio_set_dev(bio, c->dev->bdev);