Clean up compress_ctr and compress_dtr

Always set dm_target.error on initialization error.

Always clean up properly on error.
This commit is contained in:
Tom Marshall 2019-11-14 15:06:36 +01:00
parent 2df4071304
commit bfecc33c54
1 changed file with 46 additions and 26 deletions

View File

@@ -75,7 +75,7 @@ dm_target_pblk_size(struct dm_target* ti, struct cbd_params* params)
**************************************/
static int
compress_read_header(struct compress* c)
compress_read_header(struct compress* c, char** errorp)
{
int ret = 0;
struct page* page;
@@ -84,85 +84,92 @@ compress_read_header(struct compress* c)
page = cbd_alloc_page();
if (!page) {
*errorp = "Out of memory";
return -ENOMEM;
}
buf = page_address(page);
ret = pblk_read_wait(&c->kparams, 0, 1, page);
if (ret) {
printk(KERN_ERR "%s: failed to read header\n", __func__);
*errorp = "Header: failed to read";
goto out;
}
memset(&header, 0, sizeof(header));
cbd_header_get(buf, &header);
if (memcmp(header.magic, CBD_MAGIC, sizeof(header.magic)) != 0) {
printk(KERN_ERR "%s: bad magic\n", __func__);
*errorp = "Header: bad magic";
ret = -EINVAL;
goto out;
}
if (header.version_major != CBD_VERSION_MAJOR) {
printk(KERN_ERR "%s: bad version\n", __func__);
*errorp = "Header: unsupported major version";
ret = -EINVAL;
goto out;
}
if (header.version_minor != CBD_VERSION_MINOR) {
printk(KERN_ERR "%s: bad version\n", __func__);
*errorp = "Header: unsupported minor version";
ret = -EINVAL;
goto out;
}
if (cbd_compression_alg_get(&header.params) == CBD_ALG_NONE ||
cbd_compression_alg_get(&header.params) >= CBD_ALG_MAX) {
printk(KERN_ERR "%s: bad algorithm\n", __func__);
*errorp = "Header: unknown compression algorithm";
ret = -EINVAL;
goto out;
}
#ifndef COMPRESS_HAVE_LZ4
if (cbd_compression_alg_get(&header.params) == CBD_ALG_LZ4) {
printk(KERN_ERR "%s: algorithm lz4 is not built into kernel\n", __func__);
*errorp = "Header: compression algorithm lz4 is not built into kernel";
ret = -EINVAL;
goto out;
}
#endif
#ifndef COMPRESS_HAVE_ZLIB
if (cbd_compression_alg_get(&header.params) == CBD_ALG_ZLIB) {
printk(KERN_ERR "%s: algorithm zlib is not built into kernel\n", __func__);
*errorp = "Header: compression algorithm zlib is not built into kernel";
ret = -EINVAL;
goto out;
}
#endif
if (cbd_compression_level_get(&header.params) < 1 ||
cbd_compression_level_get(&header.params) > 9) {
printk(KERN_ERR "%s: bad compression\n", __func__);
*errorp = "Header: compression level out of bounds";
ret = -EINVAL;
goto out;
}
if (header.params.pblk_shift < PBLK_SHIFT_MIN ||
header.params.pblk_shift > PBLK_SHIFT_MAX) {
printk(KERN_ERR "%s: bad pblk_shift\n", __func__);
*errorp = "Header: pblk_shift out of bounds";
ret = -EINVAL;
goto out;
}
if (header.params.lblk_shift < LBLK_SHIFT_MIN ||
header.params.lblk_shift > LBLK_SHIFT_MAX) {
printk(KERN_ERR "%s: bad lblk_shift\n", __func__);
*errorp = "Header: lblk_shift out of bounds";
ret = -EINVAL;
goto out;
}
if (header.params.lba_elem_pblk_bytes != 2 &&
header.params.lba_elem_pblk_bytes != 4 &&
header.params.lba_elem_pblk_bytes != 6) {
printk(KERN_ERR "%s: bad lba_elem_pblk_bytes\n", __func__);
*errorp = "Header: lba_elem_pblk_bytes out of bounds";
ret = -EINVAL;
goto out;
}
if (header.params.pbat_shift < LBLK_SHIFT_MIN ||
header.params.pbat_shift > LBLK_SHIFT_MAX) {
*errorp = "Header: pbat_shift out of bounds";
ret = -EINVAL;
goto out;
}
if (pbat_len(&header.params) * pblk_size(&header.params) > PAGE_SIZE) {
printk(KERN_ERR "%s: pbat size too large\n", __func__);
*errorp = "Header: pbat size too large";
ret = -EINVAL;
goto out;
}
if (lba_len(&header.params) > pblk_size(&header.params)) {
printk(KERN_ERR "%s: lba elem size too large\n", __func__);
*errorp = "Header: lba elem size too large";
ret = -EINVAL;
goto out;
}
@@ -459,6 +466,21 @@ compress_unregister_sysfs(struct compress* c)
kobject_del(&c->kobj);
}
/*
 * Common teardown for a struct compress: release the logical block
 * cache, the I/O workqueue, and the sysfs registration.
 *
 * Members that were never allocated are NULL (the object comes from
 * kzalloc) and are skipped, so this is safe to call on a partially
 * constructed object.  It is used both on the compress_ctr() error
 * path and from compress_dtr().
 *
 * NOTE(review): compress_unregister_sysfs() (kobject_del) is called
 * unconditionally — confirm it is safe when the ctr failed before the
 * kobject was ever added.
 */
static void
__compress_dtr(struct compress* c)
{
printk(KERN_INFO "%s: enter\n", __func__);
/* lbdcache_dtr releases cache internals; the cache memory itself
   was a separate kmalloc and is freed here. */
if (c->lc) {
lbdcache_dtr(c->lc);
kfree(c->lc);
}
/* Flushes pending work and frees the workqueue. */
if (c->io_workq) {
destroy_workqueue(c->io_workq);
}
compress_unregister_sysfs(c);
}
/*
* Usage:
* echo "<start_sector> <end_sector> compress <backing_device> <args...>" | dmsetup create <compress_name>
@@ -513,7 +535,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
c = kzalloc(sizeof(struct compress), GFP_KERNEL);
if (!c) {
ti->error = "Failed to allocate target";
ti->error = "Out of memory";
return -ENOMEM;
}
@@ -534,8 +556,9 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
c->kparams.dev = c->dev->bdev;
mutex_init(&c->kstats.lock);
ret = compress_read_header(c);
ret = compress_read_header(c, &ti->error);
if (ret) {
/* ti->error already set */
goto err;
}
target_nr_pblks = dm_target_pblk_size(ti, &c->kparams.params);
@@ -564,12 +587,13 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = compress_write_header(c);
c->kparams.params.flags = save_flags;
if (ret) {
ti->error = "Failed to write header";
goto err;
}
}
if (target_nr_pblks < zone_off(&c->kparams.params, c->kparams.params.nr_zones)) {
printk(KERN_ERR "%s: Device too small\n", __func__);
ti->error = "Device too small";
ret = -EINVAL;
goto err;
}
@@ -578,19 +602,19 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
c->lc = kmalloc(lbdcache_size(), GFP_KERNEL);
if (!c->lc) {
printk(KERN_ERR "Failed to alloc lbdcache\n");
ti->error = "Out of memory";
ret = -ENOMEM;
goto err;
}
if (!lbdcache_ctr(c->lc, &c->kparams, &c->kstats, cache_pages)) {
printk(KERN_ERR "Failed to init logical block cache\n");
ti->error = "Failed to init logical block cache";
ret = -ENOMEM;
goto err;
}
c->io_workq = alloc_workqueue("compress_io", WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
if (!c->io_workq) {
printk(KERN_ERR "%s: failed to alloc io_workq\n", __func__);
ti->error = "Failed to alloc io_workq";
ret = -ENOMEM;
goto err;
}
@@ -600,6 +624,7 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return 0;
err:
__compress_dtr(c);
dm_put_device(ti, c->dev);
kfree(c);
return ret;
@@ -621,12 +646,7 @@ compress_dtr(struct dm_target *ti)
printk(KERN_INFO "Warning: failed to write header\n");
}
}
lbdcache_dtr(c->lc);
kfree(c->lc);
if (c->io_workq) {
destroy_workqueue(c->io_workq);
}
compress_unregister_sysfs(c);
__compress_dtr(c);
dm_put_device(ti, c->dev);
kfree(c);
}