716 lines
19 KiB
C
716 lines
19 KiB
C
/*
|
|
* Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version 2
|
|
* of the License, or (at your option) any later version.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program; if not, write to the Free Software
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
|
* 02110-1301, USA.
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/init.h>
|
|
#include <linux/bio.h>
|
|
#include <linux/device-mapper.h>
|
|
#include <linux/workqueue.h>
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/lz4.h>
|
|
|
|
#include <linux/dm-compress.h>
|
|
|
|
// XXX: find a better name for this, something about storage vs. speed.
|
|
// XXX: should this be in cbd_params?
|
|
// #define CBD_DETECT_ZERO_BLOCKS
|
|
|
|
/*
|
|
* XXX
|
|
* If we don't use a workqueue, pblk_read() stalls. Why?
|
|
*/
|
|
#define USE_WORKQUEUE 1
|
|
|
|
struct compress;
|
|
|
|
/* per bio private data */
struct compress_io {
    struct compress* c;      /* owning target instance */
    struct bio* bio;         /* the bio being serviced */
    struct work_struct work; /* queued on c->io_workq (see compress_map) */
};
|
|
|
|
/* Per-target instance state for one compress device. */
struct compress
{
    struct dm_dev* dev;                  /* backing block device */

    struct kobject kobj;                 /* per-device sysfs directory */
    struct completion kobj_unregister;   /* signalled by the kobject release */

    struct compress_params kparams;      /* on-disk parameters + bdev handle */
    struct compress_stats kstats;        /* counters, guarded by kstats.lock */
    struct lbdcache* lc;                 /* logical-block (decompressed) cache */

    struct workqueue_struct* io_workq;   /* single-threaded bio processing */
};
|
|
|
|
/* Module-level parent kobject (/sys/fs/compress); parent of each device's kobj. */
static struct kobject* compress_kobj;
|
|
|
|
static inline u64
|
|
dm_target_pblk_size(struct dm_target* ti, struct cbd_params* params)
|
|
{
|
|
return ti->len >> params->pblk_shift;
|
|
}
|
|
|
|
/**************************************
|
|
* Main functions
|
|
**************************************/
|
|
|
|
/*
 * Read and validate the on-disk CBD header from physical block 0.
 *
 * On success the validated parameters and statistics are copied into
 * c->kparams.params and c->kstats.stats.  Returns 0 on success, or a
 * negative errno (-ENOMEM, -EINVAL, or the error from pblk_read_wait).
 */
static int
compress_read_header(struct compress* c)
{
    int ret = 0;
    struct page* page;
    u8 *buf;
    struct cbd_header header;

    /* Scratch page for the raw on-disk header block. */
    page = cbd_alloc_page();
    if (!page) {
        return -ENOMEM;
    }
    buf = page_address(page);

    /* The header lives in the first physical block of the device. */
    ret = pblk_read_wait(&c->kparams, 0, 1, page);
    if (ret) {
        printk(KERN_ERR "%s: failed to read header\n", __func__);
        goto out;
    }
    memset(&header, 0, sizeof(header));
    cbd_header_get(buf, &header);   /* deserialize from the on-disk layout */

    if (memcmp(header.magic, CBD_MAGIC, sizeof(header.magic)) != 0) {
        printk(KERN_ERR "%s: bad magic\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    /* Both major and minor version must match exactly. */
    if (header.version_major != CBD_VERSION_MAJOR) {
        printk(KERN_ERR "%s: bad version\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    if (header.version_minor != CBD_VERSION_MINOR) {
        printk(KERN_ERR "%s: bad version\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    /* Algorithm must be a known, non-"none" value... */
    if (cbd_compression_alg_get(&header.params) == CBD_ALG_NONE ||
        cbd_compression_alg_get(&header.params) >= CBD_ALG_MAX) {
        printk(KERN_ERR "%s: bad algorithm\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    /* ...and must actually be compiled into this kernel. */
#ifndef COMPRESS_HAVE_LZ4
    if (cbd_compression_alg_get(&header.params) == CBD_ALG_LZ4) {
        printk(KERN_ERR "%s: algorithm lz4 is not built into kernel\n", __func__);
        ret = -EINVAL;
        goto out;
    }
#endif
#ifndef COMPRESS_HAVE_ZLIB
    if (cbd_compression_alg_get(&header.params) == CBD_ALG_ZLIB) {
        printk(KERN_ERR "%s: algorithm zlib is not built into kernel\n", __func__);
        ret = -EINVAL;
        goto out;
    }
#endif
    /* Compression level is constrained to the conventional 1..9 range. */
    if (cbd_compression_level_get(&header.params) < 1 ||
        cbd_compression_level_get(&header.params) > 9) {
        printk(KERN_ERR "%s: bad compression\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    /* Sanity-check the block geometry fields. */
    if (header.params.pblk_shift < PBLK_SHIFT_MIN ||
        header.params.pblk_shift > PBLK_SHIFT_MAX) {
        printk(KERN_ERR "%s: bad pblk_shift\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    if (header.params.lblk_shift < LBLK_SHIFT_MIN ||
        header.params.lblk_shift > LBLK_SHIFT_MAX) {
        printk(KERN_ERR "%s: bad lblk_shift\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    /* LBA table entries are stored in 2, 4 or 6 bytes per pblk. */
    if (header.params.lba_elem_pblk_bytes != 2 &&
        header.params.lba_elem_pblk_bytes != 4 &&
        header.params.lba_elem_pblk_bytes != 6) {
        printk(KERN_ERR "%s: bad lba_elem_pblk_bytes\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    /* Derived structures must fit where the code expects them. */
    if (pbat_len(&header.params) * pblk_size(&header.params) > PAGE_SIZE) {
        printk(KERN_ERR "%s: pbat size too large\n", __func__);
        ret = -EINVAL;
        goto out;
    }
    if (lba_len(&header.params) > pblk_size(&header.params)) {
        printk(KERN_ERR "%s: lba elem size too large\n", __func__);
        ret = -EINVAL;
        goto out;
    }

    printk(KERN_INFO "%s: parameters...\n", __func__);
    printk(KERN_INFO "  compression=0x%02x\n", (unsigned int)header.params.compression);
    printk(KERN_INFO "  pblk_shift=%hu\n", (unsigned short)header.params.pblk_shift);
    printk(KERN_INFO "  lblk_shift=%hu\n", (unsigned short)header.params.lblk_shift);
    printk(KERN_INFO "  lba_elem_pblk_bytes=%hu\n", (unsigned short)header.params.lba_elem_pblk_bytes);
    printk(KERN_INFO "  pbat_shift=%hu\n", (unsigned short)header.params.pbat_shift);
    printk(KERN_INFO "  nr_zones=%u\n", (unsigned int)header.params.nr_zones);
    printk(KERN_INFO "  lblk_per_zone=%u\n", (unsigned int)header.params.lblk_per_zone);
    printk(KERN_INFO "%s: stats...\n", __func__);
    printk(KERN_INFO "  pblk_used=%lu\n", (unsigned long)header.stats.pblk_used);
    printk(KERN_INFO "  lblk_used=%lu\n", (unsigned long)header.stats.lblk_used);

    /* Header is valid: commit it to the in-core state. */
    memcpy(&c->kparams.params, &header.params, sizeof(header.params));
    memcpy(&c->kstats.stats, &header.stats, sizeof(header.stats));

out:
    cbd_free_page(page);
    return ret;
}
|
|
|
|
static int
|
|
compress_write_header(struct compress* c)
|
|
{
|
|
int ret = 0;
|
|
struct page* page;
|
|
u8* buf;
|
|
struct cbd_header header;
|
|
|
|
page = cbd_alloc_page();
|
|
if (!page) {
|
|
return -ENOMEM;
|
|
}
|
|
buf = page_address(page);
|
|
memset(&header, 0, sizeof(header));
|
|
memcpy(header.magic, CBD_MAGIC, sizeof(header.magic));
|
|
header.version_major = CBD_VERSION_MAJOR;
|
|
header.version_minor = CBD_VERSION_MINOR;
|
|
memcpy(&header.params, &c->kparams.params, sizeof(header.params));
|
|
memcpy(&header.stats, &c->kstats.stats, sizeof(header.stats));
|
|
cbd_header_put(buf, &header);
|
|
ret = pblk_write_wait(&c->kparams, 0, 1, page);
|
|
if (ret) {
|
|
printk(KERN_ERR "%s: failed to write header\n", __func__);
|
|
}
|
|
cbd_free_page(page);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int
|
|
compress_read(struct compress *c, struct bio *bio)
|
|
{
|
|
struct lbd* lbd = NULL;
|
|
struct bio_vec bv;
|
|
struct bvec_iter iter;
|
|
u32 lblk_per_sector = lblk_size(&c->kparams.params) >> SECTOR_SHIFT;
|
|
|
|
bio_for_each_segment(bv, bio, iter) {
|
|
u64 lblk = iter.bi_sector / lblk_per_sector;
|
|
u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
|
|
unsigned long flags;
|
|
char* data;
|
|
|
|
lbd = lbdcache_get(c->lc, lblk);
|
|
if (!lbd) {
|
|
return -EIO;
|
|
}
|
|
data = bvec_kmap_irq(&bv, &flags);
|
|
lbd_data_read(lbd, lblk_off, bv.bv_len, data);
|
|
bvec_kunmap_irq(data, &flags);
|
|
lbdcache_put(c->lc, lbd);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int
|
|
compress_write(struct compress *c, struct bio *bio)
|
|
{
|
|
struct lbd* lbd = NULL;
|
|
struct bio_vec bv;
|
|
struct bvec_iter iter;
|
|
u32 lblk_per_sector = lblk_size(&c->kparams.params) >> SECTOR_SHIFT;
|
|
|
|
bio_for_each_segment(bv, bio, iter) {
|
|
u64 lblk = iter.bi_sector / lblk_per_sector;
|
|
u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
|
|
unsigned long flags;
|
|
char* data;
|
|
|
|
lbd = lbdcache_get(c->lc, lblk);
|
|
if (!lbd) {
|
|
return -EIO;
|
|
}
|
|
data = bvec_kmap_irq(&bv, &flags);
|
|
lbd_data_write(lbd, lblk_off, bv.bv_len, data);
|
|
bvec_kunmap_irq(data, &flags);
|
|
lbdcache_put(c->lc, lbd);
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static void
|
|
compress_io(struct compress_io* cio)
|
|
{
|
|
int ret;
|
|
struct compress* c = cio->c;
|
|
struct bio* bio = cio->bio;
|
|
|
|
switch (bio_op(bio)) {
|
|
case REQ_OP_READ:
|
|
ret = compress_read(c, bio);
|
|
break;
|
|
case REQ_OP_WRITE:
|
|
ret = compress_write(c, bio);
|
|
break;
|
|
default:
|
|
printk(KERN_ERR "%s: unknown op in bio: %u\n", __func__, bio_op(bio));
|
|
ret = -EINVAL;
|
|
}
|
|
if (ret) {
|
|
printk(KERN_ERR "%s: failed, ret=%d\n", __func__, ret);
|
|
}
|
|
|
|
bio->bi_status = (ret == 0 ? BLK_STS_OK : BLK_STS_IOERR); /* XXX */
|
|
bio_endio(bio);
|
|
}
|
|
|
|
#ifdef USE_WORKQUEUE
|
|
static void
|
|
compress_io_work(struct work_struct* work)
|
|
{
|
|
struct compress_io* cio = container_of(work, struct compress_io, work);
|
|
|
|
compress_io(cio);
|
|
}
|
|
#endif
|
|
|
|
/*** sysfs stuff ***/
|
|
|
|
/* Identifiers for the per-device sysfs attributes (see compress_attr_show). */
typedef enum {
    attr_lblk_size,    /* logical block size in bytes */
    attr_pblk_used,    /* physical blocks in use */
    attr_pblk_total,   /* total physical block capacity */
    attr_lblk_used,    /* logical blocks in use */
    attr_lblk_total,   /* total logical block capacity */
    attr_pbat_r,       /* pbat read counter */
    attr_pbat_w,       /* pbat write counter */
    attr_lbatpblk_r,   /* lbat pblk read counter */
    attr_lbatpblk_w,   /* lbat pblk write counter */
    attr_lbd_r,        /* lbd read counter */
    attr_lbd_w,        /* lbd write counter */
} attr_id_t;

/* A sysfs attribute plus the id used to select which value to show. */
struct compress_attr {
    struct attribute attr;
    short attr_id;
};
|
|
|
|
/*
 * sysfs ->show handler shared by all per-device attributes.
 * Selects the value by attr_id under kstats.lock and prints it as an
 * unsigned decimal.  An unknown id falls through and reports 0 (the
 * switch has no default case).
 */
static ssize_t
compress_attr_show(struct kobject* kobj, struct attribute* attr,
                   char* buf)
{
    struct compress* c = container_of(kobj, struct compress, kobj);
    struct compress_attr* a = container_of(attr, struct compress_attr, attr);
    u64 val = 0;

    /* Lock so counters and derived totals are read consistently. */
    mutex_lock(&c->kstats.lock);
    switch (a->attr_id) {
    case attr_lblk_size:
        val = lblk_size(&c->kparams.params);
        break;
    case attr_pblk_used:
        val = c->kstats.stats.pblk_used;
        break;
    case attr_pblk_total:
        val = pbat_len(&c->kparams.params) *
              pblk_size_bits(&c->kparams.params) *
              c->kparams.params.nr_zones;
        break;
    case attr_lblk_used:
        val = c->kstats.stats.lblk_used;
        break;
    case attr_lblk_total:
        val = c->kparams.params.lblk_per_zone *
              c->kparams.params.nr_zones;
        break;
    case attr_pbat_r:
        val = c->kstats.pbat_r;
        break;
    case attr_pbat_w:
        val = c->kstats.pbat_w;
        break;
    case attr_lbatpblk_r:
        val = c->kstats.lbatpblk_r;
        break;
    case attr_lbatpblk_w:
        val = c->kstats.lbatpblk_w;
        break;
    case attr_lbd_r:
        val = c->kstats.lbd_r;
        break;
    case attr_lbd_w:
        val = c->kstats.lbd_w;
        break;
    }
    mutex_unlock(&c->kstats.lock);
    return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)val);
}
|
|
|
|
/*
 * COMPRESS_ATTR: define a sysfs attribute named _name with mode _mode
 * whose value is selected in compress_attr_show() by attr_##_id.
 */
#define COMPRESS_ATTR(_name,_mode,_id) \
static struct compress_attr compress_attr_##_name = { \
    .attr = { .name = __stringify(_name), .mode = _mode }, \
    .attr_id = attr_##_id, \
}

/* Shorthand for the common case where the attr name matches its id. */
#define COMPRESS_ATTR_FUNC(_name,_mode) COMPRESS_ATTR(_name, _mode, _name)

COMPRESS_ATTR_FUNC(lblk_size, 0444);
COMPRESS_ATTR_FUNC(pblk_used, 0444);
COMPRESS_ATTR_FUNC(pblk_total, 0444);
COMPRESS_ATTR_FUNC(lblk_used, 0444);
COMPRESS_ATTR_FUNC(lblk_total, 0444);
COMPRESS_ATTR_FUNC(pbat_r, 0444);
COMPRESS_ATTR_FUNC(pbat_w, 0444);
COMPRESS_ATTR_FUNC(lbatpblk_r, 0444);
COMPRESS_ATTR_FUNC(lbatpblk_w, 0444);
COMPRESS_ATTR_FUNC(lbd_r, 0444);
COMPRESS_ATTR_FUNC(lbd_w, 0444);

#define ATTR_LIST(name) &compress_attr_##name.attr

/* NULL-terminated list installed as kobj_type.default_attrs. */
static struct attribute* compress_attrs[] = {
    ATTR_LIST(lblk_size),
    ATTR_LIST(pblk_used),
    ATTR_LIST(pblk_total),
    ATTR_LIST(lblk_used),
    ATTR_LIST(lblk_total),
    ATTR_LIST(pbat_r),
    ATTR_LIST(pbat_w),
    ATTR_LIST(lbatpblk_r),
    ATTR_LIST(lbatpblk_w),
    ATTR_LIST(lbd_r),
    ATTR_LIST(lbd_w),
    NULL
};

#undef ATTR_LIST
|
|
|
|
/* kobject release callback: signal that the kobject is fully torn down. */
static void
compress_sysfs_release(struct kobject* kobj)
{
    struct compress* c = container_of(kobj, struct compress, kobj);

    complete(&c->kobj_unregister);
}
|
|
|
|
/* Attributes are read-only: no .store handler is provided. */
static const struct sysfs_ops compress_attr_ops = {
    .show = compress_attr_show,
};

/* kobj_type binding the attribute list, ops and release together. */
static struct kobj_type compress_ktype = {
    .default_attrs = compress_attrs,
    .sysfs_ops = &compress_attr_ops,
    .release = compress_sysfs_release,
};
|
|
|
|
/*
 * Create the per-device sysfs directory, named after the backing block
 * device, under the module-level "compress" kobject.  On failure the
 * kobject is released and its teardown awaited, so the caller need not
 * clean up the kobject itself.
 */
static int
compress_register_sysfs(struct compress* c)
{
    int err;
    char name[32];

    /* "%pg" formats the block device name (e.g. sda1). */
    snprintf(name, sizeof(name), "%pg", c->dev->bdev);

    init_completion(&c->kobj_unregister);
    err = kobject_init_and_add(&c->kobj, &compress_ktype, compress_kobj, "%s", name);
    if (err) {
        kobject_put(&c->kobj);
        wait_for_completion(&c->kobj_unregister);
    }

    return err;
}
|
|
|
|
/*
 * Remove the per-device sysfs directory.
 * NOTE(review): only kobject_del() is called; there is no matching
 * kobject_put(), so the release callback (and c->kobj_unregister)
 * may never fire -- confirm against kobject lifetime rules.
 */
static void
compress_unregister_sysfs(struct compress* c)
{
    kobject_del(&c->kobj);
}
|
|
|
|
/*
|
|
* Usage:
|
|
* echo "<start_sector> <end_sector> compress <backing_device> <args...>" | dmsetup create <compress_name>
|
|
* Where:
|
|
* start_sector is the starting sector of the backing device.
|
|
* end_sector is the ending sector of the backing device.
|
|
* compress is the name of this module.
|
|
* backing_device is the name backing device.
|
|
* args may include:
|
|
* cache_pages=#
|
|
* compress_name is the name of the compress device.
|
|
*/
|
|
static int
|
|
compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
|
{
|
|
int ret;
|
|
unsigned int argn;
|
|
u32 cache_pages = 0;
|
|
struct compress *c = NULL;
|
|
u64 target_nr_pblks;
|
|
|
|
printk(KERN_INFO "%s: enter: argc=%u\n", __func__, argc);
|
|
for (argn = 0; argn < argc; ++argn) {
|
|
printk(KERN_INFO " ... arg[%u]=\"%s\"\n", argn, argv[argn]);
|
|
}
|
|
if (argc == 0) {
|
|
ti->error = "No device specified";
|
|
return -EINVAL;
|
|
}
|
|
|
|
argn = 1;
|
|
while (argn < argc) {
|
|
const char* arg = argv[argn++];
|
|
const char* val = NULL;
|
|
const char* eq = strchr(arg, '=');
|
|
int err;
|
|
if (eq) {
|
|
val = eq + 1;
|
|
}
|
|
/* XXX: Parse suffixes */
|
|
if (!memcmp(arg, "cache_pages", 7)) {
|
|
err = kstrtouint(eq + 1, 0, &cache_pages);
|
|
if (err) {
|
|
ti->error = "Failed to parse cache_pages";
|
|
return -EINVAL;
|
|
}
|
|
continue;
|
|
}
|
|
ti->error = "Unrecognized argument";
|
|
return -EINVAL;
|
|
}
|
|
|
|
c = kzalloc(sizeof(struct compress), GFP_KERNEL);
|
|
if (!c) {
|
|
ti->error = "Failed to allocate target";
|
|
return -ENOMEM;
|
|
}
|
|
|
|
if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev)) {
|
|
ti->error = "Device lookup failed";
|
|
kfree(c);
|
|
return -EINVAL;
|
|
}
|
|
|
|
ti->private = c;
|
|
ti->per_io_data_size = ALIGN(sizeof(struct compress_io), ARCH_KMALLOC_MINALIGN);
|
|
|
|
ret = compress_register_sysfs(c);
|
|
if (ret) {
|
|
ti->error = "Failed to register sysfs";
|
|
goto err;
|
|
}
|
|
c->kparams.dev = c->dev->bdev;
|
|
mutex_init(&c->kstats.lock);
|
|
|
|
ret = compress_read_header(c);
|
|
if (ret) {
|
|
goto err;
|
|
}
|
|
target_nr_pblks = dm_target_pblk_size(ti, &c->kparams.params);
|
|
if ((target_nr_pblks >> 48) != 0) {
|
|
ti->error = "Device too large";
|
|
ret = -EINVAL;
|
|
goto err;
|
|
}
|
|
if (!cache_pages) {
|
|
/* Minimum of 1/1k RAM and 1/64k device size */
|
|
cache_pages = min((unsigned int)(totalram_pages >> 10),
|
|
(unsigned int)(target_nr_pblks >> 16));
|
|
if (cache_pages < 32 * 2 * num_online_cpus()) {
|
|
cache_pages = 32 * 2 * num_online_cpus();
|
|
}
|
|
}
|
|
printk(KERN_INFO "%s: pages=%lu pblks=%lu cache_pages=%u\n",
|
|
__func__, totalram_pages, (unsigned long)target_nr_pblks, cache_pages);
|
|
|
|
if (c->kparams.params.flags & CBD_FLAG_DIRTY) {
|
|
printk(KERN_INFO "Warning: device was not properly closed\n");
|
|
}
|
|
if (dm_table_get_mode(ti->table) & FMODE_WRITE) {
|
|
u16 save_flags = c->kparams.params.flags;
|
|
c->kparams.params.flags |= CBD_FLAG_DIRTY;
|
|
ret = compress_write_header(c);
|
|
c->kparams.params.flags = save_flags;
|
|
if (ret) {
|
|
goto err;
|
|
}
|
|
}
|
|
|
|
if (target_nr_pblks < zone_off(&c->kparams.params, c->kparams.params.nr_zones)) {
|
|
printk(KERN_ERR "%s: Device too small\n", __func__);
|
|
ret = -EINVAL;
|
|
goto err;
|
|
}
|
|
|
|
/* XXX: validate lblk_per_zone */
|
|
|
|
c->lc = kmalloc(lbdcache_size(), GFP_KERNEL);
|
|
if (!c->lc) {
|
|
printk(KERN_ERR "Failed to alloc lbdcache\n");
|
|
ret = -ENOMEM;
|
|
goto err;
|
|
}
|
|
if (!lbdcache_ctr(c->lc, &c->kparams, &c->kstats, cache_pages)) {
|
|
printk(KERN_ERR "Failed to init logical block cache\n");
|
|
ret = -ENOMEM;
|
|
goto err;
|
|
}
|
|
|
|
c->io_workq = alloc_workqueue("compress_io", WQ_HIGHPRI | WQ_MEM_RECLAIM, 1);
|
|
if (!c->io_workq) {
|
|
printk(KERN_ERR "%s: failed to alloc io_workq\n", __func__);
|
|
ret = -ENOMEM;
|
|
goto err;
|
|
}
|
|
|
|
printk(KERN_INFO "%s: success\n", __func__);
|
|
|
|
return 0;
|
|
|
|
err:
|
|
dm_put_device(ti, c->dev);
|
|
kfree(c);
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Destructor: rewrite the header (the in-core flags were restored in
 * the ctr, so this persists a clean, non-dirty header plus final
 * stats) when the table is writable, then tear down the lbd cache,
 * workqueue, sysfs entry and backing device.
 */
static void
compress_dtr(struct dm_target *ti)
{
    int ret;
    struct compress *c;

    printk(KERN_INFO "%s: enter\n", __func__);

    c = ti->private;

    if (dm_table_get_mode(ti->table) & FMODE_WRITE) {
        ret = compress_write_header(c);
        if (ret) {
            /* Non-fatal: device still tears down, but stays dirty on disk. */
            printk(KERN_INFO "Warning: failed to write header\n");
        }
    }
    lbdcache_dtr(c->lc);
    kfree(c->lc);
    if (c->io_workq) {
        destroy_workqueue(c->io_workq);
    }
    compress_unregister_sysfs(c);
    dm_put_device(ti, c->dev);
    kfree(c);
}
|
|
|
|
static int
|
|
compress_map(struct dm_target *ti, struct bio *bio)
|
|
{
|
|
struct compress *c = ti->private;
|
|
struct compress_io *cio;
|
|
|
|
if (c->kparams.params.flags & CBD_FLAG_ERROR) {
|
|
bio->bi_status = BLK_STS_IOERR;
|
|
bio_endio(bio);
|
|
return DM_MAPIO_SUBMITTED; /* XXXX: DM_MAPIO_KILL? */
|
|
}
|
|
|
|
/* from dm-crypt.c */
|
|
if (unlikely(bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)) {
|
|
bio_set_dev(bio, c->dev->bdev);
|
|
if (bio_sectors(bio)) {
|
|
/* XXX: remap to underlying data */
|
|
}
|
|
return DM_MAPIO_REMAPPED;
|
|
}
|
|
|
|
/* Synchronous I/O operations deadlock, so queue them. */
|
|
/* XXX: clone the bio? */
|
|
cio = dm_per_bio_data(bio, ti->per_io_data_size);
|
|
cio->c = c;
|
|
cio->bio = bio;
|
|
#ifdef USE_WORKQUEUE
|
|
INIT_WORK(&cio->work, compress_io_work);
|
|
queue_work(c->io_workq, &cio->work);
|
|
#else
|
|
compress_io(io);
|
|
#endif
|
|
|
|
return DM_MAPIO_SUBMITTED;
|
|
}
|
|
|
|
/* device-mapper target registration table. */
static struct target_type compress_target = {
    .name = "compress",
    .version = { 1, 0, 0 },
    .module = THIS_MODULE,
    .ctr = compress_ctr,
    .dtr = compress_dtr,
    .map = compress_map,
};
|
|
|
|
static int __init
|
|
dm_compress_init(void)
|
|
{
|
|
int res;
|
|
|
|
compress_kobj = kobject_create_and_add("compress", fs_kobj);
|
|
if (!compress_kobj) {
|
|
printk(KERN_ERR "Failed to add sysfs kobj\n");
|
|
return -ENOMEM;
|
|
}
|
|
|
|
res = dm_register_target(&compress_target);
|
|
if (res < 0) {
|
|
printk(KERN_ERR "Failed to register dm-compress: %d\n", res);
|
|
}
|
|
|
|
return res;
|
|
}
|
|
|
|
/* Module exit: unregister the target and drop the module sysfs kobject. */
static void __exit
dm_compress_exit(void)
{
    dm_unregister_target(&compress_target);

    if (compress_kobj) {
        kobject_put(compress_kobj);
        compress_kobj = NULL;
    }
}
|
|
|
|
module_init(dm_compress_init);
|
|
module_exit(dm_compress_exit);
|
|
|
|
MODULE_DESCRIPTION("compress target for transparent compression");
|
|
MODULE_AUTHOR("Tom Marshall <tdm.code@gmail.com>");
|
|
MODULE_LICENSE("GPL");
|
|
MODULE_VERSION("1.0");
|