Add zone lazy init and do more cleanup

* Add lazy init logic to cbd format.  Override with --full-init.
 * Make cbd check lazy init aware.
 * Fix up a bunch of size calculation errors.
 * Add pblk validity check in lbd_flush, lbd_read.
This commit is contained in:
Tom Marshall 2019-11-15 19:30:47 +01:00
parent 8b9b922344
commit b169a0fbcc
9 changed files with 266 additions and 65 deletions

View File

@ -140,6 +140,7 @@ usage(void)
" -z --compress-alg Compression algorithm [lz4]\n"
" -Z --compress-level Compression level [1]\n"
" --profile Set -p -l -z -Z automatically\n"
" --full-init Fully init device (no lazy init)\n"
" Note:\n"
" -c and -s are different ways of specifying the compressed device size.\n"
" Only one may be used, not both.\n"
@ -180,6 +181,7 @@ do_format(int argc, char** argv)
{ "compress-alg", required_argument, NULL, 'z' },
{ "compress-level", required_argument, NULL, 'Z' },
{ "profile", required_argument, NULL, 0x1 },
{ "full-init", no_argument, NULL, 0x2 },
{ NULL, no_argument, NULL, 0 }
};
char opt;
@ -191,6 +193,7 @@ do_format(int argc, char** argv)
uint pbatsize = 1;
enum cbd_alg alg = CBD_ALG_LZ4;
uint level = 1;
bool full_init = false;
uint8_t pshift;
uint8_t lshift;
@ -268,6 +271,9 @@ do_format(int argc, char** argv)
error("Invalid profile \"%s\"\n", optarg);
}
break;
case 0x2:
full_init = true;
break;
default:
usage();
}
@ -289,7 +295,9 @@ do_format(int argc, char** argv)
}
dev = argv[optind++];
cbd_format(dev, alg, level, pshift, lshift, pbatshift, psize, lsize);
cbd_format(dev, alg, level,
pshift, lshift, pbatshift, psize, lsize,
full_init);
return 0;
}

View File

@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
@ -60,16 +61,37 @@ struct compress
struct lbdcache* lc;
struct workqueue_struct* io_workq;
struct wait_queue_head init_waitq;
struct task_struct* init_thread;
};
static struct kobject* compress_kobj;
static inline u64
dm_target_pblk_size(struct dm_target* ti, struct cbd_params* params)
blkdev_pages(struct block_device* bdev)
{
return i_size_read(bdev->bd_inode) >> PAGE_SHIFT;
}
/*
 * Size of the underlying block device, expressed in pblks.
 * i_size_read() yields bytes; a pblk is (1 << pblk_shift) sectors of
 * SECTOR_SIZE bytes, hence the combined shift.
 */
static inline u64
blkdev_pblks(struct block_device* bdev, struct cbd_params* params)
{
    u64 dev_bytes = i_size_read(bdev->bd_inode);

    return dev_bytes >> (SECTOR_SHIFT + params->pblk_shift);
}
/*
 * Length of the dm target, expressed in pblks.
 * ti->len is in 512-byte sectors, so shifting by pblk_shift converts
 * sectors to pblks.
 */
static inline u64
target_pblks(struct dm_target* ti, struct cbd_params* params)
{
    u64 nr_sectors = ti->len;

    return nr_sectors >> params->pblk_shift;
}
static inline u64
logical_pblks(struct cbd_params* params)
{
return (params->lblk_per_zone << params->lblk_shift) * params->nr_zones;
}
/**************************************
* Main functions
**************************************/
@ -157,8 +179,8 @@ compress_read_header(struct compress* c, char** errorp)
ret = -EINVAL;
goto out;
}
if (header.params.pbat_shift < LBLK_SHIFT_MIN ||
header.params.pbat_shift > LBLK_SHIFT_MAX) {
if (header.params.pbat_shift < PBAT_SHIFT_MIN ||
header.params.pbat_shift > PBAT_SHIFT_MAX) {
*errorp = "Header: pbat_shift out of bounds";
ret = -EINVAL;
goto out;
@ -173,6 +195,11 @@ compress_read_header(struct compress* c, char** errorp)
ret = -EINVAL;
goto out;
}
if ((zone_off(&header.params, header.params.nr_zones) >> 48) != 0) {
*errorp = "Header: logical device too large";
ret = -EINVAL;
goto out;
}
printk(KERN_INFO "%s: parameters...\n", __func__);
printk(KERN_INFO " compression=0x%02x\n", (unsigned int)header.params.compression);
@ -223,6 +250,93 @@ compress_write_header(struct compress* c)
return ret;
}
/*
 * Write the on-disk header with CBD_FLAG_DIRTY forced on, restoring the
 * in-memory flags afterwards so only the persisted copy is marked dirty.
 * Returns the result of compress_write_header().
 */
static int
compress_write_header_dirty(struct compress* c)
{
    u16 orig_flags = c->kparams.params.flags;
    int err;

    c->kparams.params.flags = orig_flags | CBD_FLAG_DIRTY;
    err = compress_write_header(c);
    c->kparams.params.flags = orig_flags;

    return err;
}
/*
 * Zero out the metadata region of one zone by writing the caller's
 * pre-zeroed page over each metadata pblk in the zone.
 *
 * @c:    compress target state (provides cbd_params for layout).
 * @zone: zone index to initialize.
 * @page: a zero-filled, locked page used as the write source; the caller
 *        checks PageError() on it afterwards to detect write failure.
 */
static void
init_zone(struct compress* c, u32 zone, struct page* page)
{
/* Number of pblks one PAGE_SIZE source page can cover per writev. */
u32 pblk_per_page = PAGE_SIZE / pblk_size(&c->kparams.params);
/* First pblk of this zone. */
u64 zone_pblk = zone_off(&c->kparams.params, zone);
/* Metadata pblks at the start of the zone that must be zeroed. */
u32 nr_pblk = zone_metadata_len(&c->kparams.params);
u32 pblk_idx;
u64 pblkv[PBLK_IOV_MAX];
u32 iov_len;
u32 n;
pblk_idx = 0;
while (pblk_idx < nr_pblk) {
/* Batch as many consecutive pblks as fit in one page, bounded
 * by the remaining count.  NOTE(review): iov_len is assumed to
 * never exceed PBLK_IOV_MAX (pblk_per_page <= PBLK_IOV_MAX) --
 * confirm, else pblkv[] can overflow. */
iov_len = min(nr_pblk - pblk_idx, pblk_per_page);
for (n = 0; n < iov_len; ++n) {
pblkv[n] = zone_pblk + pblk_idx++;
}
pblk_writev(&c->kparams, pblkv, iov_len, page);
/* Presumably pblk_writev unlocks the page on I/O completion;
 * re-lock it before reuse in the next iteration -- TODO confirm
 * against pblk_writev's contract. */
lock_page(page);
}
}
/*
 * Background kthread that lazily initializes zone metadata, started from
 * compress_ctr() when init_zones < nr_zones.
 *
 * Progress (params.init_zones) is persisted via a dirty header write every
 * ~5 seconds, and waiters on c->init_waitq (see compress_write) are woken
 * as zones become available.  The thread exits when all zones are done,
 * on write/header error, or when kthread_stop() is requested.
 *
 * Fix: the original returned straight out on page-allocation failure
 * without clearing c->init_thread or waking the waitqueue, leaving
 * writers in compress_write() blocked forever on
 * wait_event_interruptible(..., init_thread == NULL).  All exits now
 * funnel through a single path that clears init_thread and wakes waiters.
 *
 * NOTE(review): clearing c->init_thread here while __compress_dtr() may
 * concurrently call kthread_stop() on it looks racy -- confirm the
 * teardown ordering guarantees this cannot overlap.
 */
static int
init_zone_thread(void* arg)
{
    int ret = 0;
    struct compress* c = arg;
    struct page* page;
    u8* buf;
    unsigned long now;
    unsigned long next_write;

    printk(KERN_INFO "%s: initializing zones\n", __func__);
    page = cbd_alloc_page();
    if (!page) {
        printk(KERN_ERR "%s: Out of memory\n", __func__);
        ret = -ENOMEM;
        goto out;
    }
    buf = page_address(page);
    memset(buf, 0, PAGE_SIZE);
    /* init_zone() expects a locked, zero-filled source page. */
    lock_page(page);
    next_write = jiffies + 5*HZ;
    while (c->kparams.params.init_zones < c->kparams.params.nr_zones) {
        init_zone(c, c->kparams.params.init_zones, page);
        if (PageError(page)) {
            printk(KERN_ERR "%s: write failed\n", __func__);
            break;
        }
        ++c->kparams.params.init_zones;
        now = jiffies;
        if (time_after_eq(now, next_write)) {
            /* Periodically checkpoint progress so a crash does not
             * restart initialization from zone 0. */
            printk(KERN_INFO "%s: initialized %u/%u zones\n", __func__,
                   c->kparams.params.init_zones, c->kparams.params.nr_zones);
            ret = compress_write_header_dirty(c);
            if (ret) {
                break;
            }
            wake_up_interruptible(&c->init_waitq);
            next_write = now + 5*HZ;
        }
        if (kthread_should_stop()) {
            break;
        }
    }
    cbd_free_page(page);
    /* Persist the final init_zones count regardless of how we stopped. */
    compress_write_header_dirty(c);
out:
    c->init_thread = NULL;
    /* Wake any writers blocked waiting for zones (or for thread exit). */
    wake_up_interruptible(&c->init_waitq);
    printk(KERN_INFO "%s: exit\n", __func__);
    return ret;
}
static int
compress_read(struct compress *c, struct bio *bio)
{
@ -234,17 +348,25 @@ compress_read(struct compress *c, struct bio *bio)
bio_for_each_segment(bv, bio, iter) {
u64 lblk = iter.bi_sector / lblk_per_sector;
u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
u32 lblk_zone = zone_for_lblk(&c->kparams.params, lblk);
unsigned long flags;
char* data;
lbd = lbdcache_get(c->lc, lblk);
if (!lbd) {
return -EIO;
if (c->kparams.params.init_zones >= lblk_zone) {
lbd = lbdcache_get(c->lc, lblk);
if (!lbd) {
return -EIO;
}
data = bvec_kmap_irq(&bv, &flags);
lbd_data_read(lbd, lblk_off, bv.bv_len, data);
bvec_kunmap_irq(data, &flags);
lbdcache_put(c->lc, lbd);
}
else {
data = bvec_kmap_irq(&bv, &flags);
memset(data, 0, bv.bv_len);
bvec_kunmap_irq(data, &flags);
}
data = bvec_kmap_irq(&bv, &flags);
lbd_data_read(lbd, lblk_off, bv.bv_len, data);
bvec_kunmap_irq(data, &flags);
lbdcache_put(c->lc, lbd);
}
return 0;
@ -261,9 +383,21 @@ compress_write(struct compress *c, struct bio *bio)
bio_for_each_segment(bv, bio, iter) {
u64 lblk = iter.bi_sector / lblk_per_sector;
u32 lblk_off = (iter.bi_sector - lblk * lblk_per_sector) * SECTOR_SIZE;
u32 lblk_zone = zone_for_lblk(&c->kparams.params, lblk);
int ret;
unsigned long flags;
char* data;
if (c->kparams.params.init_zones < lblk_zone && c->init_thread != NULL) {
ret = wait_event_interruptible(c->init_waitq,
c->kparams.params.init_zones >= lblk_zone || c->init_thread == NULL);
if (ret) {
return ret;
}
}
if (c->kparams.params.init_zones < lblk_zone) {
return -EIO;
}
lbd = lbdcache_get(c->lc, lblk);
if (!lbd) {
return -EIO;
@ -316,6 +450,8 @@ compress_io_work(struct work_struct* work)
/*** sysfs stuff ***/
typedef enum {
attr_zone_init,
attr_zone_total,
attr_lblk_size,
attr_pblk_used,
attr_pblk_total,
@ -344,6 +480,12 @@ compress_attr_show(struct kobject* kobj, struct attribute* attr,
mutex_lock(&c->kstats.lock);
switch (a->attr_id) {
case attr_zone_init:
val = c->kparams.params.init_zones;
break;
case attr_zone_total:
val = c->kparams.params.nr_zones;
break;
case attr_lblk_size:
val = lblk_size(&c->kparams.params);
break;
@ -393,6 +535,8 @@ static struct compress_attr compress_attr_##_name = { \
#define COMPRESS_ATTR_FUNC(_name,_mode) COMPRESS_ATTR(_name, _mode, _name)
COMPRESS_ATTR_FUNC(zone_init, 0444);
COMPRESS_ATTR_FUNC(zone_total, 0444);
COMPRESS_ATTR_FUNC(lblk_size, 0444);
COMPRESS_ATTR_FUNC(pblk_used, 0444);
COMPRESS_ATTR_FUNC(pblk_total, 0444);
@ -408,6 +552,8 @@ COMPRESS_ATTR_FUNC(lbd_w, 0444);
#define ATTR_LIST(name) &compress_attr_##name.attr
static struct attribute* compress_attrs[] = {
ATTR_LIST(zone_init),
ATTR_LIST(zone_total),
ATTR_LIST(lblk_size),
ATTR_LIST(pblk_used),
ATTR_LIST(pblk_total),
@ -471,6 +617,10 @@ __compress_dtr(struct compress* c)
{
printk(KERN_INFO "%s: enter\n", __func__);
if (c->init_thread) {
kthread_stop(c->init_thread);
c->init_thread = NULL;
}
if (c->lc) {
lbdcache_dtr(c->lc);
kfree(c->lc);
@ -501,7 +651,6 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
u32 cache_pages = 0;
bool sync = false;
struct compress *c = NULL;
u64 target_nr_pblks;
printk(KERN_INFO "%s: enter: argc=%u\n", __func__, argc);
for (argn = 0; argn < argc; ++argn) {
@ -566,43 +715,45 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* ti->error already set */
goto err;
}
target_nr_pblks = dm_target_pblk_size(ti, &c->kparams.params);
if ((target_nr_pblks >> 48) != 0) {
ti->error = "Device too large";
if (blkdev_pblks(c->dev->bdev, &c->kparams.params) <
zone_off(&c->kparams.params, c->kparams.params.nr_zones)) {
printk(KERN_ERR "%s: physical device too small: "
"actual=%lu, needed=%lu\n", __func__,
(unsigned long)blkdev_pblks(c->dev->bdev, &c->kparams.params),
(unsigned long)zone_off(&c->kparams.params, c->kparams.params.nr_zones));
ti->error = "Physical device too small";
ret = -EINVAL;
goto err;
}
if (target_pblks(ti, &c->kparams.params) !=
logical_pblks(&c->kparams.params)) {
printk(KERN_WARNING "%s: incorrect target device size: "
"expected pblks=%lu, actual pblks=%lu\n", __func__,
(unsigned long)logical_pblks(&c->kparams.params),
(unsigned long)target_pblks(ti, &c->kparams.params));
}
if (!cache_pages) {
/* Minimum of 1/1k RAM and 1/64k device size */
cache_pages = min((unsigned int)(totalram_pages >> 10),
(unsigned int)(target_nr_pblks >> 16));
(unsigned int)(blkdev_pages(c->dev->bdev) >> 16));
if (cache_pages < 32 * 2 * num_online_cpus()) {
cache_pages = 32 * 2 * num_online_cpus();
}
}
printk(KERN_INFO "%s: pages=%lu pblks=%lu cache_pages=%u\n",
__func__, totalram_pages, (unsigned long)target_nr_pblks, cache_pages);
printk(KERN_INFO "%s: totalram_pages=%lu blkdev_pages=%lu cache_pages=%u\n",
__func__, totalram_pages, (unsigned long)blkdev_pages(c->dev->bdev), cache_pages);
if (c->kparams.params.flags & CBD_FLAG_DIRTY) {
printk(KERN_INFO "Warning: device was not properly closed\n");
}
if (dm_table_get_mode(ti->table) & FMODE_WRITE) {
u16 save_flags = c->kparams.params.flags;
c->kparams.params.flags |= CBD_FLAG_DIRTY;
ret = compress_write_header(c);
c->kparams.params.flags = save_flags;
ret = compress_write_header_dirty(c);
if (ret) {
ti->error = "Failed to write header";
goto err;
}
}
if (target_nr_pblks < zone_off(&c->kparams.params, c->kparams.params.nr_zones)) {
ti->error = "Device too small";
ret = -EINVAL;
goto err;
}
/* XXX: validate lblk_per_zone */
c->lc = kmalloc(lbdcache_size(), GFP_KERNEL);
@ -624,6 +775,17 @@ compress_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err;
}
init_waitqueue_head(&c->init_waitq);
if (c->kparams.params.init_zones < c->kparams.params.nr_zones) {
c->init_thread = kthread_run(init_zone_thread, c, "compress_zone_init");
if (IS_ERR(c->init_thread)) {
ti->error = "Failed to start zone init thread";
ret = PTR_ERR(c->init_thread);
c->init_thread = NULL;
goto err;
}
}
printk(KERN_INFO "%s: success\n", __func__);
return 0;
@ -639,19 +801,16 @@ static void
compress_dtr(struct dm_target *ti)
{
int ret;
struct compress *c;
struct compress* c = ti->private;
printk(KERN_INFO "%s: enter\n", __func__);
c = ti->private;
__compress_dtr(c);
if (dm_table_get_mode(ti->table) & FMODE_WRITE) {
ret = compress_write_header(c);
if (ret) {
printk(KERN_INFO "Warning: failed to write header\n");
}
}
__compress_dtr(c);
dm_put_device(ti, c->dev);
kfree(c);
}

View File

@ -382,6 +382,7 @@ lbd_flush(struct lbd* lbd)
iov_len = min(nr_pblk - pblk_idx, pblk_per_page);
for (n = 0; n < iov_len; ++n) {
pblkv[n] = lbatview_elem_pblk(lbd->lv, lbd->lblk, pblk_idx++);
BUG_ON(pblkv[n] == PBLK_NONE);
}
pblk_writev(lbd->kparams, pblkv, iov_len, lbd->pagev[pg_idx]);
}
@ -434,6 +435,11 @@ lbd_read(struct lbd* lbd)
iov_len = min(nr_pblk - pblk_idx, pblk_per_page);
for (n = 0; n < iov_len; ++n) {
pblkv[n] = lbatview_elem_pblk(lbd->lv, lbd->lblk, pblk_idx++);
if (pblkv[n] == PBLK_NONE) {
printk(KERN_ERR "%s: bad pblk\n", __func__);
ret = -EIO;
goto out;
}
}
ret = pblk_readv_wait(lbd->kparams, pblkv, iov_len, lbd->pagev[pg_idx]);
if (ret) {
@ -629,7 +635,7 @@ lbdcache_free_compress_state(void* percpu, const struct cbd_params* params, int
bool
lbdcache_ctr(struct lbdcache* lc,
struct compress_params* kparams, struct compress_stats* kstats,
bool sync, u32 cache_pages)
u32 cache_pages, bool sync)
{
int cpu;
struct lbd* cache;

View File

@ -237,7 +237,7 @@ pbatcache_ctr(struct pbatcache* pc,
memset(pc, 0, sizeof(struct pbatcache));
/* pbatcache gets 1/32 of cache_pages */
cache_len = (cache_pages * 1 / 32) / pbat_len(&kparams->params);
cache_len = (cache_pages * 1 / 32);
if (!cache_len) {
printk(KERN_ERR "%s: Cache too small\n", __func__);
return false;

View File

@ -69,7 +69,8 @@ int cbd_format(const char* dev,
enum cbd_alg alg, uint level,
uint8_t pshift, uint8_t lshift,
uint8_t pbatshift,
uint64_t psize, uint64_t lsize);
uint64_t psize, uint64_t lsize,
bool full_init);
int cbd_open(const char* dev,
const char* name,
uint64_t cache_pages, bool sync);

View File

@ -41,6 +41,7 @@ struct cbd_params {
/* u8 pad */
u32 nr_zones;
u32 lblk_per_zone;
u32 init_zones;
};
struct cbd_stats {
@ -363,6 +364,7 @@ cbd_header_get(const u8* buf, struct cbd_header* header)
p += 1; /* pad */
header->params.nr_zones = get32_le(&p);
header->params.lblk_per_zone = get32_le(&p);
header->params.init_zones = get32_le(&p);
p = buf + 64;
header->stats.pblk_used = get64_le(&p);
header->stats.lblk_used = get64_le(&p);
@ -385,6 +387,7 @@ cbd_header_put(u8* buf, const struct cbd_header* header)
put_byte(&p, 0); /* pad */
put32_le(&p, header->params.nr_zones);
put32_le(&p, header->params.lblk_per_zone);
put32_le(&p, header->params.init_zones);
p = buf + 64;
put64_le(&p, header->stats.pblk_used);
put64_le(&p, header->stats.lblk_used);
@ -648,7 +651,7 @@ struct lbdcache;
size_t lbdcache_size(void);
bool lbdcache_ctr(struct lbdcache* lc,
struct compress_params* kparams, struct compress_stats* kstats,
bool sync, u32 cache_pages);
u32 cache_pages, bool sync);
void lbdcache_dtr(struct lbdcache* lc);
struct lbd*
lbdcache_get(struct lbdcache* lc, u64 lblk);

View File

@ -92,7 +92,7 @@ lbat_write(int fd, const struct cbd_params* params, u32 zone, const u8* data)
}
static void
check_header(const struct cbd_header* header)
check_header(struct cbd_header* header)
{
enum cbd_alg alg = cbd_compression_alg_get(&header->params);
u8 level = cbd_compression_level_get(&header->params);
@ -116,6 +116,10 @@ check_header(const struct cbd_header* header)
header->params.lblk_shift >= LBLK_SHIFT_MAX) {
error("Bad logical block shift\n");
}
if (header->params.init_zones > header->params.nr_zones) {
verbose(1, "init_zones incorrect, fixing\n");
header->params.init_zones = header->params.nr_zones;
}
}
static bool
@ -311,7 +315,7 @@ check_lbat(struct check_state* state, const struct cbd_params* params)
{
u32 zone;
for (zone = 0; zone < params->nr_zones; ++zone) {
for (zone = 0; zone < params->init_zones; ++zone) {
u8* lbat = calloc(pblk_size(params), lbat_len(params));
bool zone_empty = true;
bool changed = false;
@ -321,7 +325,7 @@ check_lbat(struct check_state* state, const struct cbd_params* params)
verbose(2, "Zone %u: lbat=[%lu..%lu] alloc=[%lu .. %lu]\n",
(unsigned int)zone,
(unsigned long)zone_off(params, zone),
(unsigned long)lbat_off(params, zone),
(unsigned long)(zone_data_off(params, zone) - 1),
(unsigned long)zone_data_off(params, zone),
(unsigned long)(zone_off(params, zone + 1) - 1));
@ -362,7 +366,7 @@ check_pbat(struct check_state* state, const struct cbd_params* params)
u8* pbat;
pbat = malloc(pblk_size(params) * pbat_len(params));
for (zone = 0; zone < params->nr_zones; ++zone) {
for (zone = 0; zone < params->init_zones; ++zone) {
bool changed = false;
pbat_read(state->fd, params, zone, pbat);
if (memcmp(pbat, state->pbatv[zone], pblk_size(params) * pbat_len(params)) != 0) {

View File

@ -30,16 +30,15 @@ cbd_format(const char* dev,
enum cbd_alg alg, uint level,
uint8_t pshift, uint8_t lshift,
uint8_t pbatshift,
uint64_t psize, uint64_t lsize)
uint64_t psize, uint64_t lsize,
bool full_init)
{
int devfd;
uint pblk_size;
uint lblk_size;
uint32_t est_zone_len;
struct cbd_header header;
uint8_t header_buf[PAGE_SIZE];
uint8_t* data_buf;
uint64_t pblk;
uint64_t zone_idx;
devfd = open(dev, O_RDWR);
if (devfd < 0) {
@ -121,34 +120,55 @@ cbd_format(const char* dev,
}
/* XXX: Initial estimate */
header.params.lblk_per_zone = zone_data_len(&header.params) * (lsize / lblk_size) / (psize / pblk_size);
printf(" initial estimate for lblk_per_zone: %lu\n", (unsigned long)header.params.lblk_per_zone);
printf(" initial estimate for lblk_per_zone: %u\n", (unsigned int)header.params.lblk_per_zone);
header.params.nr_zones = ((psize / pblk_size) - CBD_HEADER_BLOCKS) / zone_len(&header.params);
est_zone_len = zone_len(&header.params);
header.params.lblk_per_zone = DIV_ROUND_UP(lsize / lblk_size, header.params.nr_zones);
while (zone_len(&header.params) > est_zone_len) {
--header.params.lblk_per_zone;
}
header.params.init_zones = 0;
printf("%s: header...\n", __func__);
printf(" compression=0x%02x\n", (unsigned int)header.params.compression);
printf(" pblk_shift=%hu\n", (unsigned short)header.params.pblk_shift);
printf(" lblk_shift=%hu\n", (unsigned short)header.params.lblk_shift);
printf(" lba_elem_pblk_bytes=%hu\n", (unsigned short)header.params.lba_elem_pblk_bytes);
printf(" pbat_shift=%hu\n", (unsigned short)header.params.pbat_shift);
printf(" nr_zones=%lu\n", (unsigned long)header.params.nr_zones);
printf(" lblk_per_zone=%lu\n", (unsigned long)header.params.lblk_per_zone);
printf(" nr_zones=%u\n", (unsigned int)header.params.nr_zones);
printf(" lblk_per_zone=%u\n", (unsigned int)header.params.lblk_per_zone);
memset(header_buf, 0, sizeof(header_buf));
cbd_header_put(header_buf, &header);
pblk_write(devfd, pblk_size, 0, 1, header_buf);
pblk = 0;
pblk_write(devfd, pblk_size, pblk, 1, header_buf);
pblk += CBD_HEADER_BLOCKS;
if (full_init) {
uint32_t nr_pblk = zone_metadata_len(&header.params);
uint8_t* data_buf;
uint64_t pblk;
time_t now;
time_t next_write;
printf("Writing %lu zones ...\n",
(unsigned long)header.params.nr_zones);
printf("Writing %lu zones ...\n",
(unsigned long)header.params.nr_zones);
data_buf = calloc(zone_metadata_len(&header.params), PAGE_SIZE);
for (zone_idx = 0; zone_idx < header.params.nr_zones; ++zone_idx) {
pblk = zone_off(&header.params, zone_idx);
pblk_write(devfd, pblk_size, pblk, zone_metadata_len(&header.params), data_buf);
data_buf = calloc(nr_pblk, pblk_size);
next_write = time(NULL) + 5;
while (header.params.init_zones < header.params.nr_zones) {
pblk = zone_off(&header.params, header.params.init_zones);
pblk_write(devfd, pblk_size, pblk, nr_pblk, data_buf);
++header.params.init_zones;
now = time(NULL);
if (now >= next_write) {
printf("Initialized %u/%u zones\n",
header.params.init_zones, header.params.nr_zones);
cbd_header_put(header_buf, &header);
pblk_write(devfd, pblk_size, 0, 1, header_buf);
next_write = now + 5;
}
}
free(data_buf);
cbd_header_put(header_buf, &header);
pblk_write(devfd, pblk_size, 0, 1, header_buf);
}
free(data_buf);
return 0;
}

View File

@ -5,7 +5,7 @@
#include <cbdutil.h>
static uint64_t
device_logical_size(const char* dev)
device_logical_sectors(const char* dev)
{
int fd;
uint8_t buf[SECTOR_SIZE];
@ -26,7 +26,7 @@ device_logical_size(const char* dev)
}
lblk_total = header.params.lblk_per_zone * header.params.nr_zones;
return lblk_total * lblk_size(&header.params);
return lblk_total << (header.params.lblk_shift + header.params.pblk_shift);
}
int
@ -36,7 +36,7 @@ cbd_open(const char* dev,
{
int ret;
struct stat st;
uint64_t lsize;
uint64_t nr_logical_sectors;
char optbuf[80];
char params[256];
struct dm_task* dmt;
@ -49,7 +49,7 @@ cbd_open(const char* dev,
if (!S_ISBLK(st.st_mode)) {
error("Not a block device\n");
}
lsize = device_logical_size(dev);
nr_logical_sectors = device_logical_sectors(dev);
strcpy(params, dev);
if (cache_pages) {
@ -69,10 +69,10 @@ cbd_open(const char* dev,
error("dm_task_set_name failed\n");
}
printf("%s: start_sector=0 num_sectors=%lu\n", __func__,
(unsigned long)(lsize / SECTOR_SIZE));
(unsigned long)nr_logical_sectors);
ret = dm_task_add_target(dmt,
0,
lsize / SECTOR_SIZE,
nr_logical_sectors,
"compress",
params);
if (ret == 0) {