#ifndef _LINUX_DM_COMPRESS_H
#define _LINUX_DM_COMPRESS_H

#define PBLK_SHIFT_MIN		0
#define PBLK_SHIFT_MAX		3
#define LBLK_SHIFT_MIN		1
#define LBLK_SHIFT_MAX		10
#define PBAT_SHIFT_MIN		0
#define PBAT_SHIFT_MAX		3

#define ZONE_NONE		(u32)(~0)
#define PBLK_NONE		(u64)(~0)
#define LBLK_NONE		(u64)(~0)

#define CBD_HEADER_BLOCKS	1
#define CBD_UNCOMPRESSED	1

static const u8  CBD_MAGIC[] = { 'C', 'B', 'D', '\0' };
static const u16 CBD_VERSION_MAJOR = 1;
static const u16 CBD_VERSION_MINOR = 1;

#define CBD_FLAG_DIRTY		0x0001
#define CBD_FLAG_ERROR		0x0002
#define CBD_FLAG_DETECT_ZEROS	0x0100

enum cbd_alg {
	CBD_ALG_NONE,
	CBD_ALG_LZ4,
	CBD_ALG_ZLIB,
	/* lzo, zstd, ... */
	CBD_ALG_MAX
};

struct cbd_params {
	u16 flags;
	u8  compression;		/* alg and level */
	u8  pblk_shift;
	u8  lblk_shift;
	u8  lba_elem_pblk_bytes;
	u8  pbat_shift;
	/* u8 pad */
	u32 nr_zones;
	u32 lblk_per_zone;
	u32 init_zones;
};

struct cbd_stats {
	u64 pblk_used;
	u64 lblk_used;
};

struct cbd_header {
	u8  magic[4];
	u16 version_major;
	u16 version_minor;
	struct cbd_params params;
	struct cbd_stats  stats;
};
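/*
 * On-disk header layout, as serialized by cbd_header_put() and parsed by
 * cbd_header_get() below.  All multi-byte fields are little-endian.
 *
 *	byte  0: magic[4]
 *	byte  4: version_major (u16)
 *	byte  6: version_minor (u16)
 *	byte  8: params.flags (u16)
 *	byte 10: params.compression (u8; algorithm in the high nibble,
 *	         level in the low nibble)
 *	byte 11: params.pblk_shift (u8)
 *	byte 12: params.lblk_shift (u8)
 *	byte 13: params.lba_elem_pblk_bytes (u8)
 *	byte 14: params.pbat_shift (u8)
 *	byte 15: pad (u8)
 *	byte 16: params.nr_zones (u32)
 *	byte 20: params.lblk_per_zone (u32)
 *	byte 24: params.init_zones (u32)
 *	byte 28: not interpreted by these helpers, up to byte 63
 *	byte 64: stats.pblk_used (u64)
 *	byte 72: stats.lblk_used (u64)
 */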
struct lba {
	u32 len;		/* Compressed length */
	u64 pblk[1];		/* Vector of physical blocks */
};

/* Little-endian (de)serialization helpers for the on-disk format */

static inline void
get_mem(const u8** raw, u8* buf, size_t len)
{
	memcpy(buf, *raw, len);
	*raw += len;
}

static inline void
put_mem(u8** raw, const u8* buf, size_t len)
{
	memcpy(*raw, buf, len);
	*raw += len;
}

static inline u8
get_byte(const u8** raw)
{
	u8 val = **raw;

	*raw += sizeof(u8);
	return val;
}

static inline void
put_byte(u8** raw, u8 val)
{
	**raw = val;
	*raw += sizeof(u8);
}

static inline u16
get16_le(const u8** raw)
{
	u16 leval = 0;

	memcpy(&leval, *raw, sizeof(leval));
	*raw += sizeof(leval);
	return __le16_to_cpu(leval);
}

static inline void
put16_le(u8** raw, u16 val)
{
	u16 leval = __cpu_to_le16(val);

	memcpy(*raw, &leval, sizeof(leval));
	*raw += sizeof(leval);
}

static inline u32
get32_le(const u8** raw)
{
	u32 leval = 0;

	memcpy(&leval, *raw, sizeof(leval));
	*raw += sizeof(leval);
	return __le32_to_cpu(leval);
}

static inline void
put32_le(u8** raw, u32 val)
{
	u32 leval = __cpu_to_le32(val);

	memcpy(*raw, &leval, sizeof(leval));
	*raw += sizeof(leval);
}

/* 48-bit values are stored as the low six bytes of a little-endian u64 */
static inline u64
get48_le(const u8** raw)
{
	u64 leval = 0;

	memcpy(&leval, *raw, 6);
	*raw += 6;
	return __le64_to_cpu(leval);
}

static inline void
put48_le(u8** raw, u64 val)
{
	u64 leval = __cpu_to_le64(val);

	memcpy(*raw, &leval, 6);
	*raw += 6;
}

static inline u64
get64_le(const u8** raw)
{
	u64 leval = 0;

	memcpy(&leval, *raw, sizeof(leval));
	*raw += sizeof(leval);
	return __le64_to_cpu(leval);
}

static inline void
put64_le(u8** raw, u64 val)
{
	u64 leval = __cpu_to_le64(val);

	memcpy(*raw, &leval, sizeof(leval));
	*raw += sizeof(leval);
}

/*
 * Simple byte-wise bitmap helpers.  cbd_bitmap_alloc() scans for a clear
 * bit starting at the byte containing @hint and wraps around to the start;
 * it sets the bit it finds and returns its index, or @bitsize if the
 * bitmap is full.
 */

/* XXX: Use kernel bit functions */
static inline u32
cbd_bitmap_alloc(u8* buf, u32 bitsize, u32 hint)
{
	u32 off;
	u32 bit;

	for (off = hint / BITS_PER_BYTE; off < bitsize / BITS_PER_BYTE; ++off) {
		if (buf[off] != 0xff) {
			bit = 0;
			while (buf[off] & (1 << bit)) {
				++bit;
			}
			buf[off] |= (1 << bit);
			return off * BITS_PER_BYTE + bit;
		}
	}
	for (off = 0; off < hint / BITS_PER_BYTE; ++off) {
		if (buf[off] != 0xff) {
			bit = 0;
			while (buf[off] & (1 << bit)) {
				++bit;
			}
			buf[off] |= (1 << bit);
			return off * BITS_PER_BYTE + bit;
		}
	}

	return bitsize;
}

/* XXX: Use kernel bit functions */
static inline void
cbd_bitmap_free(u8* buf, u32 idx)
{
	u32 off = idx / BITS_PER_BYTE;
	u32 bit = idx % BITS_PER_BYTE;

	buf[off] &= ~(1 << bit);
}

static inline void
cbd_bitmap_set(u8* buf, u32 idx)
{
	u32 off = idx / BITS_PER_BYTE;
	u32 bit = idx % BITS_PER_BYTE;

	buf[off] |= (1 << bit);
}

static inline void
cbd_bitmap_reset(u8* buf, u32 idx)
{
	u32 off = idx / BITS_PER_BYTE;
	u32 bit = idx % BITS_PER_BYTE;

	buf[off] &= ~(1 << bit);
}

static inline bool
cbd_bitmap_isset(u8* buf, u32 idx)
{
	u32 off = idx / BITS_PER_BYTE;
	u32 bit = idx % BITS_PER_BYTE;

	return buf[off] & (1 << bit);
}
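/*
 * Device layout in physical blocks (pblk), as computed by the offset and
 * size helpers below: one header block (CBD_HEADER_BLOCKS) followed by
 * nr_zones zones of zone_len() pblks each.  Within zone @idx:
 *
 *	pbat_off(idx):       pbat_len() pblks of allocation bitmap,
 *	                     one bit per data pblk in the zone
 *	lbat_off(idx):       lbat_len() pblks holding one lba entry of
 *	                     lba_len() bytes per logical block in the zone
 *	zone_data_off(idx):  zone_data_len() pblks of data
 */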
static inline u32
pblk_size(const struct cbd_params* params)
{
	return (1 << params->pblk_shift) * SECTOR_SIZE;
}

static inline u32
pblk_size_bits(const struct cbd_params* params)
{
	return pblk_size(params) * BITS_PER_BYTE;
}

static inline u32
lblk_per_pblk(const struct cbd_params* params)
{
	return (1 << params->lblk_shift);
}

static inline u32
lblk_size(const struct cbd_params* params)
{
	return pblk_size(params) * lblk_per_pblk(params);
}

static inline u32
pbat_len(const struct cbd_params* params)
{
	return (1 << params->pbat_shift);
}

static inline u32
lba_elem_len_bytes(const struct cbd_params* params)
{
	return (lblk_size(params) > 0xffff) ? 4 : 2;
}

static inline u32
lba_elem_pblk_bytes(const struct cbd_params* params)
{
	return params->lba_elem_pblk_bytes;
}

static inline u32
lba_len(const struct cbd_params* params)
{
	return lba_elem_len_bytes(params) +
	       lba_elem_pblk_bytes(params) * (1 << params->lblk_shift);
}

static inline u32
lbat_len(const struct cbd_params* params)
{
	return DIV_ROUND_UP(params->lblk_per_zone * lba_len(params),
			    pblk_size(params));
}

static inline u32
zone_metadata_len(const struct cbd_params* params)
{
	return pbat_len(params) + lbat_len(params);
}

static inline u32
zone_data_len(const struct cbd_params* params)
{
	return pbat_len(params) * pblk_size(params) * BITS_PER_BYTE;
}

static inline u32
zone_len(const struct cbd_params* params)
{
	return zone_metadata_len(params) + zone_data_len(params);
}

static inline u64
zone_off(const struct cbd_params* params, u32 idx)
{
	return CBD_HEADER_BLOCKS + idx * zone_len(params);
}

static inline u64
pbat_off(const struct cbd_params* params, u32 idx)
{
	return zone_off(params, idx) + 0;
}

static inline u64
lbat_off(const struct cbd_params* params, u32 idx)
{
	return zone_off(params, idx) + pbat_len(params);
}

static inline u64
zone_data_off(const struct cbd_params* params, u32 idx)
{
	return zone_off(params, idx) + pbat_len(params) + lbat_len(params);
}

static inline u32
zone_for_pblk(const struct cbd_params* params, u64 pblk)
{
	if (pblk < CBD_HEADER_BLOCKS) {
		return ZONE_NONE;
	}
	return (pblk - CBD_HEADER_BLOCKS) / zone_len(params);
}

static inline u32
zone_for_lblk(const struct cbd_params* params, u64 lblk)
{
	return (lblk / params->lblk_per_zone);
}

static inline void
cbd_header_get(const u8* buf, struct cbd_header* header)
{
	const u8* p;

	p = buf + 0;
	get_mem(&p, header->magic, sizeof(header->magic));
	header->version_major = get16_le(&p);
	header->version_minor = get16_le(&p);
	header->params.flags = get16_le(&p);
	header->params.compression = get_byte(&p);
	header->params.pblk_shift = get_byte(&p);
	header->params.lblk_shift = get_byte(&p);
	header->params.lba_elem_pblk_bytes = get_byte(&p);
	header->params.pbat_shift = get_byte(&p);
	p += 1; /* pad */
	header->params.nr_zones = get32_le(&p);
	header->params.lblk_per_zone = get32_le(&p);
	header->params.init_zones = get32_le(&p);
	p = buf + 64;
	header->stats.pblk_used = get64_le(&p);
	header->stats.lblk_used = get64_le(&p);
}

static inline void
cbd_header_put(u8* buf, const struct cbd_header* header)
{
	u8* p;

	p = buf + 0;
	put_mem(&p, header->magic, sizeof(header->magic));
	put16_le(&p, header->version_major);
	put16_le(&p, header->version_minor);
	put16_le(&p, header->params.flags);
	put_byte(&p, header->params.compression);
	put_byte(&p, header->params.pblk_shift);
	put_byte(&p, header->params.lblk_shift);
	put_byte(&p, header->params.lba_elem_pblk_bytes);
	put_byte(&p, header->params.pbat_shift);
	put_byte(&p, 0); /* pad */
	put32_le(&p, header->params.nr_zones);
	put32_le(&p, header->params.lblk_per_zone);
	put32_le(&p, header->params.init_zones);
	p = buf + 64;
	put64_le(&p, header->stats.pblk_used);
	put64_le(&p, header->stats.lblk_used);
}
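/*
 * Usage sketch (illustrative only, not part of this header's API): after
 * reading the header block into @buf, a loader might decode and
 * sanity-check it roughly as follows.  Error handling and the remaining
 * parameter checks are elided.
 *
 *	struct cbd_header hdr;
 *
 *	cbd_header_get(buf, &hdr);
 *	if (memcmp(hdr.magic, CBD_MAGIC, sizeof(CBD_MAGIC)) != 0)
 *		return -EINVAL;
 *	if (hdr.version_major != CBD_VERSION_MAJOR)
 *		return -EINVAL;
 *	if (hdr.params.pblk_shift > PBLK_SHIFT_MAX ||
 *	    hdr.params.lblk_shift < LBLK_SHIFT_MIN ||
 *	    hdr.params.lblk_shift > LBLK_SHIFT_MAX)
 *		return -EINVAL;
 */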
/* The compression byte packs the algorithm in its high nibble and the level in its low nibble */

static inline enum cbd_alg
cbd_compression_alg_get(const struct cbd_params* params)
{
	return (enum cbd_alg)(params->compression >> 4);
}

static inline void
cbd_compression_alg_put(struct cbd_params* params, enum cbd_alg alg)
{
	params->compression = (alg << 4) | (params->compression & 0x0f);
}

static inline u8
cbd_compression_level_get(const struct cbd_params* params)
{
	return (params->compression & 0x0f);
}

static inline void
cbd_compression_level_put(struct cbd_params* params, u8 level)
{
	params->compression = (params->compression & 0xf0) | level;
}

/*
 * An lba entry on disk is lba_elem_len_bytes() of compressed length
 * followed by lblk_per_pblk() pblk numbers of lba_elem_pblk_bytes() each.
 */

static inline u32
lba_len_get(const struct cbd_params* params, const u8* buf)
{
	if (lba_elem_len_bytes(params) == 2) {
		return get16_le(&buf);
	} else {
		return get32_le(&buf);
	}
}

static inline void
lba_len_put(const struct cbd_params* params, u8* buf, u32 val)
{
	if (lba_elem_len_bytes(params) == 2) {
		put16_le(&buf, val);
	} else {
		put32_le(&buf, val);
	}
}

static inline u64
lba_pblk_get(const struct cbd_params* params, const u8* buf, u32 idx)
{
	const u8* p = buf;
	u32 len_bytes = lba_elem_len_bytes(params);
	u32 pblk_bytes = lba_elem_pblk_bytes(params);

	if (pblk_bytes == 2) {
		p += len_bytes + 2 * idx;
		return get16_le(&p);
	} else if (pblk_bytes == 4) {
		p += len_bytes + 4 * idx;
		return get32_le(&p);
	} else {
		p += len_bytes + 6 * idx;
		return get48_le(&p);
	}
}

static inline void
lba_pblk_put(const struct cbd_params* params, u8* buf, u32 idx, u64 val)
{
	u8* p = buf;
	u32 len_bytes = lba_elem_len_bytes(params);
	u32 pblk_bytes = lba_elem_pblk_bytes(params);

	if (pblk_bytes == 2) {
		p += len_bytes + 2 * idx;
		put16_le(&p, val);
	} else if (pblk_bytes == 4) {
		p += len_bytes + 4 * idx;
		put32_le(&p, val);
	} else {
		p += len_bytes + 6 * idx;
		put48_le(&p, val);
	}
}

static inline void
lba_get(const struct cbd_params* params, const u8* buf, struct lba* lba)
{
	u32 n;
	u32 len_bytes = lba_elem_len_bytes(params);
	u32 pblk_bytes = lba_elem_pblk_bytes(params);

	if (len_bytes == 2) {
		lba->len = get16_le(&buf);
	} else {
		lba->len = get32_le(&buf);
	}
	if (pblk_bytes == 2) {
		for (n = 0; n < lblk_per_pblk(params); ++n) {
			lba->pblk[n] = get16_le(&buf);
		}
	} else if (pblk_bytes == 4) {
		for (n = 0; n < lblk_per_pblk(params); ++n) {
			lba->pblk[n] = get32_le(&buf);
		}
	} else {
		for (n = 0; n < lblk_per_pblk(params); ++n) {
			lba->pblk[n] = get48_le(&buf);
		}
	}
}

static inline void
lba_put(const struct cbd_params* params, u8* buf, const struct lba* lba)
{
	u32 n;
	u32 len_bytes = lba_elem_len_bytes(params);
	u32 pblk_bytes = lba_elem_pblk_bytes(params);

	if (len_bytes == 2) {
		put16_le(&buf, lba->len);
	} else {
		put32_le(&buf, lba->len);
	}
	if (pblk_bytes == 2) {
		for (n = 0; n < lblk_per_pblk(params); ++n) {
			put16_le(&buf, lba->pblk[n]);
		}
	} else if (pblk_bytes == 4) {
		for (n = 0; n < lblk_per_pblk(params); ++n) {
			put32_le(&buf, lba->pblk[n]);
		}
	} else {
		for (n = 0; n < lblk_per_pblk(params); ++n) {
			put48_le(&buf, lba->pblk[n]);
		}
	}
}

#ifdef __KERNEL__

#if defined(CONFIG_LZ4_COMPRESS) || defined(CONFIG_LZ4_COMPRESS_MODULE)
#define COMPRESS_HAVE_LZ4 1
#endif

#if defined(CONFIG_ZLIB_INFLATE) || defined(CONFIG_ZLIB_INFLATE_MODULE)
#define COMPRESS_HAVE_ZLIB 1
#endif

#define COMPRESS_FLUSH_DELAY	(HZ / 10)

struct compress_params {
	struct block_device* dev;
	struct cbd_params params;
};

struct compress_stats {
	struct mutex lock;
	struct cbd_stats stats;
	u64 pbat_r;
	u64 pbat_w;
	u64 lbatpblk_r;
	u64 lbatpblk_w;
	u64 lbd_r;
	u64 lbd_w;
};

typedef void (*pblk_endio_t)(struct bio*);

/* Single page allocator */
struct page* cbd_alloc_page(void);
void cbd_free_page(struct page* page);

/* Multiple page allocator */
struct page* cbd_alloc_pages(size_t len);
void cbd_free_pages(struct page* pages, size_t len);

/* Vector page allocator */
bool cbd_alloc_pagev(struct page** pagev, size_t len);
void cbd_free_pagev(struct page** pagev, size_t len);

/* Core low-level I/O */
#define PBLK_IOV_MAX (PAGE_SIZE / SECTOR_SIZE)
int pblk_read_wait(struct compress_params* kparams,
		   u64 pblk, u32 count, struct page* page);
int pblk_readv_wait(struct compress_params* kparams,
		    u64* pblkv, u32 count, struct page* page);
int pblk_write_wait(struct compress_params* kparams,
		    u64 pblk, u32 count, struct page* page);
void pblk_write(struct compress_params* kparams,
		u64 pblk, u32 count, struct page* page);
void pblk_writev(struct compress_params* kparams,
		 u64* pblkv, u32 count, struct page* page);

struct pbat;
u32 pbat_zone(struct pbat* pbat);
u64 pbat_alloc(struct pbat* pbat);
int pbat_free(struct pbat* pbat, u64 pblk);

struct pbatcache;
size_t pbatcache_size(void);
bool pbatcache_ctr(struct pbatcache* pbatcache,
		   struct compress_params* kparams,
		   struct compress_stats* kstats, u32 cache_pages);
void pbatcache_dtr(struct pbatcache* pbatcache);
struct pbat* pbatcache_get(struct pbatcache* pbatcache, u32 zone, bool avail);
int pbatcache_put(struct pbatcache* pbatcache, struct pbat* pbat);

struct lbatpblk;
u8* lbatpblk_get_buf(struct lbatpblk* lp, bool rw);
void lbatpblk_put_buf(struct lbatpblk* lp);

struct lbatpblkcache;
size_t lbatpblkcache_size(void);
bool lbatpblkcache_ctr(struct lbatpblkcache* lpc,
		       struct compress_params* kparams,
		       struct compress_stats* kstats, u32 cache_pages);
void lbatpblkcache_dtr(struct lbatpblkcache* lpc);
struct lbatpblk* lbatpblkcache_get(struct lbatpblkcache* lpc, u64 pblk);
int lbatpblkcache_put(struct lbatpblkcache* lpc, struct lbatpblk* lpi);

struct lbatview;
int lbatview_elem_realloc(struct lbatview* lv, u64 lblk, u32 len);
u32 lbatview_elem_len(struct lbatview* lv, u64 lblk);
u64 lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx);

struct lbatviewcache;
size_t lbatviewcache_size(void);
bool lbatviewcache_ctr(struct lbatviewcache* lvc,
		       struct compress_params* kparams,
		       struct compress_stats* kstats, u32 cache_pages);
void lbatviewcache_dtr(struct lbatviewcache* lvc);
struct lbatview* lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk);
int lbatviewcache_put(struct lbatviewcache* lvc, struct lbatview* lbv);

struct lbd;
u64 lbd_lblk(struct lbd* lbd);
void lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf);
void lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf);

struct lbdcache;
size_t lbdcache_size(void);
bool lbdcache_ctr(struct lbdcache* lc,
		  struct compress_params* kparams,
		  struct compress_stats* kstats, u32 cache_pages, bool sync);
void lbdcache_dtr(struct lbdcache* lc);
struct lbd* lbdcache_get(struct lbdcache* lc, u64 lblk);
int lbdcache_put(struct lbdcache* lc, struct lbd* lbd);

#endif

#endif /* _LINUX_DM_COMPRESS_H */