cbd/dm-compress/pbat.c

/*
 * Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/dm-compress.h>
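
/*
 * A pbat caches one zone's physical block allocation bitmap in
 * memory. reflock and ref track ownership by the pbatcache; lock
 * serializes access to the bitmap page itself. last_alloc is a
 * next-fit hint for the allocator.
 */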
struct pbat {
    struct list_head list;
    u32 zone;
    struct mutex reflock;
    unsigned int ref;
    struct mutex lock;
    struct compress_params* kparams;
    struct compress_stats* kstats;
    bool full;
    u32 last_alloc;
    struct page* page;
    u8* buf;
};
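
/*
 * Construct a pbat: zero all state, bind it to no zone, and allocate
 * the page that backs the bitmap.
 */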
static bool
pbat_ctr(struct pbat* pbat,
         struct compress_params* kparams,
         struct compress_stats* kstats)
{
    memset(pbat, 0, sizeof(struct pbat));
    INIT_LIST_HEAD(&pbat->list);
    pbat->zone = ZONE_NONE;
    mutex_init(&pbat->reflock);
    pbat->ref = 0;
    mutex_init(&pbat->lock);
    pbat->kparams = kparams;
    pbat->kstats = kstats;
    pbat->full = false;
    pbat->last_alloc = 0;
    pbat->page = cbd_alloc_page();
    if (!pbat->page) {
        return false;
    }
    pbat->buf = page_address(pbat->page);

    return true;
}
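
/*
 * Destroy a pbat. Taking the page lock first waits out any holder of
 * the lock (such as in-flight writeback) before the page is freed.
 */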
static void
pbat_dtr(struct pbat* pbat)
{
    if (pbat->page) {
        lock_page(pbat->page);
    }
    pbat->buf = NULL;
    cbd_free_page(pbat->page);
    pbat->page = NULL;
}
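
/* Report whether the bitmap page has seen an I/O error. */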
static bool
pbat_error(struct pbat* pbat)
{
    return PageError(pbat->page);
}
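
/*
 * Write the bitmap page back if it is dirty. On the clean and error
 * paths the page lock is dropped here; on the write path the locked
 * page is handed to pblk_write, which is expected to release it when
 * the I/O completes.
 */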
static int
pbat_flush(struct pbat* pbat)
{
    int ret = 0;
    u64 pblk;

    mutex_lock(&pbat->lock);
    if (!PageDirty(pbat->page)) {
        goto unlock;
    }
    if (pbat_error(pbat)) {
        ret = -EIO;
        goto unlock;
    }
    pblk = pbat_off(&pbat->kparams->params, pbat->zone);
    pblk_write(pbat->kparams, pblk, pbat_len(&pbat->kparams->params), pbat->page);
    mutex_unlock(&pbat->lock);

    mutex_lock(&pbat->kstats->lock);
    ++pbat->kstats->pbat_w;
    mutex_unlock(&pbat->kstats->lock);

    return ret;

unlock:
    unlock_page(pbat->page);
    mutex_unlock(&pbat->lock);

    return ret;
}
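
/*
 * Synchronously read the current zone's bitmap into the page and
 * account for the read in the stats.
 */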
static int
pbat_read(struct pbat* pbat)
{
    int ret = 0;
    u64 pblk;

    BUG_ON(pbat_error(pbat));
    pblk = pbat_off(&pbat->kparams->params, pbat->zone);
    ret = pblk_read_wait(pbat->kparams, pblk, pbat_len(&pbat->kparams->params), pbat->page);
    mutex_lock(&pbat->kstats->lock);
    ++pbat->kstats->pbat_r;
    mutex_unlock(&pbat->kstats->lock);

    return ret;
}
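
/*
 * Re-point the pbat at a zone, reloading the bitmap if the zone
 * actually changed. On success the page is left locked; on failure
 * it is unlocked and the pbat is detached from any zone.
 */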
static int
pbat_reset(struct pbat* pbat, u32 zone)
{
    int ret = 0;

    lock_page(pbat->page);
    if (pbat->zone != zone) {
        pbat->zone = zone;
        pbat->full = false;
        pbat->last_alloc = 0;
        ret = pbat_read(pbat);
    }
    if (ret) {
        unlock_page(pbat->page);
        pbat->zone = ZONE_NONE;
    }

    return ret;
}

u32
pbat_zone(struct pbat* pbat)
{
    return pbat->zone;
}
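
/*
 * Allocate one physical block from the zone's bitmap, searching from
 * the last_alloc hint. Returns PBLK_NONE when the zone is full. Bit
 * index 0 corresponds to the zone's first data block; metadata
 * blocks are not covered by the bitmap.
 */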
u64
pbat_alloc(struct pbat* pbat)
{
    u32 bitsize = pblk_size_bits(&pbat->kparams->params) *
                  pbat_len(&pbat->kparams->params);
    u32 idx;
    u64 pblk;

    mutex_lock(&pbat->lock);
    if (pbat->full) {
        pblk = PBLK_NONE;
        goto out;
    }
    idx = cbd_bitmap_alloc(pbat->buf, bitsize, pbat->last_alloc);
    if (idx == bitsize) {
        pbat->full = true;
        pblk = PBLK_NONE;
        goto out;
    }
    pbat->last_alloc = idx;
    pblk = idx + zone_data_off(&pbat->kparams->params, pbat->zone);
    SetPageDirty(pbat->page);

out:
    mutex_unlock(&pbat->lock);

    return pblk;
}
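
/*
 * Return a physical block to the zone's bitmap. Blocks below
 * zone_data_off() are metadata and must never be freed here.
 * Freeing always clears the full flag, since at least one bit is
 * free again.
 */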
int
pbat_free(struct pbat* pbat, u64 pblk)
{
    u32 bitsize = pblk_size_bits(&pbat->kparams->params) *
                  pbat_len(&pbat->kparams->params);
    u32 zone;
    u32 idx;

    zone = zone_for_pblk(&pbat->kparams->params, pblk);
    BUG_ON(zone != pbat->zone);
    if (pblk < zone_data_off(&pbat->kparams->params, zone)) {
        printk(KERN_ERR "%s: pblk in metadata\n", __func__);
        return -EINVAL;
    }
    idx = pblk - zone_data_off(&pbat->kparams->params, zone);
    BUG_ON(idx >= bitsize);
    mutex_lock(&pbat->lock);
    cbd_bitmap_free(pbat->buf, idx);
    pbat->full = false;
    SetPageDirty(pbat->page);
    mutex_unlock(&pbat->lock);

    return 0;
}
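
/*
 * pbatcache is a small LRU cache of pbat objects: most recently used
 * entries live at the head of cache_head and eviction scans from the
 * tail. The full bitmap keeps one bit per zone so allocation can
 * skip zones known to be full without reloading their bitmaps.
 */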
struct pbatcache {
    struct mutex cache_lock;
    struct list_head cache_head;
    unsigned int cache_len;
    struct pbat* cache;
    u8* full;
};

size_t
pbatcache_size(void)
{
    return sizeof(struct pbatcache);
}
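
/*
 * Initialize the cache: give pbat entries 1/32 of the device cache
 * pages, construct each entry, and allocate the per-zone full
 * bitmap. On failure the caller presumably tears down whatever was
 * built via pbatcache_dtr().
 */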
bool
pbatcache_ctr(struct pbatcache* pc,
              struct compress_params* kparams, struct compress_stats* kstats,
              u32 cache_pages)
{
    struct pbat* cache;
    u32 cache_len;
    u32 n;

    memset(pc, 0, sizeof(struct pbatcache));

    /* pbatcache gets 1/32 of cache_pages */
    cache_len = cache_pages / 32;
    if (!cache_len) {
        printk(KERN_ERR "%s: Cache too small\n", __func__);
        return false;
    }
    printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
    cache = kzalloc(cache_len * sizeof(struct pbat), GFP_KERNEL);
    if (!cache) {
        return false;
    }
    mutex_init(&pc->cache_lock);
    INIT_LIST_HEAD(&pc->cache_head);
    pc->cache_len = cache_len;
    pc->cache = cache;
    for (n = 0; n < cache_len; ++n) {
        if (!pbat_ctr(&cache[n], kparams, kstats)) {
            return false;
        }
        list_add_tail(&cache[n].list, &pc->cache_head);
    }
    pc->full = kzalloc(DIV_ROUND_UP(kparams->params.nr_zones, BITS_PER_BYTE), GFP_KERNEL);
    if (!pc->full) {
        return false;
    }

    return true;
}
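
/*
 * Tear down the cache, destroying every entry and warning about any
 * outstanding references, which would indicate a leak.
 */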
void
pbatcache_dtr(struct pbatcache* pc)
{
    unsigned int n;
    struct pbat* pbat;

    kfree(pc->full);
    for (n = 0; n < pc->cache_len; ++n) {
        pbat = &pc->cache[n];
        pbat_dtr(pbat);
        if (pbat->ref) {
            printk(KERN_ERR "%s: pbat ref leak: n=%u ref=%u\n", __func__, n, pbat->ref);
        }
    }
    kfree(pc->cache);
    pc->cache = NULL;
    pc->cache_len = 0;
    INIT_LIST_HEAD(&pc->cache_head);
}
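
/*
 * Find or load the pbat for a zone. If avail is set and the zone is
 * already marked full, fail fast without touching the cache.
 * Otherwise prefer an entry already bound to the zone, then an
 * unbound entry, and finally evict the least recently used idle
 * entry. The returned pbat carries a reference, and its page lock
 * (taken in pbat_reset) is held until the final pbatcache_put()
 * flushes the page.
 */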
struct pbat*
pbatcache_get(struct pbatcache* pc, u32 zone, bool avail)
{
    struct pbat* pbat;

    mutex_lock(&pc->cache_lock);
    if (avail && cbd_bitmap_isset(pc->full, zone)) {
        mutex_unlock(&pc->cache_lock);
        return NULL;
    }
    list_for_each_entry(pbat, &pc->cache_head, list) {
        mutex_lock(&pbat->reflock);
        if (pbat->zone == zone) {
            list_move(&pbat->list, &pc->cache_head);
            mutex_unlock(&pc->cache_lock);
            if (pbat->ref == 0) {
                goto found;
            }
            ++pbat->ref;
            mutex_unlock(&pbat->reflock);
            return pbat;
        }
        if (pbat->zone == ZONE_NONE) {
            list_move(&pbat->list, &pc->cache_head);
            mutex_unlock(&pc->cache_lock);
            goto found;
        }
        mutex_unlock(&pbat->reflock);
    }
    list_for_each_entry_reverse(pbat, &pc->cache_head, list) {
        mutex_lock(&pbat->reflock);
        if (pbat->ref == 0 && !pbat_error(pbat)) {
            list_move(&pbat->list, &pc->cache_head);
            mutex_unlock(&pc->cache_lock);
            goto found;
        }
        mutex_unlock(&pbat->reflock);
    }
    printk(KERN_ERR "%s: failed to find free entry\n", __func__);
    mutex_unlock(&pc->cache_lock);

    return NULL;

found:
    if (pbat_reset(pbat, zone) != 0) {
        mutex_unlock(&pbat->reflock);
        return NULL;
    }
    pbat->ref = 1;
    mutex_unlock(&pbat->reflock);

    return pbat;
}
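
/*
 * Drop a reference. When the last reference goes away, flush the
 * bitmap page and update the zone's bit in the cache-wide full map
 * to match the pbat's current state.
 */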
int
pbatcache_put(struct pbatcache* pc, struct pbat* pbat)
{
    int ret = 0;

    if (!pbat) {
        return 0;
    }
    mutex_lock(&pbat->reflock);
    if (--pbat->ref == 0) {
        ret = pbat_flush(pbat);
        if (ret) {
            printk(KERN_ERR "%s: pbat_flush failed\n", __func__);
        }
        if (pbat->full) {
            cbd_bitmap_set(pc->full, pbat->zone);
        }
        else {
            cbd_bitmap_reset(pc->full, pbat->zone);
        }
    }
    mutex_unlock(&pbat->reflock);

    return ret;
}