/* cbd/dm-compress/lbatpblk.c */

/*
* Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/dm-compress.h>
/*
 * One cached LBAT physical block.
 *
 * Lifetime/locking (as used below): `reflock` guards `ref` and the
 * pblk identity during cache lookup; `lock` serializes access to the
 * data buffer between lbatpblk_get_buf()/lbatpblk_put_buf() and
 * lbatpblk_flush().
 */
struct lbatpblk {
	struct list_head list;          /* LRU linkage in lbatpblkcache.cache_head */
	u64 pblk;                       /* physical block held, or PBLK_NONE */
	struct mutex reflock;           /* protects ref (and pblk identity checks) */
	unsigned int ref;               /* outstanding lbatpblkcache_get() holders */
	struct mutex lock;              /* protects the buffer contents */
	struct compress_params* kparams;
	struct compress_stats* kstats;
	struct page* page;              /* backing page for buf */
	u8* buf;                        /* page_address(page) */
	bool dirty;                     /* NOTE(review): set in ctr only; dirtiness is
	                                 * actually tracked via PageDirty(page) — confirm
	                                 * whether this field is still needed */
};
/*
 * Initialize one cache entry: zero it, set up its locks and list node,
 * and allocate the backing page.  Returns false on page allocation
 * failure (entry is left zeroed except for the initialized locks).
 */
static bool
lbatpblk_ctr(struct lbatpblk* lp,
             struct compress_params* kparams,
             struct compress_stats* kstats)
{
	/* memset also covers ref = 0 and dirty = false */
	memset(lp, 0, sizeof(*lp));
	INIT_LIST_HEAD(&lp->list);
	mutex_init(&lp->reflock);
	mutex_init(&lp->lock);
	lp->pblk = PBLK_NONE;
	lp->kparams = kparams;
	lp->kstats = kstats;

	lp->page = cbd_alloc_page();
	if (!lp->page)
		return false;
	lp->buf = page_address(lp->page);

	return true;
}
/*
 * Tear down one cache entry: release the backing page and clear the
 * buffer pointer.  Safe on an entry whose page allocation failed.
 */
static void
lbatpblk_dtr(struct lbatpblk* lp)
{
	if (lp->page) {
		/*
		 * NOTE(review): lock_page() waits for any in-flight I/O holding
		 * the page lock (e.g. a write submitted by lbatpblk_flush) to
		 * complete before the page is freed.  The page is then freed
		 * while still locked — presumably cbd_free_page() tolerates
		 * that; confirm against its implementation.
		 */
		lock_page(lp->page);
	}
	lp->buf = NULL;
	cbd_free_page(lp->page);
	lp->page = NULL;
}
/* Whether the backing page has recorded an I/O error. */
static bool
lbatpblk_error(struct lbatpblk* lp)
{
	return PageError(lp->page) != 0;
}
/*
 * Write the entry's page back to disk if it is dirty.
 *
 * Called with the page locked (locked since lbatpblk_reset).  On the
 * clean and error paths the page is unlocked here; on the write path
 * the locked page is handed to pblk_write(), which presumably unlocks
 * it (and clears PageDirty) on I/O completion — TODO confirm against
 * pblk_write's contract.
 *
 * Returns 0 on success or if clean, -EIO if the page has a prior error.
 */
static int
lbatpblk_flush(struct lbatpblk* lp)
{
	int ret = 0;

	mutex_lock(&lp->lock);
	if (!PageDirty(lp->page)) {
		/* nothing to write; release the page lock */
		goto unlock;
	}
	if (lbatpblk_error(lp)) {
		/* earlier I/O failed; do not write stale data */
		ret = -EIO;
		goto unlock;
	}
	/* ownership of the page lock passes to the write path here */
	pblk_write(lp->kparams, lp->pblk, 1, lp->page);
	mutex_unlock(&lp->lock);
	mutex_lock(&lp->kstats->lock);
	++lp->kstats->lbatpblk_w;
	mutex_unlock(&lp->kstats->lock);
	return ret;

unlock:
	unlock_page(lp->page);
	mutex_unlock(&lp->lock);
	return ret;
}
/*
 * Synchronously read the entry's pblk from disk into its page and
 * account the read in the stats.  Must not be called on an entry
 * whose page already has an error.
 */
static int
lbatpblk_read(struct lbatpblk* lp)
{
	int err;

	BUG_ON(lbatpblk_error(lp));
	err = pblk_read_wait(lp->kparams, lp->pblk, 1, lp->page);

	mutex_lock(&lp->kstats->lock);
	lp->kstats->lbatpblk_r += 1;
	mutex_unlock(&lp->kstats->lock);

	return err;
}
/*
 * Repoint an entry at @pblk, reading its contents from disk if the
 * entry previously held a different block.
 *
 * On success the page is left LOCKED — the lock is held for the whole
 * time the entry is referenced and is released by lbatpblk_flush()
 * when the last reference is dropped.  On read failure the page is
 * unlocked and the entry is marked empty (PBLK_NONE).
 */
static int
lbatpblk_reset(struct lbatpblk* lp, u64 pblk)
{
	int ret = 0;

	lock_page(lp->page);
	if (lp->pblk != pblk) {
		/* entry held a different (or no) block: fetch the new one */
		lp->pblk = pblk;
		ret = lbatpblk_read(lp);
	}
	if (ret) {
		unlock_page(lp->page);
		lp->pblk = PBLK_NONE;
	}
	return ret;
}
/*
 * Acquire the entry's data buffer.  Takes lp->lock (released by
 * lbatpblk_put_buf).  When @rw is true the page is marked dirty so a
 * later flush writes it back.
 */
u8*
lbatpblk_get_buf(struct lbatpblk* lp, bool rw)
{
	mutex_lock(&lp->lock);
	if (rw)
		SetPageDirty(lp->page);
	return lp->buf;
}
/* Release the buffer acquired by lbatpblk_get_buf(). */
void
lbatpblk_put_buf(struct lbatpblk* lp)
{
	mutex_unlock(&lp->lock);
}
/*
 * LRU cache of lbatpblk entries.  cache_head is kept in MRU-first
 * order: hits are moved to the head, eviction scans from the tail.
 */
struct lbatpblkcache {
	struct mutex cache_lock;       /* protects cache_head ordering */
	struct list_head cache_head;   /* MRU list of entries in `cache` */
	unsigned int cache_len;        /* number of entries allocated */
	struct lbatpblk* cache;        /* backing array of entries */
};
/* Size of the cache object, for callers that allocate it opaquely. */
size_t
lbatpblkcache_size(void)
{
	return sizeof(struct lbatpblkcache);
}
/*
 * Construct the cache: allocate 15/32 of @cache_pages worth of entries,
 * initialize each, and thread them onto the LRU list.
 *
 * Fixes vs. original: uses kcalloc() so the cache_len * sizeof() size
 * computation is overflow-checked, and unwinds already-constructed
 * entries on a mid-loop failure instead of leaking them (the original
 * returned false leaving pages and the array allocated, and callers do
 * not necessarily call the dtr after a failed ctr).
 *
 * Returns true on success.  On failure, *lpc is left empty and safe to
 * pass to lbatpblkcache_dtr().
 */
bool
lbatpblkcache_ctr(struct lbatpblkcache* lpc,
                  struct compress_params* kparams, struct compress_stats* kstats,
                  u32 cache_pages)
{
	struct lbatpblk* cache;
	u32 cache_len;
	u32 n;

	memset(lpc, 0, sizeof(struct lbatpblkcache));

	/* lbatpblkcache gets 15/32 of cache pages */
	cache_len = (cache_pages * 15 / 32);
	if (!cache_len) {
		printk(KERN_ERR "%s: Cache too small\n", __func__);
		return false;
	}
	printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);

	/* kcalloc zeroes and checks the n * size multiplication for overflow */
	cache = kcalloc(cache_len, sizeof(struct lbatpblk), GFP_KERNEL);
	if (!cache) {
		return false;
	}
	mutex_init(&lpc->cache_lock);
	INIT_LIST_HEAD(&lpc->cache_head);
	lpc->cache_len = cache_len;
	lpc->cache = cache;
	for (n = 0; n < cache_len; ++n) {
		if (!lbatpblk_ctr(&cache[n], kparams, kstats)) {
			/* unwind entries already constructed so nothing leaks */
			while (n--) {
				lbatpblk_dtr(&cache[n]);
			}
			kfree(cache);
			lpc->cache = NULL;
			lpc->cache_len = 0;
			INIT_LIST_HEAD(&lpc->cache_head);
			return false;
		}
		list_add_tail(&cache[n].list, &lpc->cache_head);
	}
	return true;
}
/*
 * Destroy the cache: tear down every entry and free the backing array.
 *
 * Fixes vs. original: the ref-leak diagnostic now runs BEFORE
 * lbatpblk_dtr() tears the entry down (the original reported the leak
 * only after already destroying the entry the leaked reference pointed
 * at), and the dead `if (!lp)` check is removed — the address of an
 * array element is never NULL.
 *
 * Safe to call on an lpc whose ctr failed (cache == NULL, len == 0).
 */
void
lbatpblkcache_dtr(struct lbatpblkcache* lpc)
{
	unsigned int n;
	struct lbatpblk* lp;

	for (n = 0; n < lpc->cache_len; ++n) {
		lp = &lpc->cache[n];
		if (lp->ref) {
			printk(KERN_ERR "%s: lbatpblk ref leak: n=%u ref=%u\n", __func__, n, lp->ref);
		}
		lbatpblk_dtr(lp);
	}
	kfree(lpc->cache);
	lpc->cache = NULL;
	lpc->cache_len = 0;
	INIT_LIST_HEAD(&lpc->cache_head);
}
/*
 * Look up (or allocate) the cache entry for @pblk and take a reference.
 *
 * Scan 1 (MRU order): a hit on @pblk bumps the entry to the list head
 * and takes a ref; an entry with ref == 0 must be re-validated via
 * lbatpblk_reset (to take the page lock) before use.  An empty slot
 * (PBLK_NONE) is claimed immediately.
 * Scan 2 (LRU order): evict the least-recently-used idle, error-free
 * entry and repoint it at @pblk.
 *
 * Lock order: cache_lock -> entry reflock.  cache_lock is dropped as
 * soon as the winning entry is chosen; the entry's reflock is held
 * across reset so the identity/ref update is atomic.
 *
 * Returns the referenced entry, or NULL if no entry is free or the
 * backing read fails.  Callers pair this with lbatpblkcache_put().
 */
struct lbatpblk*
lbatpblkcache_get(struct lbatpblkcache* lpc, u64 pblk)
{
	struct lbatpblk* lp;

	mutex_lock(&lpc->cache_lock);
	list_for_each_entry(lp, &lpc->cache_head, list) {
		mutex_lock(&lp->reflock);
		if (lp->pblk == pblk) {
			list_move(&lp->list, &lpc->cache_head);
			mutex_unlock(&lpc->cache_lock);
			if (lp->ref == 0) {
				/* idle hit: page lock was released at last put;
				 * go re-lock it via reset (pblk matches, no read) */
				goto found;
			}
			++lp->ref;
			mutex_unlock(&lp->reflock);
			return lp;
		}
		if (lp->pblk == PBLK_NONE) {
			/* never-used slot: claim it */
			list_move(&lp->list, &lpc->cache_head);
			mutex_unlock(&lpc->cache_lock);
			goto found;
		}
		mutex_unlock(&lp->reflock);
	}
	/* no hit and no empty slot: evict from the cold end of the LRU */
	list_for_each_entry_reverse(lp, &lpc->cache_head, list) {
		mutex_lock(&lp->reflock);
		if (lp->ref == 0 && !lbatpblk_error(lp)) {
			list_move(&lp->list, &lpc->cache_head);
			mutex_unlock(&lpc->cache_lock);
			goto found;
		}
		mutex_unlock(&lp->reflock);
	}
	printk(KERN_ERR "%s: failed to find free entry\n", __func__);
	mutex_unlock(&lpc->cache_lock);
	return NULL;

found:
	/* reflock still held; cache_lock already dropped */
	if (lbatpblk_reset(lp, pblk) != 0) {
		mutex_unlock(&lp->reflock);
		return NULL;
	}
	lp->ref = 1;
	mutex_unlock(&lp->reflock);
	return lp;
}
/*
 * Drop a reference taken by lbatpblkcache_get().  When the last
 * reference goes away the entry is flushed (writing it back if dirty
 * and releasing its page lock).  NULL is tolerated and is a no-op.
 *
 * Returns 0, or the flush error when this was the final reference.
 */
int
lbatpblkcache_put(struct lbatpblkcache* lpc, struct lbatpblk* lp)
{
	int err = 0;

	if (!lp)
		return 0;

	mutex_lock(&lp->reflock);
	--lp->ref;
	if (lp->ref == 0) {
		err = lbatpblk_flush(lp);
		if (err)
			printk(KERN_ERR "%s: lbatpblk_flush failed\n", __func__);
	}
	mutex_unlock(&lp->reflock);

	return err;
}