/* cbd/dm-compress/pbat.c */
/*
* Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/dm-compress.h>
/*
 * One zone's physical block allocation table (pbat): a bitmap of the
 * zone's data blocks, held in kernel pages vmap'd to a linear buffer.
 */
struct pbat {
u32 zone;                   /* zone this table describes, or ZONE_NONE */
struct mutex reflock;       /* guards ref (see pbatcache_get/put) */
unsigned int ref;           /* active users handed out by pbatcache_get() */
struct mutex lock;          /* guards bitmap contents and dirty state */
struct cbd_params* params;  /* device parameters (not owned) */
struct page** pagev;        /* backing pages, pbat_len(params) entries */
u8* buf;                    /* vmap of pagev: the bitmap itself */
};
/*
 * Construct a pbat object: allocate the backing page vector and map it
 * into one contiguous kernel virtual buffer for bitmap operations.
 *
 * Returns true on success.  On failure, partially allocated state is
 * left in place for pbat_dtr() to release.
 */
static bool
pbat_ctr(struct pbat* pbat,
         struct cbd_params* params)
{
    u32 nr_pages = pbat_len(params);

    memset(pbat, 0, sizeof(struct pbat));
    pbat->zone = ZONE_NONE;
    mutex_init(&pbat->reflock);
    pbat->ref = 0;
    mutex_init(&pbat->lock);
    pbat->params = params;
    /* kcalloc checks nr_pages * size for overflow, unlike kzalloc(n * size). */
    pbat->pagev = kcalloc(nr_pages, sizeof(struct page*), GFP_KERNEL);
    if (!pbat->pagev) {
        return false;
    }
    if (!cbd_alloc_pagev(pbat->pagev, nr_pages)) {
        return false;
    }
    pbat->buf = vmap(pbat->pagev, nr_pages, VM_MAP, PAGE_KERNEL);
    if (!pbat->buf) {
        return false;
    }
    return true;
}
/*
 * Tear down a pbat object: unmap the bitmap buffer and release the
 * backing pages and page vector.
 *
 * NOTE(review): pages are locked before being handed to
 * cbd_free_pagev() -- presumably it expects locked pages; confirm
 * against its definition.
 */
static void
pbat_dtr(struct pbat* pbat)
{
u32 nr_pages = pbat_len(pbat->params);
u32 n;
/* Lock every backing page before unmapping and freeing. */
for (n = 0; n < nr_pages; ++n) {
lock_page(pbat->pagev[n]);
}
vunmap(pbat->buf);
pbat->buf = NULL;
cbd_free_pagev(pbat->pagev, nr_pages);
kfree(pbat->pagev);
pbat->pagev = NULL;
}
/*
 * Report whether any backing page of the pbat has its error flag set.
 * Returns true if at least one page is marked with PageError.
 */
static bool
pbat_error(struct pbat* pbat)
{
    u32 page_count = pbat_len(pbat->params);
    u32 idx;

    for (idx = 0; idx < page_count; ++idx) {
        if (PageError(pbat->pagev[idx]))
            return true;
    }
    return false;
}
/*
 * Write the pbat bitmap back to disk if it is dirty.
 *
 * Dirty tracking uses page 0 only (pbat_alloc/pbat_free set it there).
 * Note the asymmetric page-lock handling: on the clean and error paths
 * all pages are unlocked here, while on the write path the still-locked
 * pages are handed to pblk_write() -- presumably its completion path
 * unlocks them; TODO confirm against pblk_write()'s contract.
 *
 * Returns 0 if clean or written, -EIO if a page error is pending.
 */
static int
pbat_flush(struct pbat* pbat)
{
int ret = 0;
u32 nr_pages = pbat_len(pbat->params);
u32 n;
u64 pblk;
mutex_lock(&pbat->lock);
/* Page 0 carries the dirty bit for the whole bitmap. */
if (!PageDirty(pbat->pagev[0])) {
goto unlock;
}
if (pbat_error(pbat)) {
ret = -EIO;
goto unlock;
}
pblk = pbat_off(pbat->params, pbat->zone);
/* Pages stay locked across the write; not unlocked on this path. */
pblk_write(pbat->params->priv, pblk, nr_pages, pbat->pagev);
mutex_unlock(&pbat->lock);
return ret;
unlock:
for (n = 0; n < nr_pages; ++n) {
unlock_page(pbat->pagev[n]);
}
mutex_unlock(&pbat->lock);
return ret;
}
/*
 * Synchronously read this pbat's bitmap for the current zone from disk.
 * Returns 0 on success or a negative errno from pblk_read_wait().
 */
static int
pbat_read(struct pbat* pbat)
{
    u32 page_count = pbat_len(pbat->params);
    u64 blkno;

    /* XXX: can't happen because pbatcache will not use a page with an error */
    if (PageError(pbat->pagev[0]))
        return -EIO;

    blkno = pbat_off(pbat->params, pbat->zone);
    return pblk_read_wait(pbat->params->priv, blkno, page_count, pbat->pagev);
}
/*
 * (Re)bind this pbat to @zone, locking all backing pages and reading
 * the bitmap from disk when the zone actually changes.
 *
 * On success the pages are left locked for the caller (released later,
 * e.g. on the flush path).  On read failure the pages are unlocked and
 * the pbat is unbound (ZONE_NONE).
 *
 * NOTE(review): when pbat->zone already equals @zone the cached bitmap
 * is reused without a re-read -- presumably the cache guarantees it is
 * still current; confirm against pbatcache_get().
 */
static int
pbat_reset(struct pbat* pbat, u32 zone)
{
int ret = 0;
u32 nr_pages = pbat_len(pbat->params);
u32 n;
for (n = 0; n < nr_pages; ++n) {
lock_page(pbat->pagev[n]);
}
if (pbat->zone != zone) {
pbat->zone = zone;
ret = pbat_read(pbat);
}
if (ret) {
/* Read failed: release the pages and mark the pbat unbound. */
for (n = 0; n < nr_pages; ++n) {
unlock_page(pbat->pagev[n]);
}
pbat->zone = ZONE_NONE;
}
return ret;
}
/* Return the zone this pbat currently represents (ZONE_NONE if unbound). */
u32
pbat_zone(struct pbat* pbat)
{
    return pbat->zone;
}
/*
 * Allocate one physical block from this zone's bitmap.
 * Returns the absolute physical block number, or PBLK_NONE if the
 * zone has no free blocks left.
 */
u64
pbat_alloc(struct pbat* pbat)
{
    u32 bit_count = pbat_len(pbat->params) * PBLK_SIZE_BITS;
    u32 bit;
    u64 result;

    mutex_lock(&pbat->lock);
    bit = cbd_bitmap_alloc(pbat->buf, bit_count);
    if (bit == bit_count) {
        /* Bitmap exhausted. */
        result = PBLK_NONE;
    }
    else {
        result = bit + zone_data_off(pbat->params, pbat->zone);
        /* Page 0 carries the dirty bit for the whole bitmap. */
        SetPageDirty(pbat->pagev[0]);
    }
    mutex_unlock(&pbat->lock);
    return result;
}
/*
 * Return a physical block to this zone's bitmap.
 * Returns 0 on success, -EINVAL if @pblk falls inside the zone's
 * metadata region.  BUGs if @pblk does not belong to this pbat's zone.
 */
int
pbat_free(struct pbat* pbat, u64 pblk)
{
    u32 bit_count = pbat_len(pbat->params) * PBLK_SIZE_BITS;
    u64 data_off;
    u32 zone;
    u32 bit;

    zone = zone_for_pblk(pbat->params, pblk);
    BUG_ON(zone != pbat->zone);
    data_off = zone_data_off(pbat->params, zone);
    if (pblk < data_off) {
        printk(KERN_ERR "%s: pblk in metadata\n", __func__);
        return -EINVAL;
    }
    bit = pblk - data_off;
    BUG_ON(bit >= bit_count);
    mutex_lock(&pbat->lock);
    cbd_bitmap_free(pbat->buf, bit);
    /* Page 0 carries the dirty bit for the whole bitmap. */
    SetPageDirty(pbat->pagev[0]);
    mutex_unlock(&pbat->lock);
    return 0;
}
/*
 * Cache of pbat objects, grown on demand (doubling) by
 * pbatcache_realloc(); entries are looked up and refcounted via
 * pbatcache_get()/pbatcache_put().
 */
struct pbatcache {
struct mutex lock;          /* serializes cache scans and growth */
struct cbd_params* params;  /* device parameters (not owned) */
unsigned int len;           /* number of slots in cache[] */
struct pbat** cache;        /* array of pointers to constructed pbats */
};
/* Size of a pbatcache object, for callers that allocate it opaquely. */
size_t
pbatcache_size(void)
{
    return sizeof(struct pbatcache);
}
/*
 * Grow the cache array to @len slots, constructing a pbat for each new
 * slot.
 *
 * On failure the cache is left partially grown: pc->len/pc->cache are
 * already updated, and slots at or past the failure point are either
 * NULL or hold a partially constructed pbat.  pbatcache_dtr() skips
 * NULL slots, so the caller is expected to tear the cache down on
 * failure.
 *
 * Returns true if every new slot was allocated and constructed.
 */
static bool
pbatcache_realloc(struct pbatcache* pc, unsigned int len)
{
    struct pbat** cache;
    unsigned int n;
    struct pbat* pbat;

    /* kcalloc zero-fills and checks len * size for overflow. */
    cache = kcalloc(len, sizeof(struct pbat*), GFP_KERNEL);
    if (!cache) {
        return false;
    }
    n = 0;
    if (pc->len) {
        memcpy(cache, pc->cache, pc->len * sizeof(struct pbat*));
        n = pc->len;
        kfree(pc->cache);
    }
    pc->len = len;
    pc->cache = cache;
    while (n < len) {
        pbat = kmalloc(sizeof(struct pbat), GFP_KERNEL);
        if (!pbat) {
            return false;
        }
        /* Publish before ctr so pbatcache_dtr() can find and free it. */
        cache[n++] = pbat;
        if (!pbat_ctr(pbat, pc->params)) {
            return false;
        }
    }
    return true;
}
/*
 * Initialize a pbatcache, seeding it with a single cache slot.
 * Returns true on success.
 */
bool
pbatcache_ctr(struct pbatcache* pc,
              struct cbd_params* params)
{
    memset(pc, 0, sizeof(*pc));
    mutex_init(&pc->lock);
    pc->params = params;
    return pbatcache_realloc(pc, 1);
}
/*
 * Release every cached pbat and the cache array itself.  Logs an error
 * for any entry still holding references (a leak by callers).
 */
void
pbatcache_dtr(struct pbatcache* pc)
{
    unsigned int idx;
    struct pbat* entry;

    for (idx = 0; idx < pc->len; ++idx) {
        entry = pc->cache[idx];
        if (entry) {
            pbat_dtr(entry);
            if (entry->ref) {
                printk(KERN_ERR "%s: pbat ref leak: n=%u ref=%u\n",
                       __func__, idx, entry->ref);
            }
            kfree(entry);
        }
    }
    kfree(pc->cache);
    pc->cache = NULL;
    pc->len = 0;
    pc->params = NULL;
}
/*
 * Look up or construct the pbat for @zone and take a reference on it.
 *
 * Search order:
 *   1. an entry already bound to @zone (shared directly if it has
 *      active references);
 *   2. an unbound entry (ZONE_NONE);
 *   3. any idle, error-free entry to repurpose;
 *   4. double the cache and take the first newly created slot.
 *
 * Locking: pc->lock serializes the scan; the chosen entry's reflock is
 * acquired before pc->lock is dropped, so the entry cannot be stolen
 * while pbat_reset() rebinds it outside pc->lock.
 *
 * Returns a referenced pbat (release with pbatcache_put()), or NULL on
 * allocation or read failure.
 */
struct pbat*
pbatcache_get(struct pbatcache* pc, u32 zone)
{
unsigned int n;
struct pbat* pbat;
mutex_lock(&pc->lock);
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->zone == zone) {
if (pbat->ref == 0) {
goto found;
}
/* Already in use for this zone: just add a reference. */
mutex_unlock(&pc->lock);
++pbat->ref;
mutex_unlock(&pbat->reflock);
return pbat;
}
mutex_unlock(&pbat->reflock);
}
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->zone == ZONE_NONE) {
goto found;
}
mutex_unlock(&pbat->reflock);
}
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->ref == 0 && !pbat_error(pbat)) {
goto found;
}
mutex_unlock(&pbat->reflock);
}
/* No reusable entry: grow the cache and use the first new slot. */
n = pc->len;
if (!pbatcache_realloc(pc, pc->len * 2)) {
printk(KERN_ERR "%s: realloc failed\n", __func__);
mutex_unlock(&pc->lock);
return NULL;
}
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
found:
/* reflock held; safe to drop pc->lock before the (possibly slow) reset. */
mutex_unlock(&pc->lock);
if (pbat_reset(pbat, zone) != 0) {
mutex_unlock(&pbat->reflock);
return NULL;
}
pbat->ref = 1;
mutex_unlock(&pbat->reflock);
return pbat;
}
/*
 * Drop a reference to @pbat (NULL is a no-op).  When the last
 * reference goes away the bitmap is flushed to disk.
 *
 * Returns 0 on success or the error from pbat_flush().
 */
int
pbatcache_put(struct pbatcache* pc, struct pbat* pbat)
{
    int ret = 0;

    if (!pbat)
        return 0;

    mutex_lock(&pbat->reflock);
    --pbat->ref;
    if (pbat->ref == 0) {
        ret = pbat_flush(pbat);
        if (ret)
            printk(KERN_ERR "%s: pbat_flush failed\n", __func__);
    }
    mutex_unlock(&pbat->reflock);
    return ret;
}