/* cbd/dm-compress/pbat.c */
/*
* Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/dm-compress.h>
/*
 * In-core cache object for one zone's physical block allocation table
 * (pbat).  The allocation bitmap lives in @pages (mapped at @buf);
 * @state records whether that copy is uncached, clean, or dirty with
 * respect to the backing device.
 */
struct pbat {
/* Zone this object currently describes (ZONE_NONE until first use). */
u32 zone;
/* Protects @ref; taken by pbatcache_get()/pbatcache_put(). */
struct mutex reflock;
/* Active reference count held by cache users. */
unsigned int ref;
/* Protects @state and the bitmap contents in @buf. */
struct mutex lock;
enum cache_state state;
struct cbd_params* params;
/* pbat_len(params) pages backing the bitmap; @buf is their mapping. */
struct page* pages;
u8* buf;
};
bool
pbat_ctr(struct pbat* pbat,
struct cbd_params* params)
{
memset(pbat, 0, sizeof(struct pbat));
pbat->zone = ZONE_NONE;
mutex_init(&pbat->reflock);
pbat->ref = 0;
mutex_init(&pbat->lock);
pbat->state = CACHE_STATE_UNCACHED;
pbat->params = params;
pbat->pages = cbd_alloc_pages(pbat_len(params));
if (!pbat->pages) {
printk(KERN_ERR "%s: Failed to alloc pbat_buf\n", __func__);
return false;
}
pbat->buf = page_address(pbat->pages);
return true;
}
/*
 * Destructor: release the bitmap pages allocated by pbat_ctr() and
 * clear the pointers so stale use is detectable.
 */
void
pbat_dtr(struct pbat* pbat)
{
pbat->buf = NULL;
cbd_free_pages(pbat->pages, pbat_len(pbat->params));
pbat->pages = NULL;
}
/*
 * Completion callback for the asynchronous write submitted by
 * pbat_flush().  Frees the bounce pages that pbat_flush() attached to
 * the bio, then reports (but does not yet propagate) any I/O error.
 */
static void
pbat_flush_endio(struct bio* bio)
{
	int ret;
	unsigned int n;

	/*
	 * BUGFIX: free each vector's own page.  The original freed
	 * bi_io_vec[0].bv_page on every iteration, double-freeing the
	 * first page and leaking the rest.
	 */
	for (n = 0; n < bio->bi_max_vecs; ++n) {
		cbd_free_page(bio->bi_io_vec[n].bv_page);
	}
	ret = pblk_endio(bio);
	if (ret) {
		/*
		 * XXX:
		 * Set dm_compress.io_error?
		 * Set pbat.io_error?
		 * Set pbat.zone = ZONE_ERR?
		 */
		printk(KERN_ERR "%s: XXX: I/O failed\n", __func__);
	}
}
/*
 * Write the cached bitmap back to the device if it is dirty.
 *
 * Copies the bitmap into freshly allocated bounce pages and submits an
 * asynchronous write via pblk_write(); pbat_flush_endio() releases the
 * bounce pages when the I/O completes.
 *
 * Returns 0 on success (or when nothing needed flushing), -ENOMEM if
 * the bounce pages could not be allocated.
 *
 * NOTE(review): @state is set to CACHE_STATE_CLEAN when the write is
 * *submitted*, not when it completes; a write failure is only logged
 * by the endio handler.  Confirm this is the intended model.
 * NOTE(review): iopagev is a VLA on the kernel stack sized by
 * pbat_len() -- verify that count is small and bounded.
 */
int
pbat_flush(struct pbat* pbat)
{
int ret = 0;
u32 count = pbat_len(pbat->params);
struct page* iopagev[count];
u64 pblk;
u32 n;
u8* iobuf;
mutex_lock(&pbat->lock);
/* Only a dirty bitmap needs writing back. */
if (pbat->state != CACHE_STATE_DIRTY) {
goto out;
}
/* Physical location of this zone's pbat on the device. */
pblk = pbat_off(pbat->params, pbat->zone);
if (!cbd_alloc_pagev(iopagev, count)) {
printk(KERN_ERR "%s: out of memory\n", __func__);
ret = -ENOMEM;
goto out;
}
/* Snapshot the bitmap into the bounce pages for the async write. */
for (n = 0; n < count; ++n) {
iobuf = page_address(iopagev[n]);
memcpy(iobuf, pbat->buf + n * PBLK_SIZE, PBLK_SIZE);
}
pblk_write(pbat->params, pblk, count, iopagev, pbat_flush_endio, pbat);
pbat->state = CACHE_STATE_CLEAN;
out:
mutex_unlock(&pbat->lock);
return ret;
}
/*
 * Populate the cached bitmap from the device unless it is already
 * current.
 *
 * Any dirty contents are flushed first; if the cache is then CLEAN the
 * read is skipped.  The read lands directly in the pages backing @buf
 * (via virt_to_page), so no bounce buffer is needed.
 *
 * Returns 0 on success or a negative errno from the flush or the read.
 *
 * NOTE(review): pagev is a VLA on the kernel stack sized by
 * pbat_len() -- verify that count is small and bounded.
 */
int
pbat_read(struct pbat* pbat)
{
int ret = 0;
u32 count = pbat_len(pbat->params);
struct page* pagev[count];
u64 pblk;
u32 n;
/* Push out any dirty data before (possibly) re-reading. */
ret = pbat_flush(pbat);
if (ret) {
return ret;
}
mutex_lock(&pbat->lock);
/* Already current (flush above marks dirty data clean). */
if (pbat->state == CACHE_STATE_CLEAN) {
goto out;
}
pblk = pbat_off(pbat->params, pbat->zone);
/* Read straight into the pages backing the cached bitmap. */
for (n = 0; n < count; ++n) {
pagev[n] = virt_to_page(pbat->buf + n * PBLK_SIZE);
}
ret = pblk_read_wait(pbat->params, pblk, count, pagev);
if (ret) {
goto out;
}
pbat->state = CACHE_STATE_CLEAN;
out:
mutex_unlock(&pbat->lock);
return ret;
}
/*
 * Re-target this cache object at a new zone and invalidate its cached
 * contents.  Caller (pbatcache_get) guarantees there are no concurrent
 * users; the BUG_ON documents that callers never reset to the same zone.
 */
void
pbat_reset(struct pbat* pbat, u32 zone)
{
BUG_ON(pbat->zone == zone);
pbat->zone = zone;
pbat->state = CACHE_STATE_UNCACHED;
}
/* Accessor: the zone this pbat currently describes. */
u32
pbat_zone(struct pbat* pbat)
{
return pbat->zone;
}
/*
 * Allocate one data block from this zone's bitmap.
 *
 * Returns the absolute physical block number of the allocated block,
 * or PBLK_NONE if the zone is full.
 */
u64
pbat_alloc(struct pbat* pbat)
{
	u32 pblk_count = pbat_len(pbat->params) * PBLK_SIZE_BITS;
	u64 idx;

	mutex_lock(&pbat->lock);
	idx = cbd_bitmap_alloc(pbat->buf, pblk_count);
	if (idx == pblk_count) {
		/*
		 * BUGFIX: return the sentinel untranslated.  The original
		 * fell through to the offset addition below, so callers
		 * received PBLK_NONE + zone_data_off() instead of
		 * PBLK_NONE and could never detect a full zone.
		 */
		mutex_unlock(&pbat->lock);
		return PBLK_NONE;
	}
	pbat->state = CACHE_STATE_DIRTY;
	mutex_unlock(&pbat->lock);
	/* Translate the bitmap index to an absolute physical block. */
	return idx + zone_data_off(pbat->params, pbat->zone);
}
/*
 * Return a previously allocated data block to this zone's bitmap.
 *
 * @pblk is an absolute physical block number; it must belong to the
 * zone this pbat describes and lie in the zone's data area (not its
 * metadata).  Returns 0 on success, -EINVAL if @pblk points into
 * zone metadata.
 */
int
pbat_free(struct pbat* pbat, u64 pblk)
{
	u32 bits_per_zone = pbat_len(pbat->params) * PBLK_SIZE_BITS;
	u64 data_base;
	u32 zone;
	u32 bit;

	BUG_ON(pblk < CBD_HEADER_BLOCKS);
	zone = (pblk - CBD_HEADER_BLOCKS) / zone_len(pbat->params);
	BUG_ON(zone != pbat->zone);
	data_base = zone_data_off(pbat->params, zone);
	if (pblk < data_base) {
		printk(KERN_ERR "%s: pblk in metadata\n", __func__);
		return -EINVAL;
	}
	bit = pblk - data_base;
	BUG_ON(bit >= bits_per_zone);
	mutex_lock(&pbat->lock);
	cbd_bitmap_free(pbat->buf, bit);
	pbat->state = CACHE_STATE_DIRTY;
	mutex_unlock(&pbat->lock);
	return 0;
}
/*
 * Growable cache of pbat objects, one per actively used zone.
 * @lock serializes lookup, release, and growth; @cache holds @len
 * individually kmalloc'd pbat pointers.
 */
struct pbatcache {
struct mutex lock;
struct cbd_params* params;
unsigned int len;
struct pbat** cache;
};
/*
 * Size of the pbatcache object, for callers that allocate it without
 * seeing the (file-local) struct definition.
 */
size_t
pbatcache_size(void)
{
return sizeof(struct pbatcache);
}
/*
 * Grow the cache's pointer array to @len entries and construct the new
 * pbat objects.  Existing entries (and any references to them) are
 * preserved; only the array holding the pointers is reallocated.
 *
 * On failure the cache is left consistent: pc->len/pc->cache always
 * describe the already-constructed prefix, so pbatcache_dtr() can
 * clean up (it skips NULL slots, and a slot is published before its
 * pbat_ctr() runs so a half-constructed object is still reachable).
 *
 * Returns true on success, false on allocation failure.
 */
static bool
pbatcache_realloc(struct pbatcache* pc, unsigned int len)
{
	struct pbat** cache;
	unsigned int n;
	struct pbat* pbat;

	/*
	 * kcalloc checks the len * sizeof() multiplication for overflow
	 * (the original kzalloc(len * sizeof(...)) did not) and returns
	 * zeroed memory, so unconstructed slots read as NULL.
	 */
	cache = kcalloc(len, sizeof(struct pbat*), GFP_KERNEL);
	if (!cache) {
		return false;
	}
	n = 0;
	if (pc->len) {
		/* Carry over the already-constructed objects. */
		memcpy(cache, pc->cache, pc->len * sizeof(struct pbat*));
		n = pc->len;
		kfree(pc->cache);
	}
	pc->len = len;
	pc->cache = cache;
	/* Construct the new tail of the array. */
	while (n < len) {
		pbat = kmalloc(sizeof(struct pbat), GFP_KERNEL);
		if (!pbat) {
			return false;
		}
		/* Publish before ctr so dtr can clean up a ctr failure. */
		cache[n++] = pbat;
		if (!pbat_ctr(pbat, pc->params)) {
			return false;
		}
	}
	return true;
}
/*
 * Initialize the cache with a single pbat object; the cache grows on
 * demand (doubling) from pbatcache_get().
 * Returns false if the initial allocation fails.
 */
bool
pbatcache_ctr(struct pbatcache* pc,
struct cbd_params* params)
{
memset(pc, 0, sizeof(struct pbatcache));
mutex_init(&pc->lock);
pc->params = params;
return pbatcache_realloc(pc, 1);
}
/*
 * Tear down the cache: destroy and free every pbat object, then the
 * pointer array itself.  Logs (but does not act on) any object whose
 * refcount is still nonzero -- that indicates a get/put leak.
 * NULL slots (left by a failed pbatcache_realloc) are skipped.
 */
void
pbatcache_dtr(struct pbatcache* pc)
{
unsigned int n;
struct pbat* pbat;
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
if (!pbat) {
continue;
}
pbat_dtr(pbat);
if (pbat->ref) {
printk(KERN_ERR "%s: pbat ref leak: n=%u ref=%u\n", __func__, n, pbat->ref);
}
kfree(pbat);
}
kfree(pc->cache);
pc->cache = NULL;
pc->len = 0;
pc->params = NULL;
}
/*
 * Look up (or create) the cache object for @zone and take a reference.
 *
 * Under pc->lock, three passes over the array:
 *   1. an object already caching @zone        -> ref++ and return it;
 *   2. a never-used object (zone == ZONE_NONE) -> claim it;
 *   3. any object with ref == 0                -> evict its old zone;
 * and if all objects are busy, double the array and claim the first
 * new slot.  Returns NULL only if that growth fails.
 *
 * NOTE(review): in cases 2-4 the object is pbat_reset() to UNCACHED
 * but its bitmap is not read here; presumably callers invoke
 * pbat_read() before use -- confirm.
 */
struct pbat*
pbatcache_get(struct pbatcache* pc, u32 zone)
{
unsigned int n;
struct pbat* pbat;
mutex_lock(&pc->lock);
/* Pass 1: already cached for this zone? */
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->zone == zone) {
++pbat->ref;
mutex_unlock(&pbat->reflock);
goto out;
}
mutex_unlock(&pbat->reflock);
}
/* Pass 2: a slot that has never been used. */
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->zone == ZONE_NONE) {
goto found;
}
mutex_unlock(&pbat->reflock);
}
/* Pass 3: evict any unreferenced object. */
for (n = 0; n < pc->len; ++n) {
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
if (pbat->ref == 0) {
goto found;
}
mutex_unlock(&pbat->reflock);
}
/* Everything is referenced: grow the cache and use a fresh slot. */
printk(KERN_INFO "%s: all objects in use, realloc...\n", __func__);
n = pc->len;
if (!pbatcache_realloc(pc, pc->len * 2)) {
printk(KERN_ERR "%s: realloc failed\n", __func__);
pbat = NULL;
goto out;
}
pbat = pc->cache[n];
mutex_lock(&pbat->reflock);
found:
/* reflock is held here; re-target the object and hand out a ref. */
pbat_reset(pbat, zone);
pbat->ref = 1;
mutex_unlock(&pbat->reflock);
out:
mutex_unlock(&pc->lock);
return pbat;
}
int
pbatcache_put(struct pbatcache* pc, struct pbat* pbat)
{
int ret = 0;
if (!pbat) {
return 0;
}
mutex_lock(&pc->lock);
mutex_lock(&pbat->reflock);
if (--pbat->ref == 0) {
ret = pbat_flush(pbat);
if (ret) {
printk(KERN_ERR "%s: pbat_flush failed\n", __func__);
}
}
mutex_unlock(&pbat->reflock);
mutex_unlock(&pc->lock);
return ret;
}