cbd/dm-compress/lbd.c

/*
 * Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/zlib.h>
#include <linux/dm-compress.h>
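
/*
 * struct lbd: one cached logical block.
 *
 * lru_list and flush_list link the entry into lbdcache's LRU and
 * delayed-flush queues.  reflock guards ref and queue membership;
 * lock guards the data buffer.  buf is the block data backed by
 * pagev (compressed in place during flush); c_len is the compressed
 * length, or CBD_UNCOMPRESSED when buf holds plain data.
 */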
struct lbd {
    struct list_head lru_list;
    struct list_head flush_list;
    unsigned long flush_jiffies;
    u64 lblk;
    struct mutex reflock;
    unsigned int ref;
    struct mutex lock;
    struct compress_params* kparams;
    struct compress_stats* kstats;
    struct lbatviewcache* lvc;
    struct lbatview* lv;
    void* percpu;
    struct page** pagev;
    u8* buf;
    u32 c_len;
};
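
/*
 * Return true if the entire logical block is zero-filled.  Allows
 * all-zero blocks to be stored as zero-length elements when the
 * volume has zero detection enabled.
 */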
static inline bool
lblk_is_zeros(struct cbd_params* params, struct lbd* lbd)
{
    u32 off;
    u32 len = lblk_per_pblk(params) * pblk_size(params);

    if (!(params->flags & CBD_FLAG_DETECT_ZEROS)) {
        return false;
    }
    for (off = 0; off < len; ++off) {
        if (lbd->buf[off]) {
            return false;
        }
    }

    return true;
}
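
/*
 * Per-cpu scratch state for compression: a bounce buffer one lblk in
 * size plus algorithm workspaces.  Accessed under get_cpu(), so no
 * locking is needed.
 */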
struct lblk_compress_state {
    u8* buf;
#ifdef COMPRESS_HAVE_LZ4
    u8* lz4_workmem;
#endif
#ifdef COMPRESS_HAVE_ZLIB
    z_stream zlib_cstream;
    z_stream zlib_dstream;
#endif
};

static struct lblk_compress_state*
lblk_get_compress_state(void* percpu, int cpu)
{
    struct lblk_compress_state** statep;

    statep = per_cpu_ptr(percpu, cpu);
    return *statep;
}

#ifdef COMPRESS_HAVE_LZ4
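
/*
 * Compress the block with LZ4 into the per-cpu bounce buffer, then
 * copy the result back into lbd->buf.  The output budget is one pblk
 * less than the logical block size, so success implies the block
 * saves at least one physical block on disk.
 */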
static size_t
lblk_compress_lz4(struct lbd* lbd)
{
    int clen;
    int cpu;
    struct lblk_compress_state* state;

    cpu = get_cpu();
    state = lblk_get_compress_state(lbd->percpu, cpu);
    BUG_ON(state == NULL);
    clen = LZ4_compress_fast(lbd->buf, state->buf,
                             lblk_size(&lbd->kparams->params),
                             lblk_size(&lbd->kparams->params) -
                                 pblk_size(&lbd->kparams->params),
                             cbd_compression_level_get(&lbd->kparams->params),
                             state->lz4_workmem);
    if (clen <= 0) {
        put_cpu();
        return 0;
    }
    memcpy(lbd->buf, state->buf, clen);
    put_cpu();

    return (size_t)clen;
}

static bool
lblk_decompress_lz4(struct lbd* lbd)
{
    int ret;
    int cpu;
    struct lblk_compress_state* state;
    u32 dlen = lblk_size(&lbd->kparams->params);

    cpu = get_cpu();
    state = lblk_get_compress_state(lbd->percpu, cpu);
    BUG_ON(state == NULL);
    ret = LZ4_decompress_safe(lbd->buf,
                              state->buf,
                              lbd->c_len,
                              dlen);
    if (ret != dlen) {
        put_cpu();
        return false;
    }
    memcpy(lbd->buf, state->buf, dlen);
    put_cpu();

    return true;
}
#endif

#ifdef COMPRESS_HAVE_ZLIB
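
/*
 * Compress the block with zlib (deflate) into the per-cpu bounce
 * buffer.  As with LZ4, the output must fit in at least one pblk
 * less than the logical block size, or the block is stored
 * uncompressed.
 */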
static size_t
lblk_compress_zlib(struct lbd* lbd)
{
    int ret;
    int clen;
    int cpu;
    struct lblk_compress_state* state;
    z_stream* stream;

    cpu = get_cpu();
    state = lblk_get_compress_state(lbd->percpu, cpu);
    BUG_ON(state == NULL);
    stream = &state->zlib_cstream;
    ret = zlib_deflateReset(stream);
    BUG_ON(ret != Z_OK);
    stream->next_in = lbd->buf;
    stream->avail_in = lblk_size(&lbd->kparams->params);
    stream->next_out = state->buf;
    stream->avail_out = lblk_size(&lbd->kparams->params) -
                        pblk_size(&lbd->kparams->params);
    ret = zlib_deflate(stream, Z_FINISH);
    if (ret != Z_STREAM_END) {
        put_cpu();
        return 0;
    }
    clen = stream->total_out;
    memcpy(lbd->buf, state->buf, clen);
    put_cpu();

    return (size_t)clen;
}

static bool
lblk_decompress_zlib(struct lbd* lbd)
{
    int ret;
    int cpu;
    struct lblk_compress_state* state;
    z_stream* stream;
    u32 dlen = lblk_size(&lbd->kparams->params);

    cpu = get_cpu();
    state = lblk_get_compress_state(lbd->percpu, cpu);
    BUG_ON(state == NULL);
    stream = &state->zlib_dstream;
    ret = zlib_inflateReset(stream);
    BUG_ON(ret != Z_OK);
    stream->next_in = lbd->buf;
    stream->avail_in = lbd->c_len;
    stream->next_out = state->buf;
    stream->avail_out = dlen;
    ret = zlib_inflate(stream, Z_SYNC_FLUSH);
    /*
     * zlib may consume all input yet stop at Z_OK without signalling
     * end-of-stream.  Feed it a dummy byte with Z_FINISH to push it
     * to Z_STREAM_END.
     */
    if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
        u8 zerostuff = 0;

        stream->next_in = &zerostuff;
        stream->avail_in = 1;
        ret = zlib_inflate(stream, Z_FINISH);
    }
    if (ret != Z_STREAM_END || stream->total_out != dlen) {
        put_cpu();
        return false;
    }
    memcpy(lbd->buf, state->buf, dlen);
    put_cpu();

    return true;
}
#endif

/*
 * Compress lbd->buf in place using the configured algorithm.
 *
 * Returns the number of compressed bytes, or 0 if the block could
 * not be compressed (or no algorithm is available).
 */
static size_t
lblk_compress(struct lbd* lbd)
{
#ifdef COMPRESS_HAVE_LZ4
    if (cbd_compression_alg_get(&lbd->kparams->params) == CBD_ALG_LZ4) {
        return lblk_compress_lz4(lbd);
    }
#endif
#ifdef COMPRESS_HAVE_ZLIB
    if (cbd_compression_alg_get(&lbd->kparams->params) == CBD_ALG_ZLIB) {
        return lblk_compress_zlib(lbd);
    }
#endif
    return 0;
}

/*
 * Decompress the c_len compressed bytes in lbd->buf in place.
 *
 * Returns true on success.
 */
static bool
lblk_decompress(struct lbd* lbd)
{
#ifdef COMPRESS_HAVE_LZ4
    if (cbd_compression_alg_get(&lbd->kparams->params) == CBD_ALG_LZ4) {
        return lblk_decompress_lz4(lbd);
    }
#endif
#ifdef COMPRESS_HAVE_ZLIB
    if (cbd_compression_alg_get(&lbd->kparams->params) == CBD_ALG_ZLIB) {
        return lblk_decompress_zlib(lbd);
    }
#endif
    return false;
}
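
/*
 * One-time construction of a cache entry: allocate enough pages to
 * back one logical block and map them into a contiguous buffer
 * (vmap'd when the block spans more than one page).
 */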
static bool
lbd_ctr(struct lbd* lbd,
        struct compress_params* kparams,
        struct compress_stats* kstats,
        struct lbatviewcache* lvc,
        void* percpu)
{
    u32 nr_pages = DIV_ROUND_UP(lblk_size(&kparams->params), PAGE_SIZE);

    memset(lbd, 0, sizeof(struct lbd));
    INIT_LIST_HEAD(&lbd->lru_list);
    INIT_LIST_HEAD(&lbd->flush_list);
    lbd->flush_jiffies = 0;
    lbd->lblk = LBLK_NONE;
    mutex_init(&lbd->reflock);
    lbd->ref = 0;
    mutex_init(&lbd->lock);
    lbd->kparams = kparams;
    lbd->kstats = kstats;
    lbd->lvc = lvc;
    lbd->lv = NULL;
    lbd->percpu = percpu;
    lbd->pagev = kzalloc(nr_pages * sizeof(struct page*), GFP_KERNEL);
    if (!lbd->pagev) {
        return false;
    }
    if (!cbd_alloc_pagev(lbd->pagev, nr_pages)) {
        return false;
    }
    if (nr_pages == 1) {
        lbd->buf = page_address(lbd->pagev[0]);
    }
    else {
        lbd->buf = vmap(lbd->pagev, nr_pages, VM_MAP, PAGE_KERNEL);
        if (!lbd->buf) {
            return false;
        }
    }
    lbd->c_len = 0;

    return true;
}

static void
lbd_dtr(struct lbd* lbd)
{
    u32 nr_pages = DIV_ROUND_UP(lblk_size(&lbd->kparams->params), PAGE_SIZE);
    u32 n;

    if (lbd->pagev) {
        for (n = 0; n < nr_pages; ++n) {
            if (lbd->pagev[n]) {
                lock_page(lbd->pagev[n]);
            }
        }
    }
    if (lbatviewcache_put(lbd->lvc, lbd->lv) != 0) {
        printk(KERN_ERR "%s: lbatviewcache_put failed\n", __func__);
    }
    lbd->c_len = 0;
    if (nr_pages != 1) {
        vunmap(lbd->buf);
    }
    lbd->buf = NULL;
    if (lbd->pagev) {
        cbd_free_pagev(lbd->pagev, nr_pages);
        kfree(lbd->pagev);
        lbd->pagev = NULL;
    }
    lbd->percpu = NULL;
    lbd->lv = NULL;
    lbd->lvc = NULL;
}

static bool
lbd_error(struct lbd* lbd)
{
    return PageError(lbd->pagev[0]);
}
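
/*
 * Write a dirty block back to disk.  Detects all-zero blocks, tries
 * to compress, (re)allocates the element's physical blocks through
 * the lbatview, then submits the backing pages.  Pages that are not
 * submitted are unlocked here, and the lbatview reference is dropped
 * in all cases.
 */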
static int
lbd_flush(struct lbd* lbd)
{
    int ret = 0;
    u32 nr_pages = DIV_ROUND_UP(lblk_size(&lbd->kparams->params), PAGE_SIZE);
    u32 pblk_per_page = PAGE_SIZE / pblk_size(&lbd->kparams->params);
    int err;
    u32 nr_pblk;
    u32 pblk_idx;
    u32 pg_idx;
    u64 pblkv[PBLK_IOV_MAX];
    u32 iov_len;
    u32 n;

    mutex_lock(&lbd->lock);
    if (!PageDirty(lbd->pagev[0])) {
        goto unlock;
    }
    if (lbd_error(lbd)) {
        ret = -EIO;
        goto unlock;
    }
    if (lblk_is_zeros(&lbd->kparams->params, lbd)) {
        lbd->c_len = CBD_UNCOMPRESSED;
        ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, 0);
        goto unlock;
    }
    lbd->c_len = lblk_compress(lbd);
    if (lbd->c_len > 0) {
        u32 c_blkrem = lbd->c_len % pblk_size(&lbd->kparams->params);

        /* Zero the unused tail of the last physical block. */
        if (c_blkrem) {
            memset(lbd->buf + lbd->c_len, 0,
                   pblk_size(&lbd->kparams->params) - c_blkrem);
        }
        nr_pblk = DIV_ROUND_UP(lbd->c_len, pblk_size(&lbd->kparams->params));
    }
    else {
        lbd->c_len = CBD_UNCOMPRESSED;
        nr_pblk = lblk_per_pblk(&lbd->kparams->params);
    }
    ret = lbatview_elem_realloc(lbd->lv, lbd->lblk, lbd->c_len);
    if (ret) {
        lbd->kparams->params.flags |= CBD_FLAG_ERROR;
        goto unlock;
    }
    for (pblk_idx = 0, pg_idx = 0; pblk_idx < nr_pblk; ++pg_idx) {
        iov_len = min(nr_pblk - pblk_idx, pblk_per_page);
        for (n = 0; n < iov_len; ++n) {
            pblkv[n] = lbatview_elem_pblk(lbd->lv, lbd->lblk, pblk_idx++);
            BUG_ON(pblkv[n] == PBLK_NONE);
        }
        pblk_writev(lbd->kparams, pblkv, iov_len, lbd->pagev[pg_idx]);
    }
    while (pg_idx < nr_pages) {
        unlock_page(lbd->pagev[pg_idx++]);
    }
    mutex_lock(&lbd->kstats->lock);
    ++lbd->kstats->lbd_w;
    mutex_unlock(&lbd->kstats->lock);
    goto out;

unlock:
    for (n = 0; n < nr_pages; ++n) {
        unlock_page(lbd->pagev[n]);
    }
out:
    err = lbatviewcache_put(lbd->lvc, lbd->lv);
    lbd->lv = NULL;
    if (err) {
        ret = err;
    }
    mutex_unlock(&lbd->lock);

    return ret;
}
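
/*
 * Read and decompress a block.  A zero-length element yields a
 * zero-filled buffer; otherwise the element's physical blocks are
 * read in page-sized batches and decompressed if needed.  On success
 * c_len is left at CBD_UNCOMPRESSED and buf holds the plain data.
 */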
static int
lbd_read(struct lbd* lbd)
{
    int ret = 0;
    u32 pblk_per_page = PAGE_SIZE / pblk_size(&lbd->kparams->params);
    u32 nr_pblk;
    u32 pblk_idx;
    u32 pg_idx;
    u64 pblkv[PBLK_IOV_MAX];
    u32 iov_len;
    u32 n;

    BUG_ON(lbd_error(lbd));
    lbd->c_len = lbatview_elem_len(lbd->lv, lbd->lblk);
    if (lbd->c_len == 0) {
        memset(lbd->buf, 0, lblk_size(&lbd->kparams->params));
    }
    else {
        nr_pblk = (lbd->c_len == CBD_UNCOMPRESSED) ?
                  lblk_per_pblk(&lbd->kparams->params) :
                  DIV_ROUND_UP(lbd->c_len, pblk_size(&lbd->kparams->params));
        if (nr_pblk > lblk_per_pblk(&lbd->kparams->params)) {
            printk(KERN_ERR "%s: elem len %u out of range at lblk=%lu\n",
                   __func__, lbd->c_len, (unsigned long)lbd->lblk);
            ret = -EIO;
            goto out;
        }
        for (pblk_idx = 0, pg_idx = 0; pblk_idx < nr_pblk; ++pg_idx) {
            iov_len = min(nr_pblk - pblk_idx, pblk_per_page);
            for (n = 0; n < iov_len; ++n) {
                pblkv[n] = lbatview_elem_pblk(lbd->lv, lbd->lblk, pblk_idx++);
                if (pblkv[n] == PBLK_NONE) {
                    printk(KERN_ERR "%s: bad pblk\n", __func__);
                    ret = -EIO;
                    goto out;
                }
            }
            ret = pblk_readv_wait(lbd->kparams, pblkv, iov_len, lbd->pagev[pg_idx]);
            if (ret) {
                goto out;
            }
        }
        if (lbd->c_len != CBD_UNCOMPRESSED) {
            if (!lblk_decompress(lbd)) {
                printk(KERN_ERR "%s: decompress failed\n", __func__);
                ret = -EIO;
                goto out;
            }
        }
    }
    lbd->c_len = CBD_UNCOMPRESSED;
    mutex_lock(&lbd->kstats->lock);
    ++lbd->kstats->lbd_r;
    mutex_unlock(&lbd->kstats->lock);

out:
    return ret;
}
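
/*
 * Point the entry at a (possibly new) logical block: lock the
 * backing pages, take an lbatview reference, and load the block's
 * data.  If the entry already holds lblk, just ensure the data is
 * decompressed.  On failure the entry is returned to its idle,
 * unlocked state.
 */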
static int
lbd_reset(struct lbd* lbd, u64 lblk)
{
    int ret = 0;
    u32 nr_pages = DIV_ROUND_UP(lblk_size(&lbd->kparams->params), PAGE_SIZE);
    u32 n;

    if (lbd->lv) {
        printk(KERN_ERR "%s: lbatview leak\n", __func__);
    }
    for (n = 0; n < nr_pages; ++n) {
        lock_page(lbd->pagev[n]);
    }
    lbd->lv = lbatviewcache_get(lbd->lvc, lblk);
    if (!lbd->lv) {
        printk(KERN_ERR "%s: lbatviewcache_get failed\n", __func__);
        ret = -EIO;
        goto out;
    }
    if (lbd->lblk != lblk) {
        lbd->lblk = lblk;
        ret = lbd_read(lbd);
        if (ret) {
            printk(KERN_ERR "%s: lbd_read failed\n", __func__);
        }
    }
    else {
        if (lbd->c_len != CBD_UNCOMPRESSED) {
            if (!lblk_decompress(lbd)) {
                printk(KERN_ERR "%s: lblk_decompress failed\n", __func__);
                ret = -EIO;
            }
            lbd->c_len = CBD_UNCOMPRESSED;
        }
    }

out:
    if (ret) {
        lbatviewcache_put(lbd->lvc, lbd->lv);
        lbd->lv = NULL;
        for (n = 0; n < nr_pages; ++n) {
            unlock_page(lbd->pagev[n]);
        }
        lbd->lblk = LBLK_NONE;
    }

    return ret;
}

u64
lbd_lblk(struct lbd* lbd)
{
    return lbd->lblk;
}

void
lbd_data_read(struct lbd* lbd, u32 off, u32 len, u8* buf)
{
    BUG_ON(off + len > lblk_size(&lbd->kparams->params));
    mutex_lock(&lbd->lock);
    memcpy(buf, lbd->buf + off, len);
    mutex_unlock(&lbd->lock);
}

void
lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf)
{
    BUG_ON(off + len > lblk_size(&lbd->kparams->params));
    mutex_lock(&lbd->lock);
    memcpy(lbd->buf + off, buf, len);
    /* The first page's dirty bit tracks the whole block. */
    SetPageDirty(lbd->pagev[0]);
    mutex_unlock(&lbd->lock);
}
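
/*
 * struct lbdcache: a fixed pool of lbd entries kept in LRU order,
 * plus a delayed-flush queue.  Lock ordering is flush_lock, then
 * cache_lock, then a given lbd's reflock.
 */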
struct lbdcache
{
    struct cbd_params* params;
    bool sync;
    void* percpu;
    struct lbatviewcache* lvc;
    struct mutex cache_lock;
    struct list_head cache_head;
    unsigned int cache_len;
    struct lbd* cache;
    struct mutex flush_lock;
    struct delayed_work flush_dwork;
    struct list_head flush_head;
};

static void lbdcache_flush(struct work_struct*);

size_t
lbdcache_size(void)
{
    return sizeof(struct lbdcache);
}
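
/*
 * Allocate one CPU's compression state: the bounce buffer plus the
 * LZ4 and/or zlib workspaces.  Partial allocations are tolerated by
 * lbdcache_free_compress_state(), which checks each pointer.
 */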
static bool
lbdcache_alloc_compress_state(void* percpu, const struct cbd_params* params, int cpu)
{
    struct lblk_compress_state* state;
    struct lblk_compress_state** statep;
    size_t workmem_len;
#ifdef COMPRESS_HAVE_ZLIB
    int ret;
#endif

    state = kzalloc(sizeof(struct lblk_compress_state), GFP_NOWAIT);
    if (!state) {
        printk(KERN_ERR "%s: failed to alloc state\n", __func__);
        return false;
    }
    statep = per_cpu_ptr(percpu, cpu);
    *statep = state;
    state->buf = vmalloc(lblk_size(params));
    if (!state->buf) {
        return false;
    }
#ifdef COMPRESS_HAVE_LZ4
    workmem_len = LZ4_MEM_COMPRESS;
    state->lz4_workmem = vzalloc(workmem_len);
    if (!state->lz4_workmem) {
        return false;
    }
#endif
#ifdef COMPRESS_HAVE_ZLIB
    workmem_len = zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL);
    state->zlib_cstream.workspace = vzalloc(workmem_len);
    if (!state->zlib_cstream.workspace) {
        return false;
    }
    ret = zlib_deflateInit2(&state->zlib_cstream,
                            cbd_compression_level_get(params),
                            Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
                            Z_DEFAULT_STRATEGY);
    BUG_ON(ret != Z_OK);
    workmem_len = zlib_inflate_workspacesize();
    state->zlib_dstream.workspace = vzalloc(workmem_len);
    if (!state->zlib_dstream.workspace) {
        return false;
    }
    ret = zlib_inflateInit2(&state->zlib_dstream, DEF_WBITS);
    BUG_ON(ret != Z_OK);
#endif

    return true;
}

static void
lbdcache_free_compress_state(void* percpu, const struct cbd_params* params, int cpu)
{
    struct lblk_compress_state** statep;
    struct lblk_compress_state* state;

    statep = per_cpu_ptr(percpu, cpu);
    state = *statep;
    if (!state) {
        return;
    }
#ifdef COMPRESS_HAVE_ZLIB
    vfree(state->zlib_dstream.workspace);
    vfree(state->zlib_cstream.workspace);
#endif
#ifdef COMPRESS_HAVE_LZ4
    vfree(state->lz4_workmem);
#endif
    vfree(state->buf);
    kfree(state);
}
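
/*
 * Construct the cache: per-cpu compression state, the backing
 * lbatviewcache, and a pool of lbd entries sized to half of the
 * cache_pages budget.
 */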
bool
lbdcache_ctr(struct lbdcache* lc,
             struct compress_params* kparams, struct compress_stats* kstats,
             u32 cache_pages, bool sync)
{
    int cpu;
    struct lbd* cache;
    u32 cache_len;
    u32 n;

    memset(lc, 0, sizeof(struct lbdcache));
    lc->params = &kparams->params;
    lc->sync = sync;
    lc->percpu = alloc_percpu(void*);
    if (!lc->percpu) {
        printk(KERN_ERR "%s: Out of memory\n", __func__);
        return false;
    }
    /*
     * Walk all possible CPUs: online CPU ids need not be contiguous,
     * and a CPU brought online later must find its state allocated.
     */
    for_each_possible_cpu(cpu) {
        if (!lbdcache_alloc_compress_state(lc->percpu, lc->params, cpu)) {
            return false;
        }
    }
    lc->lvc = kzalloc(lbatviewcache_size(), GFP_KERNEL);
    if (!lc->lvc) {
        return false;
    }
    if (!lbatviewcache_ctr(lc->lvc, kparams, kstats, cache_pages)) {
        return false;
    }
    /* lbdcache gets 1/2 of cache_pages */
    cache_len = (cache_pages / 2) / lblk_per_pblk(lc->params);
    if (!cache_len) {
        printk(KERN_ERR "%s: Cache too small\n", __func__);
        return false;
    }
    printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
    cache = kzalloc(cache_len * sizeof(struct lbd), GFP_KERNEL);
    if (!cache) {
        return false;
    }
    mutex_init(&lc->cache_lock);
    INIT_LIST_HEAD(&lc->cache_head);
    lc->cache_len = cache_len;
    lc->cache = cache;
    for (n = 0; n < cache_len; ++n) {
        if (!lbd_ctr(&cache[n], kparams, kstats, lc->lvc, lc->percpu)) {
            return false;
        }
        list_add_tail(&cache[n].lru_list, &lc->cache_head);
    }
    mutex_init(&lc->flush_lock);
    INIT_DELAYED_WORK(&lc->flush_dwork, lbdcache_flush);
    INIT_LIST_HEAD(&lc->flush_head);

    return true;
}

void
lbdcache_dtr(struct lbdcache* lc)
{
    int ret;
    unsigned int n;
    struct lbd* lbd;
    int cpu;

    if (lc->flush_dwork.work.func) {
        cancel_delayed_work_sync(&lc->flush_dwork);
        flush_delayed_work(&lc->flush_dwork);
    }
    if (lc->flush_head.next) {
        list_for_each_entry(lbd, &lc->flush_head, flush_list) {
            ret = lbd_flush(lbd);
            if (ret) {
                printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
            }
        }
    }
    for (n = 0; n < lc->cache_len; ++n) {
        lbd = &lc->cache[n];
        lbd_dtr(lbd);
        if (lbd->ref) {
            printk(KERN_ERR "%s: lbd ref leak: n=%u ref=%u\n", __func__, n, lbd->ref);
        }
    }
    kfree(lc->cache);
    lc->cache = NULL;
    lc->cache_len = 0;
    if (lc->lvc) {
        lbatviewcache_dtr(lc->lvc);
        kfree(lc->lvc);
        lc->lvc = NULL;
    }
    if (lc->percpu) {
        /* Match the allocation loop: free state for all possible CPUs. */
        for_each_possible_cpu(cpu) {
            lbdcache_free_compress_state(lc->percpu, lc->params, cpu);
        }
        free_percpu(lc->percpu);
        lc->percpu = NULL;
    }
    lc->params = NULL;
}
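
/*
 * Delayed work: move entries whose flush time has arrived onto a
 * private list, then write them out.  Entries are carried over with
 * their reflock held and released once flushed.  The work
 * reschedules itself while entries remain queued.
 */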
static void
lbdcache_flush(struct work_struct* work)
{
    struct lbdcache* lc = container_of(work, struct lbdcache, flush_dwork.work);
    unsigned long now = jiffies;
    struct list_head flushq;
    int ret;
    struct lbd* lbd;

    INIT_LIST_HEAD(&flushq);
    mutex_lock(&lc->flush_lock);
    while (!list_empty(&lc->flush_head)) {
        lbd = list_first_entry(&lc->flush_head, struct lbd, flush_list);
        mutex_lock(&lbd->reflock);
        BUG_ON(lbd->ref != 1);
        if (time_after(lbd->flush_jiffies, now)) {
            mutex_unlock(&lbd->reflock);
            break;
        }
        list_del_init(&lbd->flush_list);
        list_add_tail(&lbd->flush_list, &flushq);
    }
    if (!list_empty(&lc->flush_head)) {
        schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
    }
    mutex_unlock(&lc->flush_lock);
    list_for_each_entry(lbd, &flushq, flush_list) {
        lbd->ref = 0;
        ret = lbd_flush(lbd);
        if (ret) {
            printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
        }
        mutex_unlock(&lbd->reflock);
    }
}
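
/*
 * Look up or allocate an entry for lblk and return it with a
 * reference held.  Search order: an entry already holding lblk
 * (reclaimed from the flush queue if necessary), an unused entry,
 * the least recently used idle entry, and finally an entry stolen
 * from the flush queue (flushed synchronously before reuse).
 * Returns NULL if no entry can be prepared.
 */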
struct lbd*
lbdcache_get(struct lbdcache* lc, u64 lblk)
{
    int ret;
    struct lbd* lbd;

    mutex_lock(&lc->flush_lock);
    mutex_lock(&lc->cache_lock);
    list_for_each_entry(lbd, &lc->cache_head, lru_list) {
        mutex_lock(&lbd->reflock);
        if (lbd->lblk == lblk) {
            list_move(&lbd->lru_list, &lc->cache_head);
            mutex_unlock(&lc->cache_lock);
            if (lbd->ref == 0) {
                mutex_unlock(&lc->flush_lock);
                goto found;
            }
            if (lbd->ref == 1) {
                if (!list_empty(&lc->flush_head)) {
                    struct lbd* entry;

                    list_for_each_entry(entry, &lc->flush_head, flush_list) {
                        if (entry == lbd) {
                            list_del_init(&lbd->flush_list);
                            lbd->ref = 0;
                            break;
                        }
                    }
                }
            }
            mutex_unlock(&lc->flush_lock);
            ++lbd->ref;
            mutex_unlock(&lbd->reflock);
            return lbd;
        }
        if (lbd->lblk == LBLK_NONE) {
            list_move(&lbd->lru_list, &lc->cache_head);
            mutex_unlock(&lc->cache_lock);
            mutex_unlock(&lc->flush_lock);
            goto found;
        }
        mutex_unlock(&lbd->reflock);
    }
    list_for_each_entry_reverse(lbd, &lc->cache_head, lru_list) {
        mutex_lock(&lbd->reflock);
        if (lbd->ref == 0 && !lbd_error(lbd)) {
            list_move(&lbd->lru_list, &lc->cache_head);
            mutex_unlock(&lc->cache_lock);
            mutex_unlock(&lc->flush_lock);
            goto found;
        }
        mutex_unlock(&lbd->reflock);
    }
    if (list_empty(&lc->flush_head)) {
        mutex_unlock(&lc->cache_lock);
        mutex_unlock(&lc->flush_lock);
        printk(KERN_ERR "%s: failed to find free entry\n", __func__);
        return NULL;
    }
    lbd = list_first_entry(&lc->flush_head, struct lbd, flush_list);
    mutex_lock(&lbd->reflock);
    BUG_ON(lbd->ref != 1);
    list_del_init(&lbd->flush_list);
    list_move(&lbd->lru_list, &lc->cache_head);
    mutex_unlock(&lc->cache_lock);
    mutex_unlock(&lc->flush_lock);
    lbd->ref = 0;
    ret = lbd_flush(lbd);
    if (ret) {
        printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
    }

found:
    if (lbd_reset(lbd, lblk) != 0) {
        mutex_unlock(&lbd->reflock);
        return NULL;
    }
    lbd->ref = 1;
    mutex_unlock(&lbd->reflock);

    return lbd;
}
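
/*
 * Drop a reference.  When the last reference goes away the entry is
 * either flushed synchronously (sync mode) or parked on the flush
 * queue with ref held at 1 until lbdcache_flush() writes it out.
 */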
int
lbdcache_put(struct lbdcache* lc, struct lbd* lbd)
{
    int ret = 0;

    if (!lbd) {
        return 0;
    }
    mutex_lock(&lc->flush_lock);
    mutex_lock(&lbd->reflock);
    if (--lbd->ref == 0) {
        if (lc->sync) {
            ret = lbd_flush(lbd);
            if (ret) {
                printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
            }
        }
        else {
            lbd->flush_jiffies = jiffies + COMPRESS_FLUSH_DELAY;
            lbd->ref = 1;
            list_add_tail(&lbd->flush_list, &lc->flush_head);
            if (!delayed_work_pending(&lc->flush_dwork)) {
                schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
            }
        }
    }
    mutex_unlock(&lbd->reflock);
    mutex_unlock(&lc->flush_lock);

    return ret;
}