/*
 * Copyright (c) 2019 Tom Marshall
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * Kernel headers inferred from the facilities used in this file (mutexes,
 * slab allocations, page locking and flags).  "dm-compress.h" is assumed to
 * be the project header declaring struct cbd_params, PBLK_NONE,
 * cbd_alloc_page(), cbd_free_page(), pblk_read_wait() and pblk_write().
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/printk.h>

#include "dm-compress.h"

/*
 * A single cached lbat page: one physical block (pblk) worth of data held in
 * a page.  reflock/ref track users of the cache entry; lock serializes access
 * to the page data itself.
 */
struct lbatpage {
        u64 pblk;
        struct mutex reflock;
        unsigned int ref;
        struct mutex lock;
        struct cbd_params* params;
        struct page* page;
        u8* buf;
        bool dirty;
};

/* Initialize an lbatpage and allocate its backing page. */
static bool
lbatpage_ctr(struct lbatpage* lp, struct cbd_params* params)
{
        lp->pblk = PBLK_NONE;
        mutex_init(&lp->reflock);
        lp->ref = 0;
        mutex_init(&lp->lock);
        lp->params = params;
        lp->page = cbd_alloc_page();
        if (!lp->page) {
                return false;
        }
        lp->buf = page_address(lp->page);
        lp->dirty = false;

        return true;
}

/* Release the backing page. */
static void
lbatpage_dtr(struct lbatpage* lp)
{
        lp->buf = NULL;
        cbd_free_page(lp->page);
        lp->page = NULL;
}

/* True if an I/O error has been flagged on the backing page. */
static bool
lbatpage_error(struct lbatpage* lp)
{
        return PageError(lp->page);
}

/*
 * Write the page out if it is dirty.  Clean or errored pages are unlocked
 * here; a dirty page is handed to pblk_write(), which is expected to unlock
 * it when the write completes.
 */
static int
lbatpage_flush(struct lbatpage* lp)
{
        int ret = 0;
        struct page* iopagev[1];

        mutex_lock(&lp->lock);
        if (!PageDirty(lp->page)) {
                goto unlock;
        }
        if (lbatpage_error(lp)) {
                ret = -EIO;
                goto unlock;
        }
        iopagev[0] = lp->page;
        pblk_write(lp->params->priv, lp->pblk, 1, iopagev);
        mutex_unlock(&lp->lock);

        return ret;

unlock:
        unlock_page(lp->page);
        mutex_unlock(&lp->lock);

        return ret;
}

/* Synchronously read the block at lp->pblk into the page. */
static int
lbatpage_read(struct lbatpage* lp)
{
        int ret = 0;
        struct page* pagev[1];

        pagev[0] = lp->page;
        ret = pblk_read_wait(lp->params->priv, lp->pblk, 1, pagev);

        return ret;
}

/*
 * Point the entry at @pblk, reading its contents if the block changed.
 * Returns with the page locked on success.
 */
static int
lbatpage_reset(struct lbatpage* lp, u64 pblk)
{
        int ret = 0;

        lock_page(lp->page);
        if (lp->pblk != pblk) {
                lp->pblk = pblk;
                ret = lbatpage_read(lp);
        }
        if (ret) {
                unlock_page(lp->page);
                lp->pblk = PBLK_NONE;
        }

        return ret;
}

/* Lock the page data for access; mark the page dirty if @rw is true. */
u8*
lbatpage_get_buf(struct lbatpage* lp, bool rw)
{
        mutex_lock(&lp->lock);
        if (rw) {
                SetPageDirty(lp->page);
        }

        return lp->buf;
}

/* Release the data lock taken by lbatpage_get_buf(). */
void
lbatpage_put_buf(struct lbatpage* lp)
{
        mutex_unlock(&lp->lock);
}

/* A growable pool of lbatpage entries, looked up by physical block. */
struct lbatpagecache {
        struct mutex lock;
        struct cbd_params* params;
        unsigned int len;
        struct lbatpage** cache;
};

/* Size of the (opaque) cache object, for callers that allocate it. */
size_t
lbatpagecache_size(void)
{
        return sizeof(struct lbatpagecache);
}

/* Grow the cache to @len entries, preserving the existing ones. */
static bool
lbatpagecache_realloc(struct lbatpagecache* lpc, unsigned int len)
{
        struct lbatpage** cache;
        unsigned int n;
        struct lbatpage* lp;

        cache = kzalloc(len * sizeof(struct lbatpage*), GFP_KERNEL);
        if (!cache) {
                return false;
        }
        n = 0;
        if (lpc->len) {
                memcpy(cache, lpc->cache, lpc->len * sizeof(struct lbatpage*));
                n = lpc->len;
                kfree(lpc->cache);
        }
        lpc->len = len;
        lpc->cache = cache;

        while (n < len) {
                lp = kmalloc(sizeof(struct lbatpage), GFP_KERNEL);
                if (!lp) {
                        return false;
                }
                cache[n++] = lp;
                if (!lbatpage_ctr(lp, lpc->params)) {
                        return false;
                }
        }

        return true;
}

/* Construct the cache with a single entry; it grows on demand. */
bool
lbatpagecache_ctr(struct lbatpagecache* lpc, struct cbd_params* params)
{
        memset(lpc, 0, sizeof(struct lbatpagecache));
        mutex_init(&lpc->lock);
        lpc->params = params;

        return lbatpagecache_realloc(lpc, 1);
}
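/*
 * A minimal sketch of how a caller might allocate and construct the cache,
 * assuming the opaque-object pattern implied by lbatpagecache_size().  The
 * "params" pointer is whatever cbd_params the caller already holds; the
 * error handling shown is illustrative only.
 *
 *	struct lbatpagecache* lpc;
 *
 *	lpc = kzalloc(lbatpagecache_size(), GFP_KERNEL);
 *	if (!lpc)
 *		return -ENOMEM;
 *	if (!lbatpagecache_ctr(lpc, params)) {
 *		kfree(lpc);
 *		return -ENOMEM;
 *	}
 *	...
 *	lbatpagecache_dtr(lpc);
 *	kfree(lpc);
 */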
/* Tear down the cache, warning about any entries still referenced. */
void
lbatpagecache_dtr(struct lbatpagecache* lpc)
{
        unsigned int n;
        struct lbatpage* lp;

        for (n = 0; n < lpc->len; ++n) {
                lp = lpc->cache[n];
                if (!lp) {
                        continue;
                }
                lbatpage_dtr(lp);
                if (lp->ref) {
                        printk(KERN_ERR "%s: lbatpage ref leak: n=%u ref=%u\n",
                               __func__, n, lp->ref);
                }
                kfree(lp);
        }
        kfree(lpc->cache);
        lpc->cache = NULL;
        lpc->len = 0;
        lpc->params = NULL;
}

/*
 * Look up (or claim) the cache entry for @pblk and take a reference on it.
 * Preference order: an entry already holding @pblk, then a never-used entry,
 * then any idle entry without an I/O error; as a last resort the cache is
 * doubled in size.  Returns NULL if the entry cannot be (re)loaded.
 */
struct lbatpage*
lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
{
        unsigned int n;
        struct lbatpage* lp;

        mutex_lock(&lpc->lock);
        for (n = 0; n < lpc->len; ++n) {
                lp = lpc->cache[n];
                mutex_lock(&lp->reflock);
                if (lp->pblk == pblk) {
                        if (lp->ref == 0) {
                                goto found;
                        }
                        mutex_unlock(&lpc->lock);
                        ++lp->ref;
                        mutex_unlock(&lp->reflock);
                        return lp;
                }
                mutex_unlock(&lp->reflock);
        }
        for (n = 0; n < lpc->len; ++n) {
                lp = lpc->cache[n];
                mutex_lock(&lp->reflock);
                if (lp->pblk == PBLK_NONE) {
                        goto found;
                }
                mutex_unlock(&lp->reflock);
        }
        for (n = 0; n < lpc->len; ++n) {
                lp = lpc->cache[n];
                mutex_lock(&lp->reflock);
                if (lp->ref == 0 && !lbatpage_error(lp)) {
                        goto found;
                }
                mutex_unlock(&lp->reflock);
        }
        n = lpc->len;
        if (!lbatpagecache_realloc(lpc, lpc->len * 2)) {
                printk(KERN_ERR "%s: realloc failed\n", __func__);
                mutex_unlock(&lpc->lock);
                return NULL;
        }
        lp = lpc->cache[n];
        mutex_lock(&lp->reflock);

found:
        mutex_unlock(&lpc->lock);
        if (lbatpage_reset(lp, pblk) != 0) {
                mutex_unlock(&lp->reflock);
                return NULL;
        }
        lp->ref = 1;
        mutex_unlock(&lp->reflock);

        return lp;
}

/* Drop a reference taken by lbatpagecache_get(); the last put flushes the page. */
int
lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lp)
{
        int ret = 0;

        if (!lp) {
                return 0;
        }

        mutex_lock(&lp->reflock);
        if (--lp->ref == 0) {
                ret = lbatpage_flush(lp);
                if (ret) {
                        printk(KERN_ERR "%s: lbatpage_flush failed\n", __func__);
                }
        }
        mutex_unlock(&lp->reflock);

        return ret;
}
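/*
 * A minimal sketch of the intended lookup/update cycle, assuming a caller
 * that wants to modify the table page at physical block @pblk.
 * "update_entry" stands in for whatever edit the caller performs on the
 * page-sized buffer; it is not part of this API.
 *
 *	struct lbatpage* lp;
 *	u8* buf;
 *
 *	lp = lbatpagecache_get(lpc, pblk);
 *	if (!lp)
 *		return -EIO;
 *	buf = lbatpage_get_buf(lp, true);	// true: page will be dirtied
 *	update_entry(buf);
 *	lbatpage_put_buf(lp);
 *	return lbatpagecache_put(lpc, lp);	// last put flushes the page
 */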