/*
 * Copyright (c) 2019 Tom Marshall
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/*
 * The original include names were lost; the headers below are a
 * best-guess reconstruction covering the list, mutex, page, and
 * allocation APIs used in this file.  "compress.h" stands in for the
 * project-local header that declares struct cbd_params, PBLK_NONE,
 * cbd_alloc_page(), cbd_free_page(), pblk_read_wait() and pblk_write().
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
#include "compress.h"

/*
 * One cached LBAT page.
 *
 * reflock protects ref, the count of callers holding this entry via
 * lbatpagecache_get().  lock protects the page contents while a caller
 * holds the buffer via lbatpage_get_buf().
 */
struct lbatpage {
        struct list_head list;          /* linkage in the cache LRU list */
        u64 pblk;                       /* backing physical block, or PBLK_NONE */
        struct mutex reflock;           /* protects ref */
        unsigned int ref;               /* active references */
        struct mutex lock;              /* protects page contents */
        struct cbd_params* params;
        struct page* page;              /* backing page */
        u8* buf;                        /* kernel mapping of page */
        bool dirty;                     /* unused: dirtiness is tracked via PageDirty() */
};

static bool
lbatpage_ctr(struct lbatpage* lp, struct cbd_params* params)
{
        memset(lp, 0, sizeof(struct lbatpage));
        INIT_LIST_HEAD(&lp->list);
        lp->pblk = PBLK_NONE;
        mutex_init(&lp->reflock);
        lp->ref = 0;
        mutex_init(&lp->lock);
        lp->params = params;
        lp->page = cbd_alloc_page();
        if (!lp->page) {
                return false;
        }
        lp->buf = page_address(lp->page);
        lp->dirty = false;

        return true;
}

static void
lbatpage_dtr(struct lbatpage* lp)
{
        lp->buf = NULL;
        cbd_free_page(lp->page);
        lp->page = NULL;
}

static bool
lbatpage_error(struct lbatpage* lp)
{
        return PageError(lp->page);
}

/*
 * Write the page back if it is dirty.  The page arrives locked (it was
 * locked by lbatpage_reset()).  On the write path it is handed to
 * pblk_write(), which is expected to unlock it when the I/O completes;
 * on the clean and error paths it is unlocked here.
 */
static int
lbatpage_flush(struct lbatpage* lp)
{
        int ret = 0;
        struct page* iopagev[1];

        mutex_lock(&lp->lock);
        if (!PageDirty(lp->page)) {
                goto unlock;
        }
        if (lbatpage_error(lp)) {
                ret = -EIO;
                goto unlock;
        }
        iopagev[0] = lp->page;
        pblk_write(lp->params->priv, lp->pblk, 1, iopagev);
        mutex_unlock(&lp->lock);

        return ret;

unlock:
        unlock_page(lp->page);
        mutex_unlock(&lp->lock);

        return ret;
}

static int
lbatpage_read(struct lbatpage* lp)
{
        int ret = 0;
        struct page* pagev[1];

        pagev[0] = lp->page;
        ret = pblk_read_wait(lp->params->priv, lp->pblk, 1, pagev);

        return ret;
}

/*
 * Point the entry at pblk, reading the block's contents if the entry
 * currently holds a different block.  On success the page is left
 * locked; it stays locked until lbatpage_flush() releases it.
 */
static int
lbatpage_reset(struct lbatpage* lp, u64 pblk)
{
        int ret = 0;

        lock_page(lp->page);
        if (lp->pblk != pblk) {
                lp->pblk = pblk;
                ret = lbatpage_read(lp);
        }
        if (ret) {
                unlock_page(lp->page);
                lp->pblk = PBLK_NONE;
        }

        return ret;
}

u8*
lbatpage_get_buf(struct lbatpage* lp, bool rw)
{
        mutex_lock(&lp->lock);
        if (rw) {
                SetPageDirty(lp->page);
        }

        return lp->buf;
}

void
lbatpage_put_buf(struct lbatpage* lp)
{
        mutex_unlock(&lp->lock);
}
struct lbatpagecache {
        struct mutex lock;              /* protects the LRU list */
        struct cbd_params* params;
        struct list_head cache_head;    /* entries in LRU order, head = hottest */
        unsigned int cache_len;
        struct lbatpage* cache;
};

size_t
lbatpagecache_size(void)
{
        return sizeof(struct lbatpagecache);
}

bool
lbatpagecache_ctr(struct lbatpagecache* lpc,
                  struct cbd_params* params, u32 cache_pages)
{
        struct lbatpage* cache;
        u32 cache_len;
        u32 n;

        memset(lpc, 0, sizeof(struct lbatpagecache));
        mutex_init(&lpc->lock);
        lpc->params = params;

        /* lbatpagecache gets 15/32 of cache pages */
        cache_len = cache_pages * 15 / 32;
        if (!cache_len) {
                printk(KERN_ERR "%s: Cache too small\n", __func__);
                return false;
        }
        printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
        cache = kzalloc(cache_len * sizeof(struct lbatpage), GFP_KERNEL);
        if (!cache) {
                return false;
        }
        INIT_LIST_HEAD(&lpc->cache_head);
        lpc->cache_len = cache_len;
        lpc->cache = cache;
        for (n = 0; n < cache_len; ++n) {
                if (!lbatpage_ctr(&cache[n], lpc->params)) {
                        return false;
                }
                list_add_tail(&cache[n].list, &lpc->cache_head);
        }

        return true;
}

void
lbatpagecache_dtr(struct lbatpagecache* lpc)
{
        unsigned int n;
        struct lbatpage* lp;

        for (n = 0; n < lpc->cache_len; ++n) {
                lp = &lpc->cache[n];
                lbatpage_dtr(lp);
                if (lp->ref) {
                        printk(KERN_ERR "%s: lbatpage ref leak: n=%u ref=%u\n",
                               __func__, n, lp->ref);
                }
        }
        kfree(lpc->cache);
        lpc->cache = NULL;
        lpc->cache_len = 0;
        INIT_LIST_HEAD(&lpc->cache_head);
        lpc->params = NULL;
}

/*
 * Find or claim a cache entry for pblk and take a reference on it.
 * The list is kept in LRU order: hits and newly claimed entries move
 * to the head, and eviction scans from the tail for an unreferenced,
 * error-free entry to repurpose.  Returns NULL if every entry is busy
 * or the backing read fails.
 */
struct lbatpage*
lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
{
        struct lbatpage* lp;

        mutex_lock(&lpc->lock);
        list_for_each_entry(lp, &lpc->cache_head, list) {
                mutex_lock(&lp->reflock);
                if (lp->pblk == pblk) {
                        list_move(&lp->list, &lpc->cache_head);
                        if (lp->ref == 0) {
                                goto found;
                        }
                        /* Already in use: just take another reference. */
                        mutex_unlock(&lpc->lock);
                        ++lp->ref;
                        mutex_unlock(&lp->reflock);
                        return lp;
                }
                if (lp->pblk == PBLK_NONE) {
                        list_move(&lp->list, &lpc->cache_head);
                        goto found;
                }
                mutex_unlock(&lp->reflock);
        }
        list_for_each_entry_reverse(lp, &lpc->cache_head, list) {
                mutex_lock(&lp->reflock);
                if (lp->ref == 0 && !lbatpage_error(lp)) {
                        list_move(&lp->list, &lpc->cache_head);
                        goto found;
                }
                mutex_unlock(&lp->reflock);
        }
        printk(KERN_ERR "%s: failed to find free entry\n", __func__);
        mutex_unlock(&lpc->lock);
        return NULL;

found:
        mutex_unlock(&lpc->lock);
        if (lbatpage_reset(lp, pblk) != 0) {
                mutex_unlock(&lp->reflock);
                return NULL;
        }
        lp->ref = 1;
        mutex_unlock(&lp->reflock);

        return lp;
}

/*
 * Drop a reference taken by lbatpagecache_get().  The last put flushes
 * the page if it is dirty.
 */
int
lbatpagecache_put(struct lbatpagecache* lpc, struct lbatpage* lp)
{
        int ret = 0;

        if (!lp) {
                return 0;
        }

        mutex_lock(&lp->reflock);
        if (--lp->ref == 0) {
                ret = lbatpage_flush(lp);
                if (ret) {
                        printk(KERN_ERR "%s: lbatpage_flush failed\n",
                               __func__);
                }
        }
        mutex_unlock(&lp->reflock);

        return ret;
}
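
/*
 * Illustrative usage sketch, not part of the original file: how a caller
 * might update one byte of the LBAT page backed by physical block pblk.
 * The function itself is hypothetical and exists only to show the
 * intended lbatpagecache_get() / lbatpage_get_buf() / lbatpage_put_buf()
 * / lbatpagecache_put() sequence; off is assumed to be within the page.
 */
int
lbatpagecache_example_poke(struct lbatpagecache* lpc, u64 pblk,
                           u32 off, u8 val)
{
        struct lbatpage* lp;
        u8* buf;

        lp = lbatpagecache_get(lpc, pblk);
        if (!lp) {
                return -EIO;
        }
        buf = lbatpage_get_buf(lp, true);       /* rw=true marks the page dirty */
        buf[off] = val;
        lbatpage_put_buf(lp);

        /* Dropping the last reference flushes the dirty page. */
        return lbatpagecache_put(lpc, lp);
}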