/*
 * Copyright (c) 2019 Tom Marshall
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "dm-compress.h"	/* struct cbd_params and the cbd_*()/pblk_*() helpers (local header name assumed) */

/*
 * One cached copy of a zone's physical block allocation table (pbat):
 * a bitmap with one bit per data block in the zone.
 *
 * reflock guards ref and the zone binding; lock guards the bitmap
 * pages themselves. Page 0's dirty and error flags stand in for the
 * whole pbat.
 */
struct pbat {
	struct list_head list;
	u32 zone;
	struct mutex reflock;
	unsigned int ref;
	struct mutex lock;
	struct cbd_params* params;
	bool full;
	u32 last_alloc;
	struct page** pagev;
	u8* buf;
};

static bool pbat_ctr(struct pbat* pbat, struct cbd_params* params)
{
	u32 nr_pages = pbat_len(params);

	memset(pbat, 0, sizeof(struct pbat));
	INIT_LIST_HEAD(&pbat->list);
	pbat->zone = ZONE_NONE;
	mutex_init(&pbat->reflock);
	pbat->ref = 0;
	mutex_init(&pbat->lock);
	pbat->params = params;
	pbat->full = false;
	pbat->last_alloc = 0;
	pbat->pagev = kzalloc(nr_pages * sizeof(struct page*), GFP_KERNEL);
	if (!pbat->pagev) {
		return false;
	}
	if (!cbd_alloc_pagev(pbat->pagev, nr_pages)) {
		/* Unwind so that pbat_dtr() can recognize a failed entry. */
		kfree(pbat->pagev);
		pbat->pagev = NULL;
		return false;
	}
	pbat->buf = vmap(pbat->pagev, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!pbat->buf) {
		cbd_free_pagev(pbat->pagev, nr_pages);
		kfree(pbat->pagev);
		pbat->pagev = NULL;
		return false;
	}
	return true;
}

static void pbat_dtr(struct pbat* pbat)
{
	u32 nr_pages;
	u32 n;

	/* Entries whose construction failed have nothing to tear down. */
	if (!pbat->pagev) {
		return;
	}
	nr_pages = pbat_len(pbat->params);
	for (n = 0; n < nr_pages; ++n) {
		lock_page(pbat->pagev[n]);
	}
	vunmap(pbat->buf);
	pbat->buf = NULL;
	cbd_free_pagev(pbat->pagev, nr_pages);
	kfree(pbat->pagev);
	pbat->pagev = NULL;
}

static bool pbat_error(struct pbat* pbat)
{
	/* Page 0's error flag stands in for the whole pbat. */
	return PageError(pbat->pagev[0]);
}

static int pbat_flush(struct pbat* pbat)
{
	int ret = 0;
	u32 nr_pages = pbat_len(pbat->params);
	u32 n;
	u64 pblk;

	mutex_lock(&pbat->lock);
	if (!PageDirty(pbat->pagev[0])) {
		goto unlock;
	}
	if (pbat_error(pbat)) {
		ret = -EIO;
		goto unlock;
	}
	pblk = pbat_off(pbat->params, pbat->zone);
	/*
	 * The write is asynchronous: the pages stay locked until the
	 * write completion unlocks them, so do not fall through to the
	 * manual unlock below.
	 */
	pblk_write(pbat->params->priv, pblk, nr_pages, pbat->pagev);
	mutex_unlock(&pbat->lock);
	return ret;

unlock:
	for (n = 0; n < nr_pages; ++n) {
		unlock_page(pbat->pagev[n]);
	}
	mutex_unlock(&pbat->lock);
	return ret;
}

static int pbat_read(struct pbat* pbat)
{
	int ret = 0;
	u32 nr_pages = pbat_len(pbat->params);
	u64 pblk;

	/* XXX: can't happen because pbatcache will not use a page with an error. */
	if (PageError(pbat->pagev[0])) {
		return -EIO;
	}
	pblk = pbat_off(pbat->params, pbat->zone);
	ret = pblk_read_wait(pbat->params->priv, pblk, nr_pages, pbat->pagev);

	return ret;
}

/*
 * Bind the entry to @zone, reading that zone's bitmap from disk if the
 * entry previously held a different zone. The pages are locked here and
 * stay locked while the entry is held; pbat_flush() unlocks them.
 */
static int pbat_reset(struct pbat* pbat, u32 zone)
{
	int ret = 0;
	u32 nr_pages = pbat_len(pbat->params);
	u32 n;

	for (n = 0; n < nr_pages; ++n) {
		lock_page(pbat->pagev[n]);
	}
	if (pbat->zone != zone) {
		pbat->zone = zone;
		pbat->full = false;
		pbat->last_alloc = 0;
		ret = pbat_read(pbat);
	}
	if (ret) {
		for (n = 0; n < nr_pages; ++n) {
			unlock_page(pbat->pagev[n]);
		}
		pbat->zone = ZONE_NONE;
	}

	return ret;
}

u32 pbat_zone(struct pbat* pbat)
{
	return pbat->zone;
}

/*
 * Allocate one data block in the zone. Returns the absolute physical
 * block number, or PBLK_NONE if the zone is full.
 */
u64 pbat_alloc(struct pbat* pbat)
{
	u32 pblk_count = pbat_len(pbat->params) * PBLK_SIZE_BITS;
	u32 idx;
	u64 pblk;

	mutex_lock(&pbat->lock);
	if (pbat->full) {
		pblk = PBLK_NONE;
		goto out;
	}
	idx = cbd_bitmap_alloc(pbat->buf, pblk_count, pbat->last_alloc);
	if (idx == pblk_count) {
		pbat->full = true;
		pblk = PBLK_NONE;
		goto out;
	}
	pbat->last_alloc = idx;
	pblk = idx + zone_data_off(pbat->params, pbat->zone);
	/* Page 0's dirty flag marks the whole pbat dirty. */
	SetPageDirty(pbat->pagev[0]);

out:
	mutex_unlock(&pbat->lock);
	return pblk;
}
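/*
 * For illustration only: pbat_alloc() and pbat_free() depend on the
 * module's bitmap helpers, which are defined elsewhere. The sketch
 * below is not the actual implementation; it only shows the contract
 * assumed above: cbd_bitmap_alloc() scans for a clear bit starting at
 * a hint, wrapping around, and returns @bitcount when the map is full;
 * cbd_bitmap_free() clears one bit. The LSB-first bit order within
 * each byte is an assumption.
 *
 *	static u32 cbd_bitmap_alloc(u8* buf, u32 bitcount, u32 hint)
 *	{
 *		u32 idx = hint;
 *		u32 n;
 *
 *		for (n = 0; n < bitcount; ++n) {
 *			if (!(buf[idx / 8] & (1u << (idx % 8)))) {
 *				buf[idx / 8] |= (1u << (idx % 8));
 *				return idx;	// first clear bit at or after hint
 *			}
 *			if (++idx == bitcount) {
 *				idx = 0;	// wrap around to the start
 *			}
 *		}
 *		return bitcount;		// every bit set: zone is full
 *	}
 *
 *	static void cbd_bitmap_free(u8* buf, u32 idx)
 *	{
 *		buf[idx / 8] &= ~(1u << (idx % 8));
 *	}
 */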
/*
 * Free one previously allocated data block. The block must belong to
 * the zone this entry is bound to.
 */
int pbat_free(struct pbat* pbat, u64 pblk)
{
	u32 pblk_count = pbat_len(pbat->params) * PBLK_SIZE_BITS;
	u32 zone;
	u32 idx;

	zone = zone_for_pblk(pbat->params, pblk);
	BUG_ON(zone != pbat->zone);
	if (pblk < zone_data_off(pbat->params, zone)) {
		printk(KERN_ERR "%s: pblk in metadata\n", __func__);
		return -EINVAL;
	}
	idx = pblk - zone_data_off(pbat->params, zone);
	BUG_ON(idx >= pblk_count);
	mutex_lock(&pbat->lock);
	cbd_bitmap_free(pbat->buf, idx);
	pbat->full = false;
	SetPageDirty(pbat->pagev[0]);
	mutex_unlock(&pbat->lock);

	return 0;
}

struct pbatcache {
	struct mutex lock;
	struct cbd_params* params;
	struct list_head cache_head;	/* MRU at the head, LRU at the tail */
	unsigned int cache_len;
	struct pbat* cache;
};

size_t pbatcache_size(void)
{
	return sizeof(struct pbatcache);
}

bool pbatcache_ctr(struct pbatcache* pc,
		   struct cbd_params* params, u32 cache_pages)
{
	struct pbat* cache;
	u32 cache_len;
	u32 n;

	memset(pc, 0, sizeof(struct pbatcache));
	mutex_init(&pc->lock);
	pc->params = params;

	/* The pbatcache gets 1/32 of cache_pages. */
	cache_len = (cache_pages / 32) / pbat_len(params);
	if (!cache_len) {
		printk(KERN_ERR "%s: Cache too small\n", __func__);
		return false;
	}
	printk(KERN_INFO "%s: cache_len=%u\n", __func__, cache_len);
	cache = kzalloc(cache_len * sizeof(struct pbat), GFP_KERNEL);
	if (!cache) {
		return false;
	}
	INIT_LIST_HEAD(&pc->cache_head);
	pc->cache_len = cache_len;
	pc->cache = cache;
	for (n = 0; n < cache_len; ++n) {
		if (!pbat_ctr(&cache[n], pc->params)) {
			return false;
		}
		list_add_tail(&cache[n].list, &pc->cache_head);
	}
	return true;
}

void pbatcache_dtr(struct pbatcache* pc)
{
	unsigned int n;
	struct pbat* pbat;

	for (n = 0; n < pc->cache_len; ++n) {
		pbat = &pc->cache[n];
		pbat_dtr(pbat);
		if (pbat->ref) {
			printk(KERN_ERR "%s: pbat ref leak: n=%u ref=%u\n",
			       __func__, n, pbat->ref);
		}
	}
	kfree(pc->cache);
	pc->cache = NULL;
	pc->cache_len = 0;
	INIT_LIST_HEAD(&pc->cache_head);
	pc->params = NULL;
}

/*
 * Look up (or bind) a cache entry for @zone and take a reference on it.
 * Returns NULL if every entry is busy or the bitmap cannot be read.
 */
struct pbat* pbatcache_get(struct pbatcache* pc, u32 zone)
{
	struct pbat* pbat;

	mutex_lock(&pc->lock);
	/* First pass: find the zone if it is cached, or a never-used entry. */
	list_for_each_entry(pbat, &pc->cache_head, list) {
		mutex_lock(&pbat->reflock);
		if (pbat->zone == zone) {
			list_move(&pbat->list, &pc->cache_head);
			if (pbat->ref == 0) {
				goto found;
			}
			mutex_unlock(&pc->lock);
			++pbat->ref;
			mutex_unlock(&pbat->reflock);
			return pbat;
		}
		if (pbat->zone == ZONE_NONE) {
			list_move(&pbat->list, &pc->cache_head);
			goto found;
		}
		mutex_unlock(&pbat->reflock);
	}
	/* Second pass: evict the least recently used idle entry. */
	list_for_each_entry_reverse(pbat, &pc->cache_head, list) {
		mutex_lock(&pbat->reflock);
		if (pbat->ref == 0 && !pbat_error(pbat)) {
			list_move(&pbat->list, &pc->cache_head);
			goto found;
		}
		mutex_unlock(&pbat->reflock);
	}
	printk(KERN_ERR "%s: failed to find free entry\n", __func__);
	mutex_unlock(&pc->lock);
	return NULL;

found:
	mutex_unlock(&pc->lock);
	if (pbat_reset(pbat, zone) != 0) {
		mutex_unlock(&pbat->reflock);
		return NULL;
	}
	pbat->ref = 1;
	mutex_unlock(&pbat->reflock);
	return pbat;
}

/*
 * Drop a reference; the last put flushes the bitmap back to disk.
 */
int pbatcache_put(struct pbatcache* pc, struct pbat* pbat)
{
	int ret = 0;

	if (!pbat) {
		return 0;
	}
	mutex_lock(&pbat->reflock);
	if (--pbat->ref == 0) {
		ret = pbat_flush(pbat);
		if (ret) {
			printk(KERN_ERR "%s: pbat_flush failed\n", __func__);
		}
	}
	mutex_unlock(&pbat->reflock);
	return ret;
}
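/*
 * Typical caller flow, for illustration only. The real callers live in
 * the rest of this module; error handling is trimmed and the names of
 * the surrounding variables (pc, zone, ret) are assumptions:
 *
 *	struct pbat* pbat;
 *	u64 pblk;
 *	int ret;
 *
 *	pbat = pbatcache_get(pc, zone);	// take a ref, may read from disk
 *	if (!pbat)
 *		return -EIO;
 *	pblk = pbat_alloc(pbat);	// reserve one data block in the zone
 *	if (pblk == PBLK_NONE) {
 *		pbatcache_put(pc, pbat);	// zone full: drop ref, try another zone
 *	}
 *	// ... use pblk, later pbat_free() it if needed ...
 *	ret = pbatcache_put(pc, pbat);	// last put flushes the dirty bitmap
 */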