Improve lbdcache locking

* Separate cache and flush locks. Note lock order: flush -> cache
* Never flush an lbd with either the cache or flush lock held.
* Rename the locks in the other caches for consistency.
parent bd51b7cb89
commit 97eb421b58
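The locking discipline this commit introduces is easiest to see in isolation. Below is a minimal, self-contained userspace model of it: pthread mutexes stand in for the kernel's struct mutex, and the struct layouts and helper functions are simplified stand-ins rather than the driver's real API. It demonstrates the two rules from the message above: flush_lock is taken before cache_lock (with any per-entry reflock last), and both list locks are dropped before the slow lbd_flush() call runs.

/*
 * Minimal userspace model of the locking discipline in this commit.
 * Illustrative only: pthread mutexes replace struct mutex, and the
 * types/helpers are hypothetical stand-ins, not the driver's API.
 *
 * Rules modeled:
 *   1. Lock order: flush_lock -> cache_lock -> per-entry reflock.
 *   2. Both list locks are released before the (slow) flush runs.
 */
#include <pthread.h>
#include <stdio.h>

struct lbd {
    pthread_mutex_t reflock;     /* protects ref */
    unsigned int ref;
};

struct lbdcache {
    pthread_mutex_t flush_lock;  /* guards flush_head; taken first */
    pthread_mutex_t cache_lock;  /* guards cache_head / LRU; taken second */
};

/* Stand-in for lbd_flush(): must run with no list lock held. */
static void lbd_flush(struct lbd* lbd)
{
    printf("flushing lbd, ref=%u\n", lbd->ref);
}

static void evict_and_flush(struct lbdcache* lc, struct lbd* lbd)
{
    pthread_mutex_lock(&lc->flush_lock);   /* 1: flush before cache */
    pthread_mutex_lock(&lc->cache_lock);   /* 2 */
    pthread_mutex_lock(&lbd->reflock);     /* 3: per-entry lock last */
    /* ...unlink lbd from flush_head and move it in the LRU here... */
    pthread_mutex_unlock(&lc->cache_lock);
    pthread_mutex_unlock(&lc->flush_lock);
    lbd->ref = 0;
    lbd_flush(lbd);                        /* no list lock held */
    pthread_mutex_unlock(&lbd->reflock);
}

int main(void)
{
    struct lbdcache lc;
    struct lbd lbd = { .ref = 1 };
    pthread_mutex_init(&lc.flush_lock, NULL);
    pthread_mutex_init(&lc.cache_lock, NULL);
    pthread_mutex_init(&lbd.reflock, NULL);
    evict_and_flush(&lc, &lbd);
    return 0;
}

Compile with cc -pthread. The same acquisition order must hold on every path, or a lockdep-style analysis would flag an inversion.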
@@ -152,8 +152,8 @@ lbatpage_put_buf(struct lbatpage* lp)
 }

 struct lbatpagecache {
-    struct mutex lock;
     struct cbd_params* params;
+    struct mutex cache_lock;
     struct list_head cache_head;
     unsigned int cache_len;
     struct lbatpage* cache;
@@ -174,7 +174,6 @@ lbatpagecache_ctr(struct lbatpagecache* lpc,
     u32 n;

     memset(lpc, 0, sizeof(struct lbatpagecache));
-    mutex_init(&lpc->lock);
     lpc->params = params;

     /* lbatpagecache gets 15/32 of cache pages */
@@ -188,6 +187,7 @@ lbatpagecache_ctr(struct lbatpagecache* lpc,
     if (!cache) {
         return false;
     }
+    mutex_init(&lpc->cache_lock);
     INIT_LIST_HEAD(&lpc->cache_head);
     lpc->cache_len = cache_len;
     lpc->cache = cache;
@@ -229,21 +229,22 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
 {
     struct lbatpage* lp;

-    mutex_lock(&lpc->lock);
+    mutex_lock(&lpc->cache_lock);
     list_for_each_entry(lp, &lpc->cache_head, list) {
         mutex_lock(&lp->reflock);
         if (lp->pblk == pblk) {
             list_move(&lp->list, &lpc->cache_head);
+            mutex_unlock(&lpc->cache_lock);
             if (lp->ref == 0) {
                 goto found;
             }
-            mutex_unlock(&lpc->lock);
             ++lp->ref;
             mutex_unlock(&lp->reflock);
             return lp;
         }
         if (lp->pblk == PBLK_NONE) {
             list_move(&lp->list, &lpc->cache_head);
+            mutex_unlock(&lpc->cache_lock);
             goto found;
         }
         mutex_unlock(&lp->reflock);
@@ -252,16 +253,16 @@ lbatpagecache_get(struct lbatpagecache* lpc, u64 pblk)
         mutex_lock(&lp->reflock);
         if (lp->ref == 0 && !lbatpage_error(lp)) {
             list_move(&lp->list, &lpc->cache_head);
+            mutex_unlock(&lpc->cache_lock);
             goto found;
         }
         mutex_unlock(&lp->reflock);
     }
     printk(KERN_ERR "%s: failed to find free entry\n", __func__);
-    mutex_unlock(&lpc->lock);
+    mutex_unlock(&lpc->cache_lock);
     return NULL;

 found:
-    mutex_unlock(&lpc->lock);
     if (lbatpage_reset(lp, pblk) != 0) {
         mutex_unlock(&lp->reflock);
         return NULL;
@@ -431,10 +431,10 @@ lbatview_elem_pblk(struct lbatview* lv, u64 lblk, u32 idx)
 }

 struct lbatviewcache {
-    struct mutex lock;
     struct cbd_params* params;
     struct pbatcache* pc;
     struct lbatpagecache* lpc;
+    struct mutex cache_lock;
     struct list_head cache_head;
     unsigned int cache_len;
     struct lbatview* cache;
@@ -455,7 +455,6 @@ lbatviewcache_ctr(struct lbatviewcache* lvc,
     u32 n;

     memset(lvc, 0, sizeof(struct lbatviewcache));
-    mutex_init(&lvc->lock);
     lvc->params = params;
     lvc->pc = kmalloc(pbatcache_size(), GFP_KERNEL);
     if (!lvc->pc) {
@@ -482,6 +481,7 @@ lbatviewcache_ctr(struct lbatviewcache* lvc,
     if (!cache) {
         return false;
     }
+    mutex_init(&lvc->cache_lock);
     INIT_LIST_HEAD(&lvc->cache_head);
     lvc->cache_len = cache_len;
     lvc->cache = cache;
@@ -545,21 +545,22 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
     pblk = zone_lbat_pblk + rel_pblk;
     count = (rel_pblk == lbat_len(lvc->params) - 1) ? 1 : 2;

-    mutex_lock(&lvc->lock);
+    mutex_lock(&lvc->cache_lock);
     list_for_each_entry(lv, &lvc->cache_head, list) {
         mutex_lock(&lv->reflock);
         if (lv->pblk == pblk) {
             list_move(&lv->list, &lvc->cache_head);
+            mutex_unlock(&lvc->cache_lock);
             if (lv->ref == 0) {
                 goto found;
             }
-            mutex_unlock(&lvc->lock);
             ++lv->ref;
             mutex_unlock(&lv->reflock);
             return lv;
         }
         if (lv->pblk == PBLK_NONE) {
             list_move(&lv->list, &lvc->cache_head);
+            mutex_unlock(&lvc->cache_lock);
             goto found;
         }
         mutex_unlock(&lv->reflock);
@@ -568,16 +569,16 @@ lbatviewcache_get(struct lbatviewcache* lvc, u64 lblk)
         mutex_lock(&lv->reflock);
         if (lv->ref == 0) {
             list_move(&lv->list, &lvc->cache_head);
+            mutex_unlock(&lvc->cache_lock);
             goto found;
         }
         mutex_unlock(&lv->reflock);
     }
     printk(KERN_ERR "%s: failed to find free entry\n", __func__);
-    mutex_unlock(&lvc->lock);
+    mutex_unlock(&lvc->cache_lock);
     return NULL;

 found:
-    mutex_unlock(&lvc->lock);
     if (lbatview_reset(lv, pblk, count) != 0) {
         mutex_unlock(&lv->reflock);
         return NULL;
@@ -509,14 +509,15 @@ lbd_data_write(struct lbd* lbd, u32 off, u32 len, const u8* buf)

 struct lbdcache
 {
-    struct mutex lock;
     struct cbd_params* params;
     struct cbd_stats* stats;
     void* percpu;
     struct lbatviewcache* lvc;
+    struct mutex cache_lock;
     struct list_head cache_head;
     unsigned int cache_len;
     struct lbd* cache;
+    struct mutex flush_lock;
     struct delayed_work flush_dwork;
     struct list_head flush_head;
 };
@@ -612,7 +613,6 @@ lbdcache_ctr(struct lbdcache* lc,
     u32 n;

     memset(lc, 0, sizeof(struct lbdcache));
-    mutex_init(&lc->lock);
     lc->params = params;
     lc->stats = stats;
     lc->percpu = alloc_percpu(void*);
@@ -640,6 +640,7 @@ lbdcache_ctr(struct lbdcache* lc,
     if (!cache) {
         return false;
     }
+    mutex_init(&lc->cache_lock);
     INIT_LIST_HEAD(&lc->cache_head);
     lc->cache_len = cache_len;
     lc->cache = cache;
@@ -649,6 +650,7 @@ lbdcache_ctr(struct lbdcache* lc,
         }
         list_add_tail(&cache[n].lru_list, &lc->cache_head);
     }
+    mutex_init(&lc->flush_lock);
     INIT_DELAYED_WORK(&lc->flush_dwork, lbdcache_flush);
     INIT_LIST_HEAD(&lc->flush_head);

@@ -701,10 +703,12 @@ lbdcache_flush(struct work_struct* work)
 {
     struct lbdcache* lc = container_of(work, struct lbdcache, flush_dwork.work);
     unsigned long now = jiffies;
+    struct list_head flushq;
     int ret;
     struct lbd* lbd;

-    mutex_lock(&lc->lock);
+    INIT_LIST_HEAD(&flushq);
+    mutex_lock(&lc->flush_lock);
     while (!list_empty(&lc->flush_head)) {
         lbd = list_first_entry(&lc->flush_head, struct lbd, flush_list);
         mutex_lock(&lbd->reflock);
@@ -714,6 +718,13 @@ lbdcache_flush(struct work_struct* work)
             break;
         }
         list_del_init(&lbd->flush_list);
+        list_add_tail(&lbd->flush_list, &flushq);
     }
+    if (!list_empty(&lc->flush_head)) {
+        schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
+    }
+    mutex_unlock(&lc->flush_lock);
+    list_for_each_entry(lbd, &flushq, flush_list) {
+        lbd->ref = 0;
         ret = lbd_flush(lbd, lc->stats);
         if (ret) {
@@ -721,26 +732,27 @@ lbdcache_flush(struct work_struct* work)
         }
         mutex_unlock(&lbd->reflock);
     }
-    if (!list_empty(&lc->flush_head)) {
-        schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
-    }
-    mutex_unlock(&lc->lock);
 }

 struct lbd*
 lbdcache_get(struct lbdcache* lc, u64 lblk)
 {
     int ret;
     struct lbd* lbd;

-    mutex_lock(&lc->lock);
+    mutex_lock(&lc->flush_lock);
+    mutex_lock(&lc->cache_lock);
     list_for_each_entry(lbd, &lc->cache_head, lru_list) {
         mutex_lock(&lbd->reflock);
         if (lbd->lblk == lblk) {
+            list_move(&lbd->lru_list, &lc->cache_head);
+            mutex_unlock(&lc->cache_lock);
             if (lbd->ref == 0) {
+                mutex_unlock(&lc->flush_lock);
                 goto found;
             }
-            if (lbd->ref == 1 && !list_empty(&lc->flush_head)) {
+            if (lbd->ref == 1) {
+                if (!list_empty(&lc->flush_head)) {
                     struct lbd* entry;
                     list_for_each_entry(entry, &lc->flush_head, flush_list) {
                         if (entry == lbd) {
@@ -750,13 +762,16 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
                         }
                     }
                 }
-            mutex_unlock(&lc->lock);
+            }
+            mutex_unlock(&lc->flush_lock);
             ++lbd->ref;
             mutex_unlock(&lbd->reflock);
             return lbd;
         }
         if (lbd->lblk == LBLK_NONE) {
             list_move(&lbd->lru_list, &lc->cache_head);
+            mutex_unlock(&lc->cache_lock);
+            mutex_unlock(&lc->flush_lock);
             goto found;
         }
         mutex_unlock(&lbd->reflock);
@@ -765,29 +780,32 @@ lbdcache_get(struct lbdcache* lc, u64 lblk)
         mutex_lock(&lbd->reflock);
         if (lbd->ref == 0 && !lbd_error(lbd)) {
             list_move(&lbd->lru_list, &lc->cache_head);
+            mutex_unlock(&lc->cache_lock);
+            mutex_unlock(&lc->flush_lock);
             goto found;
         }
         mutex_unlock(&lbd->reflock);
     }
-    if (!list_empty(&lc->flush_head)) {
-        int ret;
+    if (list_empty(&lc->flush_head)) {
+        mutex_unlock(&lc->cache_lock);
+        mutex_unlock(&lc->flush_lock);
+        printk(KERN_ERR "%s: failed to find free entry\n", __func__);
+        return NULL;
+    }
     lbd = list_first_entry(&lc->flush_head, struct lbd, flush_list);
     mutex_lock(&lbd->reflock);
     BUG_ON(lbd->ref != 1);
     list_del_init(&lbd->flush_list);
+    list_move(&lbd->lru_list, &lc->cache_head);
+    mutex_unlock(&lc->cache_lock);
+    mutex_unlock(&lc->flush_lock);
     lbd->ref = 0;
     ret = lbd_flush(lbd, lc->stats);
     if (ret) {
         printk(KERN_ERR "%s: lbd_flush failed\n", __func__);
     }
     goto found;
-    }
-    printk(KERN_ERR "%s: failed to find free entry\n", __func__);
-    mutex_unlock(&lc->lock);
-    return NULL;
-
 found:
-    mutex_unlock(&lc->lock);
     if (lbd_reset(lbd, lblk) != 0) {
         mutex_unlock(&lbd->reflock);
         return NULL;
@@ -806,19 +824,18 @@ lbdcache_put(struct lbdcache* lc, struct lbd* lbd)
     if (!lbd) {
         return 0;
     }
-    mutex_lock(&lc->lock);
+    mutex_lock(&lc->flush_lock);
     mutex_lock(&lbd->reflock);
     if (--lbd->ref == 0) {
         lbd->flush_jiffies = jiffies + COMPRESS_FLUSH_DELAY;
         lbd->ref = 1;
         list_add_tail(&lbd->flush_list, &lc->flush_head);
-        /* This is racy, but it does not matter. */
         if (!delayed_work_pending(&lc->flush_dwork)) {
             schedule_delayed_work(&lc->flush_dwork, COMPRESS_FLUSH_DELAY);
         }
     }
     mutex_unlock(&lbd->reflock);
-    mutex_unlock(&lc->lock);
+    mutex_unlock(&lc->flush_lock);

     return ret;
 }
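The lbdcache hunks above also rely on a subtle reference protocol around the flush list: lbdcache_put() parks an idle lbd with ref held at 1, a later lookup may revive it, and whoever unlinks it (the flush worker or an evicting lbdcache_get()) drops ref to 0 before flushing. Below is a small userspace model of that protocol; the names and the boolean list flag are illustrative stand-ins, not the driver's real API.

/*
 * Userspace model of the lbd ref protocol around the flush list.
 * Illustrative only: a bool replaces the kernel list linkage.
 */
#include <assert.h>
#include <stdbool.h>

struct lbd {
    unsigned int ref;
    bool on_flush_list;
};

/* put(): the last user parks the lbd on the flush list, keeping ref at 1
 * so the entry stays pinned while it waits for its deferred flush. */
static void lbd_put(struct lbd* lbd)
{
    if (--lbd->ref == 0) {
        lbd->ref = 1;              /* flush list owns one reference */
        lbd->on_flush_list = true;
    }
}

/* flush or eviction: unlink from the list, then drop the list's ref. */
static void lbd_take_for_flush(struct lbd* lbd)
{
    assert(lbd->ref == 1);         /* mirrors BUG_ON(lbd->ref != 1) */
    lbd->on_flush_list = false;
    lbd->ref = 0;                  /* now safe to flush the entry */
}

int main(void)
{
    struct lbd lbd = { .ref = 1, .on_flush_list = false };
    lbd_put(&lbd);                 /* ref stays 1, parked for flush */
    lbd_take_for_flush(&lbd);      /* ref now 0 */
    return (int)lbd.ref;           /* exits 0 when the protocol holds */
}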
@@ -228,8 +228,8 @@ pbat_free(struct pbat* pbat, u64 pblk)
 }

 struct pbatcache {
-    struct mutex lock;
     struct cbd_params* params;
+    struct mutex cache_lock;
     struct list_head cache_head;
     unsigned int cache_len;
     struct pbat* cache;
@@ -251,7 +251,6 @@ pbatcache_ctr(struct pbatcache* pc,
     u32 n;

     memset(pc, 0, sizeof(struct pbatcache));
-    mutex_init(&pc->lock);
     pc->params = params;

     /* pbatcache gets 1/32 of cache_pages */
@@ -265,6 +264,7 @@ pbatcache_ctr(struct pbatcache* pc,
     if (!cache) {
         return false;
     }
+    mutex_init(&pc->cache_lock);
     INIT_LIST_HEAD(&pc->cache_head);
     pc->cache_len = cache_len;
     pc->cache = cache;
@@ -308,25 +308,26 @@ pbatcache_get(struct pbatcache* pc, u32 zone, bool avail)
 {
     struct pbat* pbat;

-    mutex_lock(&pc->lock);
+    mutex_lock(&pc->cache_lock);
     if (avail && cbd_bitmap_isset(pc->full, zone)) {
-        mutex_unlock(&pc->lock);
+        mutex_unlock(&pc->cache_lock);
         return NULL;
     }
     list_for_each_entry(pbat, &pc->cache_head, list) {
         mutex_lock(&pbat->reflock);
         if (pbat->zone == zone) {
             list_move(&pbat->list, &pc->cache_head);
+            mutex_unlock(&pc->cache_lock);
             if (pbat->ref == 0) {
                 goto found;
             }
-            mutex_unlock(&pc->lock);
             ++pbat->ref;
             mutex_unlock(&pbat->reflock);
             return pbat;
         }
         if (pbat->zone == ZONE_NONE) {
             list_move(&pbat->list, &pc->cache_head);
+            mutex_unlock(&pc->cache_lock);
             goto found;
         }
         mutex_unlock(&pbat->reflock);
@@ -335,16 +336,16 @@ pbatcache_get(struct pbatcache* pc, u32 zone, bool avail)
         mutex_lock(&pbat->reflock);
         if (pbat->ref == 0 && !pbat_error(pbat)) {
             list_move(&pbat->list, &pc->cache_head);
+            mutex_unlock(&pc->cache_lock);
             goto found;
         }
         mutex_unlock(&pbat->reflock);
     }
     printk(KERN_ERR "%s: failed to find free entry\n", __func__);
-    mutex_unlock(&pc->lock);
+    mutex_unlock(&pc->cache_lock);
     return NULL;

 found:
-    mutex_unlock(&pc->lock);
     if (pbat_reset(pbat, zone) != 0) {
         mutex_unlock(&pbat->reflock);
         return NULL;