2019-10-22 04:39:27 +02:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
|
|
|
* 02110-1301, USA.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/bio.h>
|
|
|
|
#include <linux/device-mapper.h>
|
|
|
|
#include <linux/workqueue.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
|
|
|
|
#include <linux/lz4.h>
|
|
|
|
|
|
|
|
#include <linux/dm-compress.h>
|
|
|
|
|
|
|
|
/**************************************
|
|
|
|
* Core memory management.
|
|
|
|
**************************************/
|
|
|
|
|
|
|
|
struct page*
|
|
|
|
cbd_alloc_page(void)
|
|
|
|
{
|
|
|
|
return alloc_page(GFP_KERNEL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release a page previously obtained via cbd_alloc_page().
 */
void
cbd_free_page(struct page* page)
{
__free_page(page);
}
|
|
|
|
|
|
|
|
struct page*
|
|
|
|
cbd_alloc_pages(size_t len)
|
|
|
|
{
|
|
|
|
return alloc_pages(GFP_KERNEL, get_order(len * PAGE_SIZE));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
cbd_free_pages(struct page* pages, size_t len)
|
|
|
|
{
|
|
|
|
__free_pages(pages, get_order(len * PAGE_SIZE));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
cbd_alloc_pagev(struct page** pagev, size_t len)
|
|
|
|
{
|
|
|
|
size_t n;
|
|
|
|
|
|
|
|
for (n = 0; n < len; ++n) {
|
|
|
|
pagev[n] = cbd_alloc_page();
|
|
|
|
if (!pagev[n]) {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
err:
|
|
|
|
while (n--) {
|
|
|
|
cbd_free_page(pagev[n]);
|
|
|
|
pagev[n] = NULL;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
cbd_free_pagev(struct page** pagev, size_t len)
|
|
|
|
{
|
|
|
|
size_t n;
|
|
|
|
|
|
|
|
for (n = 0; n < len; ++n) {
|
|
|
|
cbd_free_page(pagev[n]);
|
|
|
|
pagev[n] = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**************************************
|
|
|
|
* Core low-level I/O.
|
|
|
|
*
|
|
|
|
* pblk count are in units of physical blocks (4096 bytes), NOT sectors.
|
|
|
|
* data is a page address (obtained via __get_free_pages and friends).
|
|
|
|
**************************************/
|
|
|
|
|
|
|
|
static struct bio*
|
2019-10-30 18:05:31 +01:00
|
|
|
pblk_io_prepare(struct block_device* bdev, unsigned int op,
|
2019-11-14 00:13:12 +01:00
|
|
|
u32 pblk_len, u64 pblk, u32 count, struct page* page, u32 page_off)
|
2019-10-22 04:39:27 +02:00
|
|
|
{
|
|
|
|
struct bio* bio;
|
|
|
|
|
2019-11-14 00:13:12 +01:00
|
|
|
BUG_ON(page_off + pblk_len * count > PAGE_SIZE);
|
|
|
|
bio = bio_alloc(GFP_KERNEL, 1);
|
2019-10-22 04:39:27 +02:00
|
|
|
if (!bio) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
|
|
|
return NULL;
|
|
|
|
}
|
2019-10-30 18:05:31 +01:00
|
|
|
bio_set_dev(bio, bdev);
|
2019-10-22 04:39:27 +02:00
|
|
|
bio->bi_opf = op;
|
|
|
|
|
2019-11-14 00:13:12 +01:00
|
|
|
bio->bi_iter.bi_sector = pblk * (pblk_len / SECTOR_SIZE);
|
|
|
|
if (bio_add_page(bio, page, pblk_len * count, page_off) == 0) {
|
|
|
|
BUG();
|
2019-10-22 04:39:27 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return bio;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2019-11-11 20:48:46 +01:00
|
|
|
pblk_read_wait(struct compress_params* kparams,
|
2019-11-14 00:13:12 +01:00
|
|
|
u64 pblk, u32 count, struct page* page)
|
2019-10-22 04:39:27 +02:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct bio* bio;
|
|
|
|
|
2019-11-12 22:25:46 +01:00
|
|
|
bio = pblk_io_prepare(kparams->dev, REQ_OP_READ,
|
2019-11-14 00:13:12 +01:00
|
|
|
pblk_size(&kparams->params), pblk, count, page, 0);
|
2019-10-22 04:39:27 +02:00
|
|
|
if (!bio) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ret = submit_bio_wait(bio);
|
2019-10-24 22:02:03 +02:00
|
|
|
if (ret) {
|
2019-10-22 04:39:27 +02:00
|
|
|
printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
|
|
|
|
}
|
|
|
|
bio_put(bio);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-11-14 00:13:12 +01:00
|
|
|
int
|
|
|
|
pblk_readv_wait(struct compress_params* kparams,
|
|
|
|
u64* pblkv, u32 count, struct page* page)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
u32 pblk_len = pblk_size(&kparams->params);
|
|
|
|
u32 n;
|
|
|
|
u32 page_off;
|
|
|
|
struct bio* bio;
|
|
|
|
|
|
|
|
/* XXX: Issue no-blocking reads for parallelism? */
|
|
|
|
for (n = 0, page_off = 0; n < count; ++n, page_off += pblk_len) {
|
|
|
|
bio = pblk_io_prepare(kparams->dev, REQ_OP_READ,
|
|
|
|
pblk_len, pblkv[n], 1, page, page_off);
|
|
|
|
if (!bio) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
ret = submit_bio_wait(bio);
|
|
|
|
if (ret) {
|
|
|
|
printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
bio_put(bio);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-10-22 04:39:27 +02:00
|
|
|
int
|
2019-11-11 20:48:46 +01:00
|
|
|
pblk_write_wait(struct compress_params* kparams,
|
2019-11-14 00:13:12 +01:00
|
|
|
u64 pblk, u32 count, struct page* page)
|
2019-10-22 04:39:27 +02:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct bio* bio;
|
|
|
|
|
2019-11-12 22:25:46 +01:00
|
|
|
bio = pblk_io_prepare(kparams->dev, REQ_OP_WRITE,
|
2019-11-14 00:13:12 +01:00
|
|
|
pblk_size(&kparams->params), pblk, count, page, 0);
|
2019-10-22 04:39:27 +02:00
|
|
|
if (!bio) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
2019-11-04 15:14:37 +01:00
|
|
|
ret = submit_bio_wait(bio);
|
|
|
|
if (ret) {
|
2019-10-22 04:39:27 +02:00
|
|
|
printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
|
2019-11-11 20:48:46 +01:00
|
|
|
kparams->params.flags |= CBD_FLAG_ERROR;
|
2019-10-22 04:39:27 +02:00
|
|
|
}
|
|
|
|
bio_put(bio);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-11-01 22:41:11 +01:00
|
|
|
void
|
2019-11-02 15:58:09 +01:00
|
|
|
pblk_write_endio(struct bio* bio)
|
2019-11-01 22:41:11 +01:00
|
|
|
{
|
2019-11-11 20:48:46 +01:00
|
|
|
struct compress_params* kparams = bio->bi_private;
|
2019-11-14 00:13:12 +01:00
|
|
|
struct page* page = bio->bi_io_vec[0].bv_page;
|
2019-11-01 22:41:11 +01:00
|
|
|
|
|
|
|
if (bio->bi_status != BLK_STS_OK) {
|
2019-11-14 00:13:12 +01:00
|
|
|
printk(KERN_ERR "%s: I/O error\n", __func__);
|
2019-11-11 20:48:46 +01:00
|
|
|
kparams->params.flags |= CBD_FLAG_ERROR;
|
2019-11-14 00:13:12 +01:00
|
|
|
SetPageError(page);
|
2019-11-02 15:58:09 +01:00
|
|
|
}
|
2019-11-14 00:13:12 +01:00
|
|
|
ClearPageDirty(page);
|
|
|
|
unlock_page(page);
|
2019-11-01 22:41:11 +01:00
|
|
|
bio_put(bio);
|
|
|
|
}
|
|
|
|
|
2019-10-22 04:39:27 +02:00
|
|
|
void
|
2019-11-11 20:48:46 +01:00
|
|
|
pblk_write(struct compress_params* kparams,
|
2019-11-14 00:13:12 +01:00
|
|
|
u64 pblk, u32 count, struct page* page)
|
2019-10-22 04:39:27 +02:00
|
|
|
{
|
|
|
|
struct bio* bio;
|
|
|
|
|
2019-11-12 22:25:46 +01:00
|
|
|
bio = pblk_io_prepare(kparams->dev, REQ_OP_WRITE,
|
2019-11-14 00:13:12 +01:00
|
|
|
pblk_size(&kparams->params), pblk, count, page, 0);
|
2019-10-22 04:39:27 +02:00
|
|
|
if (!bio) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
2019-11-11 20:48:46 +01:00
|
|
|
kparams->params.flags |= CBD_FLAG_ERROR;
|
2019-11-14 00:13:12 +01:00
|
|
|
SetPageError(page);
|
|
|
|
unlock_page(page);
|
2019-10-22 04:39:27 +02:00
|
|
|
return;
|
|
|
|
}
|
2019-11-02 15:58:09 +01:00
|
|
|
bio->bi_end_io = pblk_write_endio;
|
2019-11-11 20:48:46 +01:00
|
|
|
bio->bi_private = kparams;
|
2019-10-22 04:39:27 +02:00
|
|
|
submit_bio(bio);
|
|
|
|
}
|
2019-11-14 00:13:12 +01:00
|
|
|
|
|
|
|
/*
 * Shared completion state for a multi-bio pblk_writev() request.
 * One pblk_iov is allocated per call and freed by whichever
 * context drops the final reference in `remain`.
 */
struct pblk_iov
{
/* Device parameters; used by the endio handler to flag errors. */
struct compress_params* kparams;
/* Outstanding reference count (initialized to the block count;
 * each bio completion drops one, the submitter drops the slack). */
atomic_t remain;
};
|
|
|
|
|
|
|
|
void
|
|
|
|
pblk_writev_endio(struct bio* bio)
|
|
|
|
{
|
|
|
|
struct pblk_iov* iov = bio->bi_private;
|
|
|
|
struct compress_params* kparams = iov->kparams;
|
|
|
|
struct page* page = bio->bi_io_vec[0].bv_page;
|
|
|
|
|
|
|
|
if (bio->bi_status != BLK_STS_OK) {
|
|
|
|
printk(KERN_ERR "%s: I/O error\n", __func__);
|
|
|
|
kparams->params.flags |= CBD_FLAG_ERROR;
|
|
|
|
SetPageError(page);
|
|
|
|
}
|
|
|
|
if (atomic_dec_and_test(&iov->remain)) {
|
|
|
|
ClearPageDirty(page);
|
|
|
|
unlock_page(page);
|
|
|
|
kfree(iov);
|
|
|
|
}
|
|
|
|
bio_put(bio);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
pblk_writev(struct compress_params* kparams,
|
|
|
|
u64* pblkv, u32 count, struct page* page)
|
|
|
|
{
|
|
|
|
u32 pblk_len = pblk_size(&kparams->params);
|
|
|
|
struct pblk_iov* iov;
|
|
|
|
u32 idx;
|
|
|
|
u32 page_off;
|
|
|
|
u32 nr_bio;
|
|
|
|
u64 pblk;
|
|
|
|
u32 iov_nr_pblk;
|
|
|
|
struct bio* bio;
|
|
|
|
|
|
|
|
BUG_ON(pblk_len * count > PAGE_SIZE);
|
|
|
|
iov = kmalloc(sizeof(struct pblk_iov), GFP_KERNEL);
|
|
|
|
if (!iov) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
iov->kparams = kparams;
|
|
|
|
atomic_set(&iov->remain, count);
|
|
|
|
idx = 0;
|
|
|
|
page_off = 0;
|
|
|
|
nr_bio = 0;
|
|
|
|
while (idx < count) {
|
|
|
|
pblk = pblkv[idx];
|
|
|
|
iov_nr_pblk = 1;
|
|
|
|
++idx;
|
|
|
|
while (idx < count && pblkv[idx] == pblk + iov_nr_pblk) {
|
|
|
|
++iov_nr_pblk;
|
|
|
|
++idx;
|
|
|
|
}
|
|
|
|
bio = pblk_io_prepare(kparams->dev, REQ_OP_WRITE,
|
|
|
|
pblk_len, pblk, iov_nr_pblk, page, page_off);
|
|
|
|
if (!bio) {
|
|
|
|
printk(KERN_ERR "%s: out of memory\n", __func__);
|
|
|
|
goto err_free;
|
|
|
|
}
|
|
|
|
++nr_bio;
|
|
|
|
bio->bi_end_io = pblk_writev_endio;
|
|
|
|
bio->bi_private = iov;
|
|
|
|
submit_bio(bio);
|
|
|
|
page_off += pblk_len * iov_nr_pblk;
|
|
|
|
}
|
|
|
|
if (atomic_sub_and_test(count - nr_bio, &iov->remain)) {
|
|
|
|
ClearPageDirty(page);
|
|
|
|
unlock_page(page);
|
|
|
|
kfree(iov);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
err_free:
|
|
|
|
kfree(iov);
|
|
|
|
err:
|
|
|
|
kparams->params.flags |= CBD_FLAG_ERROR;
|
|
|
|
SetPageError(page);
|
|
|
|
unlock_page(page);
|
|
|
|
}
|