cbd/dm-compress/util.c

/*
 * Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/dm-compress.h>

/**************************************
 * Core memory management.
 **************************************/

struct page*
cbd_alloc_page(void)
{
	return alloc_page(GFP_KERNEL);
}

void
cbd_free_page(struct page* page)
{
	__free_page(page);
}

struct page*
cbd_alloc_pages(size_t len)
{
	return alloc_pages(GFP_KERNEL, get_order(len * PAGE_SIZE));
}

void
cbd_free_pages(struct page* pages, size_t len)
{
	__free_pages(pages, get_order(len * PAGE_SIZE));
}
bool
cbd_alloc_pagev(struct page** pagev, size_t len)
{
	size_t n;

	for (n = 0; n < len; ++n) {
		pagev[n] = cbd_alloc_page();
		if (!pagev[n]) {
			goto err;
		}
	}
	return true;

err:
	while (n--) {
		cbd_free_page(pagev[n]);
		pagev[n] = NULL;
	}
	return false;
}

void
cbd_free_pagev(struct page** pagev, size_t len)
{
	size_t n;

	for (n = 0; n < len; ++n) {
		cbd_free_page(pagev[n]);
		pagev[n] = NULL;
	}
}

/**************************************
 * Core low-level I/O.
 *
 * pblk offsets and counts are in units of physical blocks (4096 bytes),
 * NOT 512-byte sectors.  Data is passed as a struct page* (obtained via
 * cbd_alloc_page() and friends).
 **************************************/
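
/*
 * For illustration (assuming a 4096-byte physical block and 512-byte
 * sectors, as noted above):
 *
 *	u32 pblk_len = pblk_size(&kparams->params);	// 4096
 *	sector_t sec = pblk * (pblk_len / SECTOR_SIZE);	// 8 sectors per pblk
 *
 * so a single PAGE_SIZE page holds PAGE_SIZE / pblk_len physical blocks,
 * which is the limit enforced by the BUG_ON checks below.
 */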

static struct bio*
pblk_io_prepare(struct block_device* bdev, unsigned int op,
	u32 pblk_len, u64 pblk, u32 count, struct page* page, u32 page_off)
{
	struct bio* bio;

	BUG_ON(page_off + pblk_len * count > PAGE_SIZE);

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		printk(KERN_ERR "%s: out of memory\n", __func__);
		return NULL;
	}
	bio_set_dev(bio, bdev);
	bio->bi_opf = op;
	bio->bi_iter.bi_sector = pblk * (pblk_len / SECTOR_SIZE);
	if (bio_add_page(bio, page, pblk_len * count, page_off) == 0) {
		BUG();
	}

	return bio;
}
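
/*
 * Synchronously read count physical blocks starting at pblk into page,
 * beginning at offset 0.  Returns 0 on success or a negative errno.
 */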
int
pblk_read_wait(struct compress_params* kparams,
	u64 pblk, u32 count, struct page* page)
{
	int ret;
	struct bio* bio;

	bio = pblk_io_prepare(kparams->dev, REQ_OP_READ,
		pblk_size(&kparams->params), pblk, count, page, 0);
	if (!bio) {
		printk(KERN_ERR "%s: out of memory\n", __func__);
		return -ENOMEM;
	}
	ret = submit_bio_wait(bio);
	if (ret) {
		printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
	}
	bio_put(bio);

	return ret;
}
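
/*
 * Synchronously read count (possibly non-contiguous) physical blocks,
 * given by the vector pblkv, into consecutive pblk-sized slots of page.
 * Returns 0 on success or a negative errno.
 */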
int
pblk_readv_wait(struct compress_params* kparams,
	u64* pblkv, u32 count, struct page* page)
{
	int ret = 0;
	u32 pblk_len = pblk_size(&kparams->params);
	u32 n;
	u32 page_off;
	struct bio* bio;

	/* XXX: Issue non-blocking reads for parallelism? */
	for (n = 0, page_off = 0; n < count; ++n, page_off += pblk_len) {
		bio = pblk_io_prepare(kparams->dev, REQ_OP_READ,
			pblk_len, pblkv[n], 1, page, page_off);
		if (!bio) {
			printk(KERN_ERR "%s: out of memory\n", __func__);
			return -ENOMEM;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
		if (ret) {
			printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
			return ret;
		}
	}

	return ret;
}
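
/*
 * Synchronously write count physical blocks starting at pblk from page.
 * On failure, CBD_FLAG_ERROR is set on the device parameters.
 * Returns 0 on success or a negative errno.
 */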
int
pblk_write_wait(struct compress_params* kparams,
	u64 pblk, u32 count, struct page* page)
{
	int ret;
	struct bio* bio;

	bio = pblk_io_prepare(kparams->dev, REQ_OP_WRITE,
		pblk_size(&kparams->params), pblk, count, page, 0);
	if (!bio) {
		printk(KERN_ERR "%s: out of memory\n", __func__);
		return -ENOMEM;
	}
	ret = submit_bio_wait(bio);
	if (ret) {
		printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
		kparams->params.flags |= CBD_FLAG_ERROR;
	}
	bio_put(bio);

	return ret;
}
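
/*
 * Completion handler for pblk_write().  On error, CBD_FLAG_ERROR and
 * PageError are set; in all cases the page is cleaned and unlocked.
 */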
void
pblk_write_endio(struct bio* bio)
{
	struct compress_params* kparams = bio->bi_private;
	struct page* page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_status != BLK_STS_OK) {
		printk(KERN_ERR "%s: I/O error\n", __func__);
		kparams->params.flags |= CBD_FLAG_ERROR;
		SetPageError(page);
	}
	ClearPageDirty(page);
	unlock_page(page);
	bio_put(bio);
}
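
/*
 * Asynchronously write count physical blocks starting at pblk from page.
 * The page must be locked by the caller; it is unlocked on completion
 * (or immediately if the bio cannot be allocated).
 */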
void
pblk_write(struct compress_params* kparams,
	u64 pblk, u32 count, struct page* page)
{
	struct bio* bio;

	bio = pblk_io_prepare(kparams->dev, REQ_OP_WRITE,
		pblk_size(&kparams->params), pblk, count, page, 0);
	if (!bio) {
		printk(KERN_ERR "%s: out of memory\n", __func__);
		kparams->params.flags |= CBD_FLAG_ERROR;
		SetPageError(page);
		unlock_page(page);
		return;
	}
	bio->bi_end_io = pblk_write_endio;
	bio->bi_private = kparams;
	submit_bio(bio);
}
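
/*
 * Per-call state for pblk_writev().  remain is used to detect when the
 * last outstanding bio has completed, at which point the page is
 * cleaned, unlocked, and this structure is freed.
 */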
struct pblk_iov
{
	struct compress_params* kparams;
	atomic_t remain;
};

void
pblk_writev_endio(struct bio* bio)
{
	struct pblk_iov* iov = bio->bi_private;
	struct compress_params* kparams = iov->kparams;
	struct page* page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_status != BLK_STS_OK) {
		printk(KERN_ERR "%s: I/O error\n", __func__);
		kparams->params.flags |= CBD_FLAG_ERROR;
		SetPageError(page);
	}
	if (atomic_dec_and_test(&iov->remain)) {
		ClearPageDirty(page);
		unlock_page(page);
		kfree(iov);
	}
	bio_put(bio);
}
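
/*
 * Asynchronously write count physical blocks from page to the (possibly
 * non-contiguous) locations given in pblkv.  Runs of contiguous physical
 * blocks are coalesced into a single bio.  The page must be locked by
 * the caller; the last completion (or this function, on error) unlocks it.
 */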
void
pblk_writev(struct compress_params* kparams,
	u64* pblkv, u32 count, struct page* page)
{
	u32 pblk_len = pblk_size(&kparams->params);
	struct pblk_iov* iov;
	u32 idx;
	u32 page_off;
	u32 nr_bio;
	u64 pblk;
	u32 iov_nr_pblk;
	struct bio* bio;

	BUG_ON(pblk_len * count > PAGE_SIZE);

	iov = kmalloc(sizeof(struct pblk_iov), GFP_KERNEL);
	if (!iov) {
		printk(KERN_ERR "%s: out of memory\n", __func__);
		goto err;
	}
	iov->kparams = kparams;
	atomic_set(&iov->remain, count);

	idx = 0;
	page_off = 0;
	nr_bio = 0;
	while (idx < count) {
		/* Coalesce a run of physically contiguous blocks into one bio. */
		pblk = pblkv[idx];
		iov_nr_pblk = 1;
		++idx;
		while (idx < count && pblkv[idx] == pblk + iov_nr_pblk) {
			++iov_nr_pblk;
			++idx;
		}
		bio = pblk_io_prepare(kparams->dev, REQ_OP_WRITE,
			pblk_len, pblk, iov_nr_pblk, page, page_off);
		if (!bio) {
			printk(KERN_ERR "%s: out of memory\n", __func__);
			goto err_free;
		}
		++nr_bio;
		bio->bi_end_io = pblk_writev_endio;
		bio->bi_private = iov;
		submit_bio(bio);
		page_off += pblk_len * iov_nr_pblk;
	}

	/*
	 * remain was initialized to count, but each of the nr_bio submitted
	 * bios decrements it by only one.  Subtract the difference so that
	 * remain reaches zero when the last bio completes; if it is already
	 * zero, all bios have finished and cleanup falls to us.
	 */
	if (atomic_sub_and_test(count - nr_bio, &iov->remain)) {
		ClearPageDirty(page);
		unlock_page(page);
		kfree(iov);
	}
	return;

err_free:
	/*
	 * Any already-submitted bios still reference iov, so it may only be
	 * freed here if nothing was submitted; otherwise adjust remain and
	 * let the last completion (or this path, if all bios have already
	 * finished) perform the cleanup.
	 */
	kparams->params.flags |= CBD_FLAG_ERROR;
	SetPageError(page);
	if (nr_bio == 0) {
		kfree(iov);
		unlock_page(page);
	} else if (atomic_sub_and_test(count - nr_bio, &iov->remain)) {
		ClearPageDirty(page);
		unlock_page(page);
		kfree(iov);
	}
	return;

err:
	kparams->params.flags |= CBD_FLAG_ERROR;
	SetPageError(page);
	unlock_page(page);
}