/*
 * Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include <linux/lz4.h>

#include <linux/dm-compress.h>

/**************************************
 * Core memory management.
 **************************************/

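/*
 * Note: these are thin wrappers around the kernel page allocator. The
 * plain variants allocate with GFP_KERNEL and may sleep; the _nowait
 * variants use GFP_NOWAIT and can return NULL under memory pressure,
 * so callers must be prepared to fall back or retry.
 */
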
struct page*
cbd_alloc_page(void)
{
        return alloc_page(GFP_KERNEL);
}

struct page*
cbd_alloc_page_nowait(void)
{
        return alloc_page(GFP_NOWAIT);
}

void
cbd_free_page(struct page* page)
{
        __free_page(page);
}

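/*
 * The *_pages variants allocate a single physically contiguous run of
 * pages of order get_order(len * PAGE_SIZE); len is a page count, not a
 * byte count. Contrast with cbd_alloc_pagev() below, which fills a vector
 * of independent order-0 pages and so does not depend on higher-order
 * allocations succeeding.
 */
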
struct page*
cbd_alloc_pages(size_t len)
{
        return alloc_pages(GFP_KERNEL, get_order(len * PAGE_SIZE));
}

struct page*
cbd_alloc_pages_nowait(size_t len)
{
        return alloc_pages(GFP_NOWAIT, get_order(len * PAGE_SIZE));
}

void
cbd_free_pages(struct page* pages, size_t len)
{
        __free_pages(pages, get_order(len * PAGE_SIZE));
}

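/*
 * Allocate len individual pages into pagev[]. On failure, every page
 * allocated so far is released and its slot reset to NULL, so the vector
 * is left in a consistent state.
 */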
bool
cbd_alloc_pagev(struct page** pagev, size_t len)
{
        size_t n;

        for (n = 0; n < len; ++n) {
                pagev[n] = cbd_alloc_page();
                if (!pagev[n]) {
                        goto err;
                }
        }

        return true;

err:
        while (n--) {
                cbd_free_page(pagev[n]);
                pagev[n] = NULL;
        }
        return false;
}

void
cbd_free_pagev(struct page** pagev, size_t len)
{
        size_t n;

        for (n = 0; n < len; ++n) {
                cbd_free_page(pagev[n]);
                pagev[n] = NULL;
        }
}

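/*
 * Typical use of the vector helpers (illustrative sketch only; the
 * CBD_BLOCKS_PER_EXTENT count is a made-up example value):
 *
 *     struct page* pagev[CBD_BLOCKS_PER_EXTENT];
 *
 *     if (!cbd_alloc_pagev(pagev, CBD_BLOCKS_PER_EXTENT))
 *             return -ENOMEM;
 *     ...fill or read the pages...
 *     cbd_free_pagev(pagev, CBD_BLOCKS_PER_EXTENT);
 */
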
/**************************************
 * Core low-level I/O.
 *
 * pblk and count are in units of physical blocks (4096 bytes), NOT sectors.
 * pagev is a vector of pages (obtained via cbd_alloc_page and friends).
 **************************************/

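/*
 * Build (but do not submit) a bio for the given operation covering count
 * physical blocks starting at pblk. One full page is attached per block;
 * returns NULL if the bio cannot be allocated.
 */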
static struct bio*
pblk_io_prepare(struct block_device* bdev, unsigned int op,
                u64 pblk, u32 count, struct page** pagev)
{
        struct bio* bio;
        u32 n;

        bio = bio_alloc(GFP_KERNEL, count);
        if (!bio) {
                printk(KERN_ERR "%s: out of memory\n", __func__);
                return NULL;
        }
        bio_set_dev(bio, bdev);
        bio->bi_opf = op;

        bio->bi_iter.bi_sector = (pblk << (PBLK_SHIFT - SECTOR_SHIFT));
        for (n = 0; n < count; ++n) {
                if (bio_add_page(bio, pagev[n], PAGE_SIZE, 0) != PAGE_SIZE) {
                        BUG();
                }
        }

        return bio;
}

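/*
 * Synchronous read: submit the bio and wait for completion. Returns 0 on
 * success or a negative errno from submit_bio_wait().
 */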
int
pblk_read_wait(struct cbd_params* params,
               u64 pblk, u32 count, struct page** pagev)
{
        int ret;
        struct bio* bio;

        bio = pblk_io_prepare(params->priv, REQ_OP_READ, pblk, count, pagev);
        if (!bio) {
                printk(KERN_ERR "%s: out of memory\n", __func__);
                return -ENOMEM;
        }
        ret = submit_bio_wait(bio);
        if (ret) {
                printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
        }
        bio_put(bio);

        return ret;
}

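/*
 * Asynchronous read: submit the bio and return without waiting. Completion
 * is reported through the caller-supplied endio callback, with endio_priv
 * available via bio->bi_private.
 */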
int
pblk_read(struct cbd_params* params,
          u64 pblk, u32 count, struct page** pagev,
          pblk_endio_t endio, void* endio_priv)
{
        int ret;
        struct bio* bio;

        bio = pblk_io_prepare(params->priv, REQ_OP_READ, pblk, count, pagev);
        if (!bio) {
                printk(KERN_ERR "%s: out of memory\n", __func__);
                return -ENOMEM;
        }
        bio->bi_end_io = endio;
        bio->bi_private = endio_priv;

        ret = submit_bio(bio);
        if (ret != 0) {
                printk(KERN_ERR "%s: submit_bio failed: %d\n", __func__, ret);
        }
        bio_put(bio);

        return ret;
}

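/*
 * Default write completion handler: unlock each page and clear its dirty
 * bit, flag the pages with an error if the bio failed, then drop the bio
 * reference.
 */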
void
pblk_endio(struct bio* bio)
{
        u32 n;
        struct page* page;

        BUG_ON(!bio);
        for (n = 0; n < bio->bi_max_vecs; ++n) {
                page = bio->bi_io_vec[n].bv_page;
                unlock_page(page);
                ClearPageDirty(page);
        }
        if (bio->bi_status != BLK_STS_OK) {
                for (n = 0; n < bio->bi_max_vecs; ++n) {
                        page = bio->bi_io_vec[n].bv_page;
                        SetPageError(page);
                }
        }
        bio_put(bio);
}

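/*
 * Asynchronous write: submit the bio with pblk_endio() as the completion
 * handler. Writes that would land inside the on-disk header
 * (pblk < CBD_HEADER_BLOCKS) are rejected and completed with an I/O error
 * instead of being submitted.
 */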
void
pblk_write(struct cbd_params* params,
           u64 pblk, u32 count, struct page** pagev)
{
        struct bio* bio;

        bio = pblk_io_prepare(params->priv, REQ_OP_WRITE, pblk, count, pagev);
        if (!bio) {
                printk(KERN_ERR "%s: out of memory\n", __func__);
                return;
        }
        bio->bi_end_io = pblk_endio;

        if (pblk < CBD_HEADER_BLOCKS) {
                printk(KERN_ERR "%s: *** Attempt to write header\n", __func__);
                dump_stack();
                bio->bi_status = BLK_STS_IOERR;
                pblk_endio(bio);
                return;
        }

        submit_bio(bio);
}