cbd/dm-compress/util.c

/*
* Copyright (c) 2019 Tom Marshall <tdm.code@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/lz4.h>
#include <linux/dm-compress.h>
/**************************************
* Core memory management.
**************************************/
struct page*
cbd_alloc_page(void)
{
return alloc_page(GFP_KERNEL);
}
void
cbd_free_page(struct page* page)
{
__free_page(page);
}
struct page*
cbd_alloc_pages(size_t len)
{
return alloc_pages(GFP_KERNEL, get_order(len * PAGE_SIZE));
}
void
cbd_free_pages(struct page* pages, size_t len)
{
__free_pages(pages, get_order(len * PAGE_SIZE));
}
bool
cbd_alloc_pagev(struct page** pagev, size_t len)
{
size_t n;
for (n = 0; n < len; ++n) {
pagev[n] = cbd_alloc_page();
if (!pagev[n]) {
goto err;
}
}
return true;
err:
while (n--) {
cbd_free_page(pagev[n]);
pagev[n] = NULL;
}
return false;
}
void
cbd_free_pagev(struct page** pagev, size_t len)
{
size_t n;
for (n = 0; n < len; ++n) {
cbd_free_page(pagev[n]);
pagev[n] = NULL;
}
}
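/*
 * Example (illustrative sketch only, not part of the driver): allocate a
 * small vector of pages and release it again. The count of 4 is a
 * hypothetical value chosen by the caller.
 *
 *	struct page* pagev[4];
 *
 *	if (!cbd_alloc_pagev(pagev, ARRAY_SIZE(pagev)))
 *		return -ENOMEM;
 *	// ... use the pages ...
 *	cbd_free_pagev(pagev, ARRAY_SIZE(pagev));
 */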
/**************************************
* Core low-level I/O.
*
* pblk and count are in units of physical blocks (4096 bytes), NOT sectors.
* pagev is a vector of page pointers (obtained via cbd_alloc_page() and friends).
**************************************/
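/*
 * Example (illustrative sketch only): read one physical block into a single
 * page using pblk_read_wait() below. The block index is given in 4096-byte
 * physical blocks; the helpers shift it into 512-byte sectors internally.
 * "params" and "pblk" are assumed to be supplied by the caller.
 *
 *	struct page* page = cbd_alloc_page();
 *	int ret;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = pblk_read_wait(params, pblk, 1, &page);
 *	if (ret == 0) {
 *		u8* data = page_address(page);
 *		// ... inspect the 4096 bytes at data ...
 *	}
 *	cbd_free_page(page);
 */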
static struct bio*
pblk_io_prepare(struct cbd_params* params, unsigned int op,
u64 pblk, u32 count, struct page** pagev)
{
struct bio* bio;
u32 n;
bio = bio_alloc(GFP_KERNEL, count);
if (!bio) {
printk(KERN_ERR "%s: out of memory\n", __func__);
return NULL;
}
bio_set_dev(bio, (struct block_device*)params->priv);
bio->bi_opf = op;
bio->bi_iter.bi_sector = (pblk << (PBLK_SHIFT - SECTOR_SHIFT));
for (n = 0; n < count; ++n) {
if (bio_add_page(bio, pagev[n], PAGE_SIZE, 0) != PAGE_SIZE) {
BUG();
}
}
return bio;
}
int
pblk_read_wait(struct cbd_params* params,
u64 pblk, u32 count, struct page** pagev)
{
int ret;
struct bio* bio;
bio = pblk_io_prepare(params, REQ_OP_READ, pblk, count, pagev);
if (!bio) {
printk(KERN_ERR "%s: out of memory\n", __func__);
return -ENOMEM;
}
ret = submit_bio_wait(bio);
if (ret) {
printk(KERN_ERR "%s: submit_bio_wait failed: %d\n", __func__, ret);
}
bio_put(bio);
return ret;
}
int
pblk_read(struct cbd_params* params,
u64 pblk, u32 count, struct page** pagev,
pblk_endio_t endio, void* endio_priv)
{
struct bio* bio;
bio = pblk_io_prepare(params, REQ_OP_READ, pblk, count, pagev);
if (!bio) {
printk(KERN_ERR "%s: out of memory\n", __func__);
return -ENOMEM;
}
bio->bi_end_io = endio;
bio->bi_private = endio_priv;
/*
 * The read completes asynchronously: the endio callback owns the final
 * bio_put() (see pblk_endio), so the bio must not be released here, and
 * submit_bio() does not return an errno to propagate.
 */
submit_bio(bio);
return 0;
}
void
pblk_write(struct cbd_params* params,
u64 pblk, u32 count, struct page** pagev,
pblk_endio_t endio, void* endio_priv)
{
struct bio* bio;
bio = pblk_io_prepare(params, REQ_OP_WRITE, pblk, count, pagev);
if (!bio) {
printk(KERN_ERR "%s: out of memory\n", __func__);
return;
}
bio->bi_end_io = endio;
bio->bi_private = endio_priv;
if (pblk < CBD_HEADER_BLOCKS) {
printk(KERN_ERR "%s: *** Attempt to write header\n", __func__);
dump_stack();
bio->bi_status = BLK_STS_IOERR;
endio(bio);
return;
}
submit_bio(bio);
}
int
pblk_endio(struct bio* bio)
{
int ret;
ret = blk_status_to_errno(bio->bi_status);
bio_put(bio);
return ret;
}
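/*
 * Example (illustrative sketch only): issue an asynchronous read and pick up
 * the result in the completion callback. "my_ctx" and "my_read_endio" are
 * hypothetical names; pblk_endio() converts the bio status to an errno and
 * releases the bio.
 *
 *	static void my_read_endio(struct bio* bio)
 *	{
 *		struct my_ctx* ctx = bio->bi_private;
 *		ctx->error = pblk_endio(bio);
 *		complete(&ctx->done);
 *	}
 *
 *	// ...
 *	ret = pblk_read(params, pblk, 1, &page, my_read_endio, ctx);
 *	if (ret == 0)
 *		wait_for_completion(&ctx->done);
 */
/**************************************
* MD5 digest.
**************************************/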
/* Rotate x left by n bits (n must be in 1..31). */
static inline u32
cshift(u32 x, uint n)
{
return (x << n) | (x >> (32 - n));
}
#define A ctx->counter[0]
#define B ctx->counter[1]
#define C ctx->counter[2]
#define D ctx->counter[3]
#define X data
void
md5_init(struct md5* ctx)
{
ctx->sz[0] = 0;
ctx->sz[1] = 0;
D = 0x10325476;
C = 0x98badcfe;
B = 0xefcdab89;
A = 0x67452301;
}
#define F(x,y,z) ((x & y) | (~x & z))
#define G(x,y,z) ((x & z) | (y & ~z))
#define H(x,y,z) (x ^ y ^ z)
#define I(x,y,z) (y ^ (x | ~z))
#define DOIT(a,b,c,d,k,s,i,OP) \
a = b + cshift(a + OP(b,c,d) + X[k] + (i), s)
#define DO1(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,F)
#define DO2(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,G)
#define DO3(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,H)
#define DO4(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,I)
static inline void
calc(struct md5* ctx, u32* data)
{
u32 AA, BB, CC, DD;
AA = A;
BB = B;
CC = C;
DD = D;
/* Round 1 */
DO1(A,B,C,D,0,7,0xd76aa478);
DO1(D,A,B,C,1,12,0xe8c7b756);
DO1(C,D,A,B,2,17,0x242070db);
DO1(B,C,D,A,3,22,0xc1bdceee);
DO1(A,B,C,D,4,7,0xf57c0faf);
DO1(D,A,B,C,5,12,0x4787c62a);
DO1(C,D,A,B,6,17,0xa8304613);
DO1(B,C,D,A,7,22,0xfd469501);
DO1(A,B,C,D,8,7,0x698098d8);
DO1(D,A,B,C,9,12,0x8b44f7af);
DO1(C,D,A,B,10,17,0xffff5bb1);
DO1(B,C,D,A,11,22,0x895cd7be);
DO1(A,B,C,D,12,7,0x6b901122);
DO1(D,A,B,C,13,12,0xfd987193);
DO1(C,D,A,B,14,17,0xa679438e);
DO1(B,C,D,A,15,22,0x49b40821);
/* Round 2 */
DO2(A,B,C,D,1,5,0xf61e2562);
DO2(D,A,B,C,6,9,0xc040b340);
DO2(C,D,A,B,11,14,0x265e5a51);
DO2(B,C,D,A,0,20,0xe9b6c7aa);
DO2(A,B,C,D,5,5,0xd62f105d);
DO2(D,A,B,C,10,9,0x2441453);
DO2(C,D,A,B,15,14,0xd8a1e681);
DO2(B,C,D,A,4,20,0xe7d3fbc8);
DO2(A,B,C,D,9,5,0x21e1cde6);
DO2(D,A,B,C,14,9,0xc33707d6);
DO2(C,D,A,B,3,14,0xf4d50d87);
DO2(B,C,D,A,8,20,0x455a14ed);
DO2(A,B,C,D,13,5,0xa9e3e905);
DO2(D,A,B,C,2,9,0xfcefa3f8);
DO2(C,D,A,B,7,14,0x676f02d9);
DO2(B,C,D,A,12,20,0x8d2a4c8a);
/* Round 3 */
DO3(A,B,C,D,5,4,0xfffa3942);
DO3(D,A,B,C,8,11,0x8771f681);
DO3(C,D,A,B,11,16,0x6d9d6122);
DO3(B,C,D,A,14,23,0xfde5380c);
DO3(A,B,C,D,1,4,0xa4beea44);
DO3(D,A,B,C,4,11,0x4bdecfa9);
DO3(C,D,A,B,7,16,0xf6bb4b60);
DO3(B,C,D,A,10,23,0xbebfbc70);
DO3(A,B,C,D,13,4,0x289b7ec6);
DO3(D,A,B,C,0,11,0xeaa127fa);
DO3(C,D,A,B,3,16,0xd4ef3085);
DO3(B,C,D,A,6,23,0x4881d05);
DO3(A,B,C,D,9,4,0xd9d4d039);
DO3(D,A,B,C,12,11,0xe6db99e5);
DO3(C,D,A,B,15,16,0x1fa27cf8);
DO3(B,C,D,A,2,23,0xc4ac5665);
/* Round 4 */
DO4(A,B,C,D,0,6,0xf4292244);
DO4(D,A,B,C,7,10,0x432aff97);
DO4(C,D,A,B,14,15,0xab9423a7);
DO4(B,C,D,A,5,21,0xfc93a039);
DO4(A,B,C,D,12,6,0x655b59c3);
DO4(D,A,B,C,3,10,0x8f0ccc92);
DO4(C,D,A,B,10,15,0xffeff47d);
DO4(B,C,D,A,1,21,0x85845dd1);
DO4(A,B,C,D,8,6,0x6fa87e4f);
DO4(D,A,B,C,15,10,0xfe2ce6e0);
DO4(C,D,A,B,6,15,0xa3014314);
DO4(B,C,D,A,13,21,0x4e0811a1);
DO4(A,B,C,D,4,6,0xf7537e82);
DO4(D,A,B,C,11,10,0xbd3af235);
DO4(C,D,A,B,2,15,0x2ad7d2bb);
DO4(B,C,D,A,9,21,0xeb86d391);
A += AA;
B += BB;
C += CC;
D += DD;
}
/*
* From `Performance analysis of MD5' by Joseph D. Touch <touch@isi.edu>
*/
#if !defined(__BYTE_ORDER__) || !defined (__ORDER_BIG_ENDIAN__)
#error __BYTE_ORDER__ macros not defined
#endif
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static inline u32
swap_u32(u32 t)
{
u32 temp1, temp2;
temp1 = cshift(t, 16);
temp2 = temp1 >> 8;
temp1 &= 0x00ff00ff;
temp2 &= 0x00ff00ff;
temp1 <<= 8;
return temp1 | temp2;
}
#endif
/* Layout used to byte-swap the 64-byte input block two words at a time on big-endian hosts. */
struct x32 {
uint a:32;
uint b:32;
};
void
md5_update(struct md5* ctx, const void* data, size_t len)
{
const byte* p = data;
size_t old_sz = ctx->sz[0];
size_t offset;
ctx->sz[0] += len * 8;
if (ctx->sz[0] < old_sz)
++ctx->sz[1];
offset = (old_sz / 8) % 64;
while(len > 0){
size_t l = min(len, 64 - offset);
memcpy(ctx->save + offset, p, l);
offset += l;
p += l;
len -= l;
if (offset == 64) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
int i;
u32 current[16];
struct x32 *u = (struct x32*)ctx->save;
for (i = 0; i < 8; i++) {
current[2*i+0] = swap_u32(u[i].a);
current[2*i+1] = swap_u32(u[i].b);
}
calc(ctx, current);
#else
calc(ctx, (u32*)ctx->save);
#endif
offset = 0;
}
}
}
void
md5_final(struct md5* ctx, byte* buf)
{
byte zeros[72];
uint offset = (ctx->sz[0] / 8) % 64;
/* Pad with 0x80 and zeros so the 8-byte bit count ends exactly on a 64-byte block boundary. */
uint dstart = (120 - offset - 1) % 64 + 1;
*zeros = 0x80;
memset (zeros + 1, 0, sizeof(zeros) - 1);
zeros[dstart+0] = (ctx->sz[0] >> 0) & 0xff;
zeros[dstart+1] = (ctx->sz[0] >> 8) & 0xff;
zeros[dstart+2] = (ctx->sz[0] >> 16) & 0xff;
zeros[dstart+3] = (ctx->sz[0] >> 24) & 0xff;
zeros[dstart+4] = (ctx->sz[1] >> 0) & 0xff;
zeros[dstart+5] = (ctx->sz[1] >> 8) & 0xff;
zeros[dstart+6] = (ctx->sz[1] >> 16) & 0xff;
zeros[dstart+7] = (ctx->sz[1] >> 24) & 0xff;
md5_update(ctx, zeros, dstart + 8);
{
int i;
byte *r = buf;
for (i = 0; i < 4; ++i) {
r[4*i] = ctx->counter[i] & 0xFF;
r[4*i+1] = (ctx->counter[i] >> 8) & 0xFF;
r[4*i+2] = (ctx->counter[i] >> 16) & 0xFF;
r[4*i+3] = (ctx->counter[i] >> 24) & 0xFF;
}
}
}
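/*
 * Example (illustrative sketch only): compute the 16-byte digest of an
 * in-memory buffer with the helpers above. "buf" and "len" are hypothetical
 * inputs supplied by the caller.
 *
 *	struct md5 ctx;
 *	byte digest[16];
 *
 *	md5_init(&ctx);
 *	md5_update(&ctx, buf, len);
 *	md5_final(&ctx, digest);
 */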