Move compress_{alloc,free}_pages up for use by async I/O
This commit is contained in:
parent
7326fe2e14
commit
26152ce1ec
|
@ -114,6 +114,56 @@ dm_target_pblk_size(struct dm_target* ti)
|
|||
return ti->len >> (PBLK_SHIFT - SECTOR_SHIFT);
|
||||
}
|
||||
|
||||
/*************************************
|
||||
* Page level memory allocator
|
||||
**************************************/
|
||||
|
||||
static void*
|
||||
compress_alloc_pages(size_t size)
|
||||
{
|
||||
unsigned int order = get_order(size);
|
||||
void* ret;
|
||||
|
||||
if (size > (PAGE_SIZE * 128) || order > 7) {
|
||||
printk(KERN_ERR "%s: size %zu order %u too large\n", __func__, size, order);
|
||||
return NULL;
|
||||
}
|
||||
ret = (void*)__get_free_pages(GFP_KERNEL, order);
|
||||
if (!ret) {
|
||||
printk(KERN_ERR "%s: failed to alloc %zu bytes\n", __func__, size);
|
||||
}
|
||||
memset(ret, 0, size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Free a buffer previously obtained from compress_alloc_pages().
 *
 * @ptr:  buffer returned by compress_alloc_pages(), or NULL (no-op).
 * @size: the same size that was passed to compress_alloc_pages(), so
 *        that get_order() recomputes the original allocation order.
 *
 * Before freeing, performs a debug leak check: every page in the
 * 2^order block is scanned for outstanding references (e.g. a page
 * still mapped into an in-flight bio).  If any are found, the block is
 * deliberately leaked instead of freed, to avoid a use-after-free.
 */
static void
compress_free_pages(void* ptr, size_t size)
{
	unsigned int order = get_order(size);
	size_t n;
	size_t in_use = 0;

	/* NULL is a no-op, mirroring free_pages()/kfree() semantics. */
	if (!ptr) {
		return;
	}

	for (n = 0; n < (1 << order); ++n) {
		struct page* pg = virt_to_page(ptr + n * PAGE_SIZE);
		int refcount = page_ref_count(pg);
		if (n == 0) {
			/* The first page carries the allocation's own
			 * reference from __get_free_pages(); discount it so
			 * only *extra* references remain.  NOTE(review):
			 * assumes a non-compound allocation (no __GFP_COMP),
			 * where tail pages hold no base reference — confirm
			 * against the matching alloc path. */
			--refcount;
		}
		if (refcount) {
			++in_use;
		}
	}
	/* Leak rather than free pages somebody still references; after the
	 * loop, n == total page count, so the message reads "X of Y". */
	if (in_use) {
		printk(KERN_ERR "%s: *** %zu of %zu pages in use ***\n", __func__, in_use, n);
		return;
	}
	free_pages((unsigned long)ptr, order);
}
|
||||
|
||||
/**************************************
|
||||
* Core low-level I/O.
|
||||
*
|
||||
|
@ -631,52 +681,6 @@ lblk_read(struct dm_compress* dc, u64 idx)
|
|||
* Main functions
|
||||
**************************************/
|
||||
|
||||
static void*
|
||||
compress_alloc_pages(size_t size)
|
||||
{
|
||||
unsigned int order = get_order(size);
|
||||
void* ret;
|
||||
|
||||
if (size > (PAGE_SIZE * 128) || order > 7) {
|
||||
printk(KERN_ERR "%s: size %zu order %u too large\n", __func__, size, order);
|
||||
return NULL;
|
||||
}
|
||||
ret = (void*)__get_free_pages(GFP_KERNEL, order);
|
||||
if (!ret) {
|
||||
printk(KERN_ERR "%s: failed to alloc %zu bytes\n", __func__, size);
|
||||
}
|
||||
memset(ret, 0, size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Free a buffer previously obtained from compress_alloc_pages().
 *
 * @ptr:  buffer returned by compress_alloc_pages(), or NULL (no-op).
 * @size: the same size that was passed to compress_alloc_pages(), so
 *        that get_order() recomputes the original allocation order.
 *
 * Before freeing, performs a debug leak check: every page in the
 * 2^order block is scanned for outstanding references (e.g. a page
 * still mapped into an in-flight bio).  If any are found, the block is
 * deliberately leaked instead of freed, to avoid a use-after-free.
 */
static void
compress_free_pages(void* ptr, size_t size)
{
	unsigned int order = get_order(size);
	size_t n;
	size_t in_use = 0;

	/* NULL is a no-op, mirroring free_pages()/kfree() semantics. */
	if (!ptr) {
		return;
	}

	for (n = 0; n < (1 << order); ++n) {
		struct page* pg = virt_to_page(ptr + n * PAGE_SIZE);
		int refcount = page_ref_count(pg);
		if (n == 0) {
			/* The first page carries the allocation's own
			 * reference from __get_free_pages(); discount it so
			 * only *extra* references remain.  NOTE(review):
			 * assumes a non-compound allocation (no __GFP_COMP),
			 * where tail pages hold no base reference — confirm
			 * against the matching alloc path. */
			--refcount;
		}
		if (refcount) {
			++in_use;
		}
	}
	/* Leak rather than free pages somebody still references; after the
	 * loop, n == total page count, so the message reads "X of Y". */
	if (in_use) {
		printk(KERN_ERR "%s: *** %zu of %zu pages in use ***\n", __func__, in_use, n);
		return;
	}
	free_pages((unsigned long)ptr, order);
}
|
||||
|
||||
static void
|
||||
compress_free_buffers(struct dm_compress* dc)
|
||||
{
|
||||
|
|
Loading…
Reference in New Issue