[PATCH] zram percpu implementation
mani
manishrma at gmail.com
Tue Jul 16 14:57:34 EDT 2013
From 4d52438c9640e64203c2434a74e2abfe3a4168ec Mon Sep 17 00:00:00 2001
From: Manish Sharma <manishrma at gmail.com>
Date: Thu, 4 Apr 2013 10:32:08 +0530
Subject: [PATCH] zram percpu implementation
This patch creates per-CPU structures for the compression algorithm's
workspace buffers.

Notes:
1. This takes extra memory for the workspace buffers (one set per
   possible CPU).
2. I haven't seen any performance gain with this yet; I still need to
   find the root cause.
---
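For reviewers unfamiliar with the per-CPU API, here is a minimal,
self-contained sketch of the pattern this patch applies. It is not part
of the patch; the struct and field names simply mirror the diff below,
the helper names (zbuff_alloc/zbuff_free) are made up for illustration,
and error unwinding is trimmed for brevity.

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/lzo.h>

struct zram_buff {
	void *compress_workmem;	/* LZO1X_MEM_COMPRESS bytes of scratch */
	void *compress_buffer;	/* 2 pages: LZO output can exceed PAGE_SIZE */
};

static struct zram_buff __percpu *zbuff;

static int zbuff_alloc(void)
{
	int cpu;

	zbuff = alloc_percpu(struct zram_buff);
	if (!zbuff)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct zram_buff *b = per_cpu_ptr(zbuff, cpu);

		b->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
		b->compress_buffer =
			(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
		if (!b->compress_workmem || !b->compress_buffer)
			return -ENOMEM;	/* caller unwinds via zbuff_free() */
	}
	return 0;
}

static void zbuff_free(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct zram_buff *b = per_cpu_ptr(zbuff, cpu);

		kfree(b->compress_workmem);	/* kfree(NULL) is a no-op */
		if (b->compress_buffer)
			free_pages((unsigned long)b->compress_buffer, 1);
	}
	free_percpu(zbuff);
	zbuff = NULL;
}

In the write path, get_cpu_ptr() disables preemption, so the current
CPU's buffers cannot be reused underneath us until put_cpu_ptr():

	struct zram_buff *b = get_cpu_ptr(zbuff);

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, b->compress_buffer,
			       &clen, b->compress_workmem);
	put_cpu_ptr(zbuff);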
 drivers/staging/zram/zram_drv.c |   69 +++++++++++++++++++++++++++++++--------
 drivers/staging/zram/zram_drv.h |    9 +++--
 2 files changed, 62 insertions(+), 16 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 071e058..5aef8cc 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -271,9 +271,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
unsigned long handle;
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
+ struct zram_buff *zbuff;
page = bvec->bv_page;
- src = zram->compress_buffer;
+/* src = zram->compress_buffer; */
if (is_partial_io(bvec)) {
/*
@@ -318,9 +319,11 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = 0;
goto out;
}
+ zbuff = get_cpu_ptr(zram->zbuff);
+ src = zbuff->compress_buffer;
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
- zram->compress_workmem);
+ zbuff->compress_workmem);
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
@@ -329,6 +332,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (unlikely(ret != LZO_E_OK)) {
+ put_cpu_ptr(zram->zbuff);
pr_err("Compression failed! err=%d\n", ret);
goto out;
}
@@ -343,6 +347,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
handle = zs_malloc(zram->mem_pool, clen);
if (!handle) {
+ put_cpu_ptr(zram->zbuff);
pr_info("Error allocating memory for compressed "
"page: %u, size=%zu\n", index, clen);
ret = -ENOMEM;
@@ -366,7 +371,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zram_stat_inc(&zram->stats.pages_stored);
if (clen <= PAGE_SIZE / 2)
zram_stat_inc(&zram->stats.good_compress);
-
+ put_cpu_ptr(zram->zbuff);
out:
if (is_partial_io(bvec))
kfree(uncmem);
@@ -382,13 +387,13 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int ret;
if (rw == READ) {
- down_read(&zram->lock);
+/* down_read(&zram->lock);*/
ret = zram_bvec_read(zram, bvec, index, offset, bio);
- up_read(&zram->lock);
+/* up_read(&zram->lock);*/
} else {
- down_write(&zram->lock);
+/* down_write(&zram->lock);*/
ret = zram_bvec_write(zram, bvec, index, offset);
- up_write(&zram->lock);
+/* up_write(&zram->lock);*/
}
return ret;
@@ -506,15 +511,17 @@ error:
void __zram_reset_device(struct zram *zram)
{
size_t index;
+ int cpu = 0;
+ struct zram_buff *zbuff;
zram->init_done = 0;
/* Free various per-device buffers */
- kfree(zram->compress_workmem);
+/* kfree(zram->compress_workmem);
free_pages((unsigned long)zram->compress_buffer, 1);
zram->compress_workmem = NULL;
- zram->compress_buffer = NULL;
+ zram->compress_buffer = NULL; */
/* Free all pages that are still in this zram device */
for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
@@ -524,10 +531,17 @@ void __zram_reset_device(struct zram *zram)
zs_free(zram->mem_pool, handle);
}
-
+ for_each_possible_cpu(cpu) {
+ zbuff = per_cpu_ptr(zram->zbuff, cpu);
+ if (zbuff->compress_workmem)
+ kfree(zbuff->compress_workmem);
+ if (zbuff->compress_buffer)
+ free_pages((unsigned long)zbuff->compress_buffer, 1);
+ }
vfree(zram->table);
zram->table = NULL;
-
+ free_percpu(zram->zbuff);
+ zram->zbuff = NULL;
zs_destroy_pool(zram->mem_pool);
zram->mem_pool = NULL;
@@ -548,6 +562,8 @@ int zram_init_device(struct zram *zram)
{
int ret;
size_t num_pages;
+ int cpu = 0;
+ struct zram_buff *zbuff;
down_write(&zram->init_lock);
@@ -558,19 +574,44 @@ int zram_init_device(struct zram *zram)
zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
- zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+/* zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!zram->compress_workmem) {
pr_err("Error allocating compressor working memory!\n");
ret = -ENOMEM;
goto fail_no_table;
+ } */
+ zram->zbuff = alloc_percpu(struct zram_buff);
+ if (!zram->zbuff) {
+ pr_err("Error allocating per-cpu zbuff\n");
+ up_write(&zram->init_lock);
+ return -ENOMEM;
}
- zram->compress_buffer =
+
+/* zram->compress_buffer =
(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
goto fail_no_table;
+ } */
+
+ for_each_possible_cpu(cpu) {
+ pr_debug("%s: initializing buffers for cpu %d\n", __func__, cpu);
+ zbuff = per_cpu_ptr(zram->zbuff, cpu);
+ zbuff->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!zbuff->compress_workmem) {
+ pr_err("Error allocating compressor working memory!\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ zbuff->compress_buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!zbuff->compress_buffer) {
+ pr_err("Error allocating compressor buffer space\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
}
num_pages = zram->disksize >> PAGE_SHIFT;
@@ -628,7 +669,7 @@ static int create_device(struct zram *zram, int device_id)
{
int ret = 0;
- init_rwsem(&zram->lock);
+/* init_rwsem(&zram->lock);*/
init_rwsem(&zram->init_lock);
spin_lock_init(&zram->stat64_lock);
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index df2eec4..9adca81 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/percpu.h>
#include "../zsmalloc/zsmalloc.h"
@@ -86,10 +87,14 @@ struct zram_stats {
u32 bad_compress; /* % of pages with compression ratio>=75% */
};
-struct zram {
- struct zs_pool *mem_pool;
+struct zram_buff {
void *compress_workmem;
void *compress_buffer;
+};
+
+struct zram {
+ struct zs_pool *mem_pool;
+ struct zram_buff __percpu *zbuff;
struct table *table;
spinlock_t stat64_lock; /* protect 64-bit stats */
struct rw_semaphore lock; /* protect compression buffers and table
--
1.7.9.5
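
A note on the locking change above: per the comment in zram_drv.h,
zram->lock protected both the compression buffers and the table. With
per-CPU buffers, the buffer half of that job is taken over by
get_cpu_ptr()/put_cpu_ptr(), which disable and re-enable preemption
around the compress call (safe there, since lzo1x_1_compress() does
not sleep). Commenting the semaphore out entirely also drops the table
protection, though, so concurrent writes to the same index may race.
Also note that the write path now calls zs_malloc() between
get_cpu_ptr() and put_cpu_ptr(), i.e. with preemption disabled; if the
pool's GFP flags allow sleeping, that would be a sleep-in-atomic bug.
Both points probably want a finer-grained replacement for the removed
lock rather than plain removal.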