author		Alex Elder <elder@inktank.com>		2013-05-01 12:43:04 -0500
committer	Alex Elder <elder@inktank.com>		2013-05-02 11:58:30 -0500
commit		78c2a44aae2950ecf0279590572b861288714946
tree		0b08672df26d2ecc61726d4577657633871a2269	/drivers/block/rbd.c
parent		868311b1ebc9b203bae0d6d1f012ea5cbdadca03
rbd: allocate image object names with a slab allocator
The names of objects used for image object requests are always
fixed size.  So create a slab cache to manage them.  Define a new
function rbd_segment_name_free() to match rbd_segment_name()
(which is what supplies the dynamically-allocated name buffer).

This is part of:
    http://tracker.ceph.com/issues/3926

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
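For readers new to the pattern: a slab cache trades the generality of
kmalloc() for cheap, recyclable allocations of one fixed size, which is
exactly the situation with these name buffers.  The following is a
minimal standalone sketch of the create/alloc/free/destroy lifecycle the
patch relies on; the module and the names name_cache, name_alloc(),
name_free(), and NAME_BUF_SIZE are hypothetical (the patch itself uses
rbd_segment_name_cache sized at MAX_OBJ_NAME_SIZE + 1):

#include <linux/module.h>
#include <linux/slab.h>

#define NAME_BUF_SIZE	64	/* hypothetical fixed buffer size */

static struct kmem_cache *name_cache;

/* Allocate one fixed-size name buffer.  GFP_NOIO mirrors the patch:
 * the rbd caller sits on the block I/O path and must not recurse
 * into I/O to satisfy the allocation. */
static char *name_alloc(void)
{
	return kmem_cache_alloc(name_cache, GFP_NOIO);
}

/* Matched free helper; the cast drops the const qualifier, as in
 * rbd_segment_name_free() below. */
static void name_free(const char *name)
{
	kmem_cache_free(name_cache, (void *)name);
}

static int __init example_init(void)
{
	/* align = 1: these are byte buffers with no alignment needs */
	name_cache = kmem_cache_create("example_name", NAME_BUF_SIZE,
				       1, 0, NULL);
	return name_cache ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	kmem_cache_destroy(name_cache);
	name_cache = NULL;
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Pairing kmem_cache_alloc() with a dedicated free helper keeps callers
from mistakenly handing the buffer to kfree(), which is the point of
introducing rbd_segment_name_free() in this patch.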
Diffstat (limited to 'drivers/block/rbd.c')
-rw-r--r--	drivers/block/rbd.c	32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index a72842aa3b5..390946a078b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -345,8 +345,11 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock);
 static LIST_HEAD(rbd_client_list);	/* clients */
 static DEFINE_SPINLOCK(rbd_client_list_lock);
 
+/* Slab caches for frequently-allocated structures */
+
 static struct kmem_cache	*rbd_img_request_cache;
 static struct kmem_cache	*rbd_obj_request_cache;
+static struct kmem_cache	*rbd_segment_name_cache;
 
 static int rbd_img_request_submit(struct rbd_img_request *img_request);
 
@@ -985,7 +988,7 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	u64 segment;
 	int ret;
 
-	name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
+	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
@@ -1001,6 +1004,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
 	return name;
 }
 
+static void rbd_segment_name_free(const char *name)
+{
+	/* The explicit cast here is needed to drop the const qualifier */
+
+	kmem_cache_free(rbd_segment_name_cache, (void *)name);
+}
+
 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
 {
 	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
@@ -2033,7 +2043,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 		length = rbd_segment_length(rbd_dev, img_offset, resid);
 		obj_request = rbd_obj_request_create(object_name,
 						offset, length, type);
-		kfree(object_name);	/* object request has its own copy */
+		/* object request has its own copy of the object name */
+		rbd_segment_name_free(object_name);
 		if (!obj_request)
 			goto out_unwind;
 
@@ -5018,8 +5029,19 @@ static int rbd_slab_init(void)
 					sizeof (struct rbd_obj_request),
 					__alignof__(struct rbd_obj_request),
 					0, NULL);
-	if (rbd_obj_request_cache)
+	if (!rbd_obj_request_cache)
+		goto out_err;
+
+	rbd_assert(!rbd_segment_name_cache);
+	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
+					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
+	if (rbd_segment_name_cache)
 		return 0;
+out_err:
+	if (rbd_obj_request_cache) {
+		kmem_cache_destroy(rbd_obj_request_cache);
+		rbd_obj_request_cache = NULL;
+	}
 
 	kmem_cache_destroy(rbd_img_request_cache);
 	rbd_img_request_cache = NULL;
@@ -5029,6 +5051,10 @@ static int rbd_slab_init(void)
 
 static void rbd_slab_exit(void)
 {
+	rbd_assert(rbd_segment_name_cache);
+	kmem_cache_destroy(rbd_segment_name_cache);
+	rbd_segment_name_cache = NULL;
+
 	rbd_assert(rbd_obj_request_cache);
 	kmem_cache_destroy(rbd_obj_request_cache);
 	rbd_obj_request_cache = NULL;
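
The rbd_slab_init() hunk above follows the conventional unwind-on-failure
shape: each cache creation that fails jumps past the successful ones, and
the error path destroys caches in reverse creation order, resetting each
pointer to NULL so the assertions in rbd_slab_exit() can catch an
imbalance.  A reduced sketch of that shape, with hypothetical cache names
and sizes (not the driver's actual code):

#include <linux/errno.h>
#include <linux/slab.h>

static struct kmem_cache *cache_a;	/* hypothetical caches */
static struct kmem_cache *cache_b;
static struct kmem_cache *cache_c;

static int caches_init(void)
{
	cache_a = kmem_cache_create("a", 64, 8, 0, NULL);
	if (!cache_a)
		return -ENOMEM;

	cache_b = kmem_cache_create("b", 128, 8, 0, NULL);
	if (!cache_b)
		goto out_a;

	cache_c = kmem_cache_create("c", 32, 1, 0, NULL);
	if (!cache_c)
		goto out_b;

	return 0;

out_b:	/* tear down in reverse creation order, falling through */
	kmem_cache_destroy(cache_b);
	cache_b = NULL;
out_a:
	kmem_cache_destroy(cache_a);
	cache_a = NULL;
	return -ENOMEM;
}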