author     Christoph Lameter <cl@linux.com>        2012-09-04 23:18:33 +0000
committer  Pekka Enberg <penberg@kernel.org>       2012-09-05 12:00:36 +0300
commit     db265eca77000c5dafc5608975afe8dafb2a02d5 (patch)
tree       1c33709fa115e8e814e515f4ee535314ed090ab1 /mm/slub.c
parent     12c3667fb780e20360ad0bde32dfb3591ef609ad (diff)
mm/sl[aou]b: Move duping of slab name to slab_common.c
Duping of the slab name has to be done by each slab allocator. Moving this code to slab_common avoids duplicate implementations.

With this patch we have common string handling for all slab allocators: strings passed to kmem_cache_create() are copied internally, so subsystems can use temporary strings to create slab caches.

Slabs allocated in the early stages of bootstrap will never be freed (they cannot be freed, since they are essential to slab allocator operations). During bootstrap we therefore do not have to worry about duping names.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
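To illustrate the new contract, here is a hypothetical caller (example_create_cache and its kasprintf-built name are illustrative only, not part of this patch): a subsystem can now hand kmem_cache_create() a temporary string and free it immediately, because the common code keeps its own copy.

#include <linux/kernel.h>
#include <linux/slab.h>

/*
 * Hypothetical example, not part of this patch: the cache name is built
 * in a temporary buffer and freed right after kmem_cache_create(),
 * which now duplicates the string internally in slab_common.
 */
static struct kmem_cache *example_create_cache(int id)
{
	struct kmem_cache *cache;
	char *name;

	name = kasprintf(GFP_KERNEL, "example-%d", id);	/* temporary string */
	if (!name)
		return NULL;

	cache = kmem_cache_create(name, 128, 0, SLAB_HWCACHE_ALIGN, NULL);

	kfree(name);	/* safe: the allocator keeps its own copy of the name */
	return cache;
}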
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  21
1 file changed, 2 insertions, 19 deletions
diff --git a/mm/slub.c b/mm/slub.c
index e5e09873f5e..91c9a2fe676 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,10 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s)
-{
- kfree(s->name);
-}
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif
@@ -3929,7 +3926,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
- char *n;
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
@@ -3948,13 +3944,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
return s;
}
- n = kstrdup(name, GFP_KERNEL);
- if (!n)
- return NULL;
-
s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
if (s) {
- if (kmem_cache_open(s, n,
+ if (kmem_cache_open(s, name,
size, align, flags, ctor)) {
int r;
@@ -3969,7 +3961,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
}
kmem_cache_free(kmem_cache, s);
}
- kfree(n);
return NULL;
}
@@ -5200,13 +5191,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
return err;
}
-static void kmem_cache_release(struct kobject *kobj)
-{
- struct kmem_cache *s = to_slab(kobj);
-
- kfree(s->name);
-}
-
static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
@@ -5214,7 +5198,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
static struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
- .release = kmem_cache_release
};
static int uevent_filter(struct kset *kset, struct kobject *kobj)
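For context, the single kstrdup() of the name now lives in the common kmem_cache_create() in mm/slab_common.c. The following is a simplified sketch of that caller side, an assumption based on the commit message with sanity checks and refcount handling omitted; it is not part of this diff.

/* Sketch of the counterpart in mm/slab_common.c (assumed, simplified). */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	char *n;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	n = kstrdup(name, GFP_KERNEL);	/* the one shared copy of the name */
	if (n) {
		s = __kmem_cache_create(n, size, align, flags, ctor);
		if (!s)
			kfree(n);	/* creation failed: drop our copy */
	}

	mutex_unlock(&slab_mutex);
	put_online_cpus();
	return s;
}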