authorAkinobu Mita <akinobu.mita@gmail.com>2007-10-18 03:05:16 -0700
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-18 14:37:21 -0700
commitef1d7151d2901295278d5aada39c547ed8601419 (patch)
tree205782480d956655bd70a9f348fdfed6bda16795 /arch/x86
parentd435d862baca3e25e5eec236762a43251b1e7ffc (diff)
cpu hotplug: intel_cacheinfo: fix cpu hotplug error handling
- Fix resource leakage in the error path of detect_cache_attributes().

- Don't register the hotcpu notifier when cache_add_dev() returns an error.

- Introduce the cache_dev_map cpumask to track whether the cache interface
  for a CPU was successfully added by cache_add_dev().

  cache_add_dev() may fail with an out-of-memory error. To avoid calling
  cache_remove_dev() on an uninitialized cache interface when the CPU_DEAD
  event is delivered, we need the cache_dev_map cpumask.

  (We cannot move cache_add_dev() from the CPU_ONLINE event handler to the
  CPU_UP_PREPARE event handler, because cache_add_dev() needs to execute
  cpuid and store the results while its CPU is online.)

[nix.or.die@googlemail.com: fix a section mismatch warning]
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Andi Kleen <ak@suse.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Gabriel Craciunescu <nix.or.die@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
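The core of the fix is a registration-map pattern: record which CPUs actually completed cache_add_dev(), and make the CPU_DEAD teardown path a no-op for any CPU that never got that far. The following is a minimal userspace C sketch of that pattern, not the kernel code itself; dev_map, dev_state, add_dev() and remove_dev() are illustrative stand-ins for cache_dev_map, cpuid4_info, cache_add_dev() and cache_remove_dev().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

/* Tracks which CPUs completed add_dev(); analogous to cache_dev_map. */
static bool dev_map[NR_CPUS];
static char *dev_state[NR_CPUS];	/* per-CPU resource, may stay NULL */

static int add_dev(unsigned int cpu)
{
	dev_state[cpu] = malloc(64);
	if (!dev_state[cpu])
		return -1;		/* allocation failed: leave no trace */
	snprintf(dev_state[cpu], 64, "cache interface for cpu%u", cpu);

	dev_map[cpu] = true;		/* mark success only at the very end */
	return 0;
}

static void remove_dev(unsigned int cpu)
{
	/* CPU_DEAD can arrive for a CPU whose add_dev() failed: bail out. */
	if (!dev_map[cpu])
		return;
	dev_map[cpu] = false;

	free(dev_state[cpu]);
	dev_state[cpu] = NULL;
}

int main(void)
{
	if (add_dev(3) == 0)
		remove_dev(3);		/* safe: tears down what was added */
	remove_dev(5);			/* safe no-op: cpu5 was never added */
	return 0;
}

Marking the map only after every allocation has succeeded is what makes the teardown unconditional-looking code safe: the map entry is set if and only if the resources behind it exist.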
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 66
1 file changed, 45 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1826395ebee..297a2411694 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {
static void free_cache_attributes(unsigned int cpu)
{
+ int i;
+
+ for (i = 0; i < num_cache_leaves; i++)
+ cache_remove_shared_cpu_map(cpu, i);
+
kfree(cpuid4_info[cpu]);
cpuid4_info[cpu] = NULL;
}
@@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu)
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
struct _cpuid4_info *this_leaf;
- unsigned long j;
- int retval;
+ unsigned long j;
+ int retval;
cpumask_t oldmask;
if (num_cache_leaves == 0)
@@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
goto out;
/* Do cpuid and store the results */
- retval = 0;
for (j = 0; j < num_cache_leaves; j++) {
this_leaf = CPUID4_INFO_IDX(cpu, j);
retval = cpuid4_cache_lookup(j, this_leaf);
- if (unlikely(retval < 0))
+ if (unlikely(retval < 0)) {
+ int i;
+
+ for (i = 0; i < j; i++)
+ cache_remove_shared_cpu_map(cpu, i);
break;
+ }
cache_shared_cpu_map_setup(cpu, j);
}
set_cpus_allowed(current, oldmask);
out:
- if (retval)
- free_cache_attributes(cpu);
+ if (retval) {
+ kfree(cpuid4_info[cpu]);
+ cpuid4_info[cpu] = NULL;
+ }
+
return retval;
}
@@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = {
.sysfs_ops = &sysfs_ops,
};
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
kfree(cache_kobject[cpu]);
kfree(index_kobject[cpu]);
@@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
+ int err;
if (num_cache_leaves == 0)
return -ENOENT;
- detect_cache_attributes(cpu);
- if (cpuid4_info[cpu] == NULL)
- return -ENOENT;
+ err = detect_cache_attributes(cpu);
+ if (err)
+ return err;
/* Allocate all required memory */
cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
@@ -705,13 +718,15 @@ err_out:
return -ENOMEM;
}
+static cpumask_t cache_dev_map = CPU_MASK_NONE;
+
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct _index_kobject *this_object;
- int retval = 0;
+ int retval;
retval = cpuid4_cache_sysfs_init(cpu);
if (unlikely(retval < 0))
@@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
kobject_set_name(cache_kobject[cpu], "%s", "cache");
cache_kobject[cpu]->ktype = &ktype_percpu_entry;
retval = kobject_register(cache_kobject[cpu]);
+ if (retval < 0) {
+ cpuid4_cache_sysfs_exit(cpu);
+ return retval;
+ }
for (i = 0; i < num_cache_leaves; i++) {
this_object = INDEX_KOBJECT_PTR(cpu,i);
@@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
break;
}
}
+ if (!retval)
+ cpu_set(cpu, cache_dev_map);
+
return retval;
}
@@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
if (cpuid4_info[cpu] == NULL)
return;
- for (i = 0; i < num_cache_leaves; i++) {
- cache_remove_shared_cpu_map(cpu, i);
+ if (!cpu_isset(cpu, cache_dev_map))
+ return;
+ cpu_clear(cpu, cache_dev_map);
+
+ for (i = 0; i < num_cache_leaves; i++)
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
- }
kobject_unregister(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
- return;
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
@@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
- .notifier_call = cacheinfo_cpu_callback,
+ .notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
@@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void)
if (num_cache_leaves == 0)
return 0;
- register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
for_each_online_cpu(i) {
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+ int err;
+ struct sys_device *sys_dev = get_cpu_sysdev(i);
- cache_add_dev(sys_dev);
+ err = cache_add_dev(sys_dev);
+ if (err)
+ return err;
}
-
+ register_hotcpu_notifier(&cacheinfo_cpu_notifier);
return 0;
}
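The reordering in cache_sysfs_init() applies the same defensive idea in the other direction: subscribe to hotplug notifications only after every already-online CPU has been set up successfully, so the notifier callback can never observe half-initialized state. A hedged sketch of that ordering, with init_one() and subscribe() as hypothetical stand-ins for cache_add_dev() and register_hotcpu_notifier():

#include <stdio.h>

#define NR_ONLINE 4

/* Hypothetical per-CPU setup; returns 0 on success, negative on error. */
static int init_one(int i)
{
	printf("init %d\n", i);
	return 0;
}

static void subscribe(void)
{
	/* Stand-in for register_hotcpu_notifier(): from here on, hotplug
	 * events may invoke callbacks that assume fully initialized state. */
	printf("notifier registered\n");
}

static int sysfs_init(void)
{
	for (int i = 0; i < NR_ONLINE; i++) {
		int err = init_one(i);
		if (err)
			return err;	/* bail out before subscribing */
	}
	subscribe();			/* only after every CPU succeeded */
	return 0;
}

int main(void)
{
	return sysfs_init() ? 1 : 0;
}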