path: root/kernel/rcutorture.c
author Rusty Russell <rusty@rustcorp.com.au> 2009-03-30 22:05:16 -0600
committer Rusty Russell <rusty@rustcorp.com.au> 2009-03-30 22:05:16 +1030
commit 73d0a4b107d58908305f272bfae9bd17f74a2c81 (patch)
tree 2dd9c3c51e227880abd662a7749bf3f843b9f6d4 /kernel/rcutorture.c
parent aa85ea5b89c36c51200d795dd788139bd9b8cf50 (diff)
cpumask: convert rcutorture.c
We're getting rid of cpumasks on the stack.  Simply change tmp_mask to a
global, and allocate it in rcu_torture_init().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Josh Triplett <josh@freedesktop.org>
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--  kernel/rcutorture.c  25
1 files changed, 17 insertions, 8 deletions
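For context, the pattern this commit applies is the usual cpumask_var_t conversion: instead of
declaring a cpumask_t on the kernel stack (which can be large when NR_CPUS is high), the mask
becomes a cpumask_var_t that is allocated once with alloc_cpumask_var() and released with
free_cpumask_var(). The sketch below is a minimal, hypothetical illustration of that pattern;
the names demo_mask, demo_init(), demo_use() and demo_exit() are invented for illustration and
are not part of rcutorture.c.

	#include <linux/cpumask.h>
	#include <linux/slab.h>		/* GFP_KERNEL */
	#include <linux/errno.h>

	/* Hypothetical module-scope mask, playing the role of shuffle_tmp_mask. */
	static cpumask_var_t demo_mask;

	static int demo_init(void)
	{
		/* Allocate the mask once at init time instead of placing it on the stack. */
		if (!alloc_cpumask_var(&demo_mask, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}

	static void demo_use(int idle_cpu)
	{
		/* Operate on the dynamically allocated mask via the cpumask_* API. */
		cpumask_setall(demo_mask);
		if (idle_cpu != -1)
			cpumask_clear_cpu(idle_cpu, demo_mask);
	}

	static void demo_exit(void)
	{
		/* Pair every successful alloc_cpumask_var() with free_cpumask_var(). */
		free_cpumask_var(demo_mask);
	}

In the actual patch below, the allocation happens in rcu_torture_init() right before the shuffler
thread is created, and the mask is freed both on the kthread_run() error path and when the
shuffler task is stopped in rcu_torture_cleanup().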
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7c4142a79f0..9b4a975a4b4 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers = 0;
static struct list_head rcu_torture_removed;
+static cpumask_var_t shuffle_tmp_mask;
static int stutter_pause_test = 0;
@@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
*/
static void rcu_torture_shuffle_tasks(void)
{
- cpumask_t tmp_mask;
int i;
- cpus_setall(tmp_mask);
+ cpumask_setall(shuffle_tmp_mask);
get_online_cpus();
/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
}
if (rcu_idle_cpu != -1)
- cpu_clear(rcu_idle_cpu, tmp_mask);
+ cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
- set_cpus_allowed_ptr(current, &tmp_mask);
+ set_cpus_allowed_ptr(current, shuffle_tmp_mask);
if (reader_tasks) {
for (i = 0; i < nrealreaders; i++)
if (reader_tasks[i])
set_cpus_allowed_ptr(reader_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (fakewriter_tasks) {
for (i = 0; i < nfakewriters; i++)
if (fakewriter_tasks[i])
set_cpus_allowed_ptr(fakewriter_tasks[i],
- &tmp_mask);
+ shuffle_tmp_mask);
}
if (writer_task)
- set_cpus_allowed_ptr(writer_task, &tmp_mask);
+ set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
if (stats_task)
- set_cpus_allowed_ptr(stats_task, &tmp_mask);
+ set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
if (rcu_idle_cpu == -1)
rcu_idle_cpu = num_online_cpus() - 1;
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
if (shuffler_task) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
kthread_stop(shuffler_task);
+ free_cpumask_var(shuffle_tmp_mask);
}
shuffler_task = NULL;
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
}
if (test_no_idle_hz) {
rcu_idle_cpu = num_online_cpus() - 1;
+
+ if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+ firsterr = -ENOMEM;
+ VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
+ goto unwind;
+ }
+
/* Create the shuffler thread */
shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
"rcu_torture_shuffle");
if (IS_ERR(shuffler_task)) {
+ free_cpumask_var(shuffle_tmp_mask);
firsterr = PTR_ERR(shuffler_task);
VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
shuffler_task = NULL;