From a51805efae5dda0da66f79268ffcf0715f9dbea4 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Jul 2013 14:23:49 -0700 Subject: lockdep: Introduce lock_acquire_exclusive()/shared() helper macros In lockdep.h, the spinlock/mutex/rwsem/rwlock/lock_map acquire macros have different definitions based on the value of CONFIG_PROVE_LOCKING. We have separate ifdefs for each of these definitions, which seems redundant. Introduce lock_acquire_{exclusive,shared,shared_recursive} helpers which will have different definitions based on CONFIG_PROVE_LOCKING. Then all other helper macros can be defined based on the above ones, which reduces the amount of ifdefined code. Signed-off-by: Michel Lespinasse Signed-off-by: Andrew Morton Signed-off-by: Peter Zijlstra Cc: Oleg Nesterov Cc: Lai Jiangshan Cc: "Srivatsa S. Bhat" Cc: Rusty Russell Cc: Andi Kleen Cc: "Paul E. McKenney" Cc: Steven Rostedt Link: http://lkml.kernel.org/r/20130708212350.6DD1931C15E@corp2gmr1-1.hot.corp.google.com Signed-off-by: Ingo Molnar --- include/linux/lockdep.h | 92 +++++++++++++------------------------------------ 1 file changed, 23 insertions(+), 69 deletions(-) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index f1e877b79ed..cfc2f119779 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask); #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) -#else /* !LOCKDEP */ +#else /* !CONFIG_LOCKDEP */ static inline void lockdep_off(void) { @@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr) * on the per lock-class debug mode: */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# ifdef CONFIG_PROVE_LOCKING -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) -# else -# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# endif -# define spin_release(l, n, i) lock_release(l, n, i) +#ifdef CONFIG_PROVE_LOCKING + #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) + #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i) + #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i) #else -# define spin_acquire(l, s, t, i) do { } while (0) -# define spin_release(l, n, i) do { } while (0) + #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) + #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) + #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# ifdef CONFIG_PROVE_LOCKING -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) -# else -# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) -# endif -# define rwlock_release(l, n, i) lock_release(l, n, i) -#else -# define rwlock_acquire(l, s, t, i) do { } while (0) -# define rwlock_acquire_read(l, s, t, i) do { } while (0) -# define rwlock_release(l, n, i) do { } while (0) -#endif +#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) +#define spin_release(l, n, i) lock_release(l, 
n, i) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# ifdef CONFIG_PROVE_LOCKING -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) -# else -# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) -# endif -# define mutex_release(l, n, i) lock_release(l, n, i) -#else -# define mutex_acquire(l, s, t, i) do { } while (0) -# define mutex_acquire_nest(l, s, t, n, i) do { } while (0) -# define mutex_release(l, n, i) do { } while (0) -#endif +#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) +#define rwlock_release(l, n, i) lock_release(l, n, i) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# ifdef CONFIG_PROVE_LOCKING -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) -# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) -# else -# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) -# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) -# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) -# endif +#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) +#define mutex_release(l, n, i) lock_release(l, n, i) + +#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) +#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) +#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) # define rwsem_release(l, n, i) lock_release(l, n, i) -#else -# define rwsem_acquire(l, s, t, i) do { } while (0) -# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) -# define rwsem_acquire_read(l, s, t, i) do { } while (0) -# define rwsem_release(l, n, i) do { } while (0) -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# ifdef CONFIG_PROVE_LOCKING -# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) -# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_) -# else -# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) -# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_) -# endif +#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) +#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) # define lock_map_release(l) lock_release(l, 1, _THIS_IP_) -#else -# define lock_map_acquire(l) do { } while (0) -# define lock_map_acquire_read(l) do { } while (0) -# define lock_map_release(l) do { } while (0) -#endif #ifdef CONFIG_PROVE_LOCKING # define might_lock(lock) \ -- cgit v1.2.3 From c4be9cb4f19cbd534a6c4c334cd48d8bb483e17a Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Mon, 8 Jul 2013 14:23:51 -0700 Subject: lglock: Update lockdep annotations to report recursive local locks Oleg Nesterov recently noticed that the lockdep annotations in lglock.c are not sufficient to detect some obvious deadlocks, such as lg_local_lock(LOCK) + lg_local_lock(LOCK) or spin_lock(X) + lg_local_lock(Y) vs lg_local_lock(Y) + spin_lock(X). Both issues are easily fixed by indicating to lockdep that lglock's local locks are not recursive. 
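(For illustration only: the following sketch is not part of the patch, and my_lglock, my_spinlock, the shape*() functions and the initcall are hypothetical names added here just to make the two deadlock shapes above concrete. With the updated annotations, lockdep is expected to flag both shapes.)

#include <linux/init.h>
#include <linux/lglock.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_spinlock);
DEFINE_STATIC_LGLOCK(my_lglock);

static int __init my_lglock_example_init(void)
{
	/* Set up the lglock's lockdep map before first use. */
	lg_lock_init(&my_lglock, "my_lglock");
	return 0;
}
core_initcall(my_lglock_example_init);

/* Shape 1: local lglocks are not recursive, so this self-deadlocks. */
static void shape1(void)
{
	lg_local_lock(&my_lglock);
	lg_local_lock(&my_lglock);	/* spins forever on this CPU */
	lg_local_unlock(&my_lglock);
	lg_local_unlock(&my_lglock);
}

/* Shape 2: ABBA ordering, shape2a() on one CPU racing shape2b() on another. */
static void shape2a(void)
{
	spin_lock(&my_spinlock);
	lg_local_lock(&my_lglock);
	lg_local_unlock(&my_lglock);
	spin_unlock(&my_spinlock);
}

static void shape2b(void)
{
	lg_local_lock(&my_lglock);
	spin_lock(&my_spinlock);
	spin_unlock(&my_spinlock);
	lg_local_unlock(&my_lglock);
}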
We shouldn't use the rwlock acquire/release functions here, as lglock doesn't share the same semantics. Instead we can base our lockdep annotations on the lock_acquire_shared (for local lglock) and lock_acquire_exclusive (for global lglock) helpers. I am not proposing new lglock-specific helpers as I don't see the point of the existing second level of helpers :) Noticed-by: Oleg Nesterov Signed-off-by: Michel Lespinasse Cc: Lai Jiangshan Cc: "Srivatsa S. Bhat" Cc: Rusty Russell Cc: Andi Kleen Cc: "Paul E. McKenney" Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20130708212352.1769031C15E@corp2gmr1-1.hot.corp.google.com Signed-off-by: Ingo Molnar --- kernel/lglock.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/lglock.c b/kernel/lglock.c index 6535a667a5a..86ae2aebf00 100644 --- a/kernel/lglock.c +++ b/kernel/lglock.c @@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg) arch_spinlock_t *lock; preempt_disable(); - rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); lock = this_cpu_ptr(lg->lock); arch_spin_lock(lock); } @@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg) { arch_spinlock_t *lock; - rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock_release(&lg->lock_dep_map, 1, _RET_IP_); lock = this_cpu_ptr(lg->lock); arch_spin_unlock(lock); preempt_enable(); @@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu) arch_spinlock_t *lock; preempt_disable(); - rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); lock = per_cpu_ptr(lg->lock, cpu); arch_spin_lock(lock); } @@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu) { arch_spinlock_t *lock; - rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock_release(&lg->lock_dep_map, 1, _RET_IP_); lock = per_cpu_ptr(lg->lock, cpu); arch_spin_unlock(lock); preempt_enable(); @@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg) int i; preempt_disable(); - rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_); + lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); for_each_possible_cpu(i) { arch_spinlock_t *lock; lock = per_cpu_ptr(lg->lock, i); @@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg) { int i; - rwlock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock_release(&lg->lock_dep_map, 1, _RET_IP_); for_each_possible_cpu(i) { arch_spinlock_t *lock; lock = per_cpu_ptr(lg->lock, i); -- cgit v1.2.3 From 1e40c2edef2537f87f94d0baf80aeaeb7d51cc23 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 19 Jul 2013 20:31:01 +0200 Subject: mutex: Fix/document access-once assumption in mutex_can_spin_on_owner() mutex_can_spin_on_owner() is technically broken in that it would in theory allow the compiler to load lock->owner twice, seeing a pointer the first time and a NULL pointer the second time. Linus pointed out that a compiler has to be seriously broken to not compile this correctly - but nevertheless this change is correct as it will better document the implementation. Signed-off-by: Peter Zijlstra Acked-by: Davidlohr Bueso Acked-by: Waiman Long Acked-by: Linus Torvalds Acked-by: Thomas Gleixner Acked-by: Rik van Riel Cc: Paul E.
McKenney Cc: David Howells Link: http://lkml.kernel.org/r/20130719183101.GA20909@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar --- kernel/mutex.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index ff05f4bd86e..7ff48c55a98 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) */ static inline int mutex_can_spin_on_owner(struct mutex *lock) { + struct task_struct *owner; int retval = 1; rcu_read_lock(); - if (lock->owner) - retval = lock->owner->on_cpu; + owner = ACCESS_ONCE(lock->owner); + if (owner) + retval = owner->on_cpu; rcu_read_unlock(); /* * if lock->owner is not set, the mutex owner may have just acquired -- cgit v1.2.3 From ec83f425dbca47e19c6737e8e7db0d0924a5de1b Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Fri, 28 Jun 2013 13:13:18 -0700 Subject: mutex: Do not unnecessarily deal with waiters Upon entering the slowpath, we immediately attempt to acquire the lock by checking if it is already unlocked. If we are lucky enough that this is the case, then we don't need to deal with any waiter-related logic. Furthermore any checks for an empty wait_list are unnecessary as we already know that count is non-negative and hence no one is waiting for the lock. Move the count check and xchg calls to be done before any waiters are set up - including waiter debugging. Upon failure to acquire the lock, the xchg sets the counter to 0, instead of -1 as it was originally. This can be done here since we set it back to -1 right at the beginning of the loop so other waiters are woken up when the lock is released. When tested on an 8-socket (80 core) system against a vanilla 3.10-rc1 kernel, this patch provides some small performance benefits (+2-6%). While it could be considered in the noise level, the average percentages were stable across multiple runs and no performance regressions were seen. Two big winners, for small numbers of users (10-100), were the short and compute workloads, which gained +19.36% and +15.76% in jobs per minute respectively. Also change some break statements to 'goto slowpath', which IMO makes the code a little more intuitive to read. Signed-off-by: Davidlohr Bueso Acked-by: Rik van Riel Acked-by: Maarten Lankhorst Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1372450398.2106.1.camel@buesod1.americas.hpqcorp.net Signed-off-by: Ingo Molnar --- kernel/mutex.c | 41 ++++++++++++++++++----------------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 7ff48c55a98..386ad5da47a 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -463,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * performed the optimistic spinning cannot be done. */ if (ACCESS_ONCE(ww->ctx)) - break; + goto slowpath; } /* @@ -474,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, owner = ACCESS_ONCE(lock->owner); if (owner && !mutex_spin_on_owner(lock, owner)) { mspin_unlock(MLOCK(lock), &node); - break; + goto slowpath; } if ((atomic_read(&lock->count) == 1) && @@ -489,8 +489,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, mutex_set_owner(lock); mspin_unlock(MLOCK(lock), &node); - preempt_enable(); - return 0; + goto done; } mspin_unlock(MLOCK(lock), &node); @@ -501,7 +500,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, * the owner complete.
*/ if (!owner && (need_resched() || rt_task(task))) - break; + goto slowpath; /* * The cpu_relax() call is a compiler barrier which forces @@ -515,6 +514,10 @@ slowpath: #endif spin_lock_mutex(&lock->wait_lock, flags); + /* once more, can we acquire the lock? */ + if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1)) + goto skip_wait; + debug_mutex_lock_common(lock, &waiter); debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); @@ -522,9 +525,6 @@ slowpath: list_add_tail(&waiter.list, &lock->wait_list); waiter.task = task; - if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1)) - goto done; - lock_contended(&lock->dep_map, ip); for (;;) { @@ -538,7 +538,7 @@ slowpath: * other waiters: */ if (MUTEX_SHOW_NO_WAITER(lock) && - (atomic_xchg(&lock->count, -1) == 1)) + (atomic_xchg(&lock->count, -1) == 1)) break; /* @@ -563,24 +563,25 @@ slowpath: schedule_preempt_disabled(); spin_lock_mutex(&lock->wait_lock, flags); } + mutex_remove_waiter(lock, &waiter, current_thread_info()); + /* set it to 0 if there are no waiters left: */ + if (likely(list_empty(&lock->wait_list))) + atomic_set(&lock->count, 0); + debug_mutex_free_waiter(&waiter); -done: +skip_wait: + /* got the lock - cleanup and rejoice! */ lock_acquired(&lock->dep_map, ip); - /* got the lock - rejoice! */ mutex_remove_waiter(lock, &waiter, current_thread_info()); mutex_set_owner(lock); if (!__builtin_constant_p(ww_ctx == NULL)) { - struct ww_mutex *ww = container_of(lock, - struct ww_mutex, - base); + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); struct mutex_waiter *cur; /* * This branch gets optimized out for the common case, * and is only important for ww_mutex_lock. */ - ww_mutex_lock_acquired(ww, ww_ctx); ww->ctx = ww_ctx; @@ -594,15 +595,9 @@ done: } } - /* set it to 0 if there are no waiters left: */ - if (likely(list_empty(&lock->wait_list))) - atomic_set(&lock->count, 0); - spin_unlock_mutex(&lock->wait_lock, flags); - - debug_mutex_free_waiter(&waiter); +done: preempt_enable(); - return 0; err: -- cgit v1.2.3 From 2b4883972271f8d61de67aa365ade89dfff69db1 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 24 Jul 2013 11:25:17 -0700 Subject: mutex: Avoid label warning when !CONFIG_MUTEX_SPIN_ON_OWNER Fengguang reported the following warning when optimistic spinning is disabled (i.e. make allnoconfig): kernel/mutex.c:599:1: warning: label 'done' defined but not used Remove the 'done' label altogether. Reported-by: Fengguang Wu Signed-off-by: Davidlohr Bueso Signed-off-by: Ingo Molnar --- kernel/mutex.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/mutex.c b/kernel/mutex.c index 386ad5da47a..98164a55a4d 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -489,7 +489,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, mutex_set_owner(lock); mspin_unlock(MLOCK(lock), &node); - goto done; + preempt_enable(); + return 0; } mspin_unlock(MLOCK(lock), &node); @@ -596,7 +597,6 @@ skip_wait: } spin_unlock_mutex(&lock->wait_lock, flags); -done: preempt_enable(); return 0; -- cgit v1.2.3 From 41e85ce8220c6e5fdef706fda6696cd291115b63 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 1 Aug 2013 18:59:41 +0200 Subject: hung_task debugging: Print more info when reporting the problem printk(KERN_ERR) from check_hung_task() likely means we have a bug, but unlike BUG_ON()/WARN_ON() it doesn't show the kernel version, which complicates bug-report investigation.
Add the additional pr_err() to print tainted/release/version like dump_stack_print_info() does; the output becomes: INFO: task perl:504 blocked for more than 2 seconds. Not tainted 3.11.0-rc1-10367-g136bb46-dirty #1763 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. ... While at it, turn the old printk()s into pr_err(). Signed-off-by: Oleg Nesterov Cc: ahecox@redhat.com Cc: Christopher Williams Cc: dwysocha@redhat.com Cc: gavin@redhat.com Cc: Mandeep Singh Baines Cc: nshi@redhat.com Cc: Linus Torvalds Cc: Andrew Morton Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20130801165941.GA17544@redhat.com Signed-off-by: Ingo Molnar --- kernel/hung_task.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 6df614912b9..3e97fb126e6 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -15,6 +15,7 @@ #include <linux/lockdep.h> #include <linux/export.h> #include <linux/sysctl.h> +#include <linux/utsname.h> /* * The number of tasks checked: @@ -99,10 +100,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) * Ok, the task did not get scheduled for more than 2 minutes, * complain: */ - printk(KERN_ERR "INFO: task %s:%d blocked for more than " - "%ld seconds.\n", t->comm, t->pid, timeout); - printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" - " disables this message.\n"); + pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", + t->comm, t->pid, timeout); + pr_err(" %s %s %.*s\n", + print_tainted(), init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); + pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" + " disables this message.\n"); sched_show_task(t); debug_show_held_locks(t); -- cgit v1.2.3 From 15e71911fcc655508e02f767a3d9b8b138051d2b Mon Sep 17 00:00:00 2001 From: Xie XiuQi Date: Mon, 29 Jul 2013 11:52:24 +0800 Subject: generic-ipi/locking: Fix misleading smp_call_function_any() description Fix the locking description: after commit 8969a5ede0f9e17da4b9437 ("generic-ipi: remove kmalloc()"), wait = 0 can be guaranteed because we don't kmalloc() anymore. Signed-off-by: Xie XiuQi Cc: Sheng Yang Cc: Peter Zijlstra Cc: Jens Axboe Cc: Rusty Russell Link: http://lkml.kernel.org/r/51F5E6F8.1000801@huawei.com Signed-off-by: Ingo Molnar --- kernel/smp.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/smp.c b/kernel/smp.c index fe9f773d711..b1c9034bdfc 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -278,8 +278,6 @@ EXPORT_SYMBOL(smp_call_function_single); * @wait: If true, wait until function has completed. * * Returns 0 on success, else a negative status code (if no cpus were online). - * Note that @wait will be implicitly turned on in case of allocation failures, - * since we fall back to on-stack allocation. * * Selection preference: * 1) current cpu if in @mask -- cgit v1.2.3
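(To close, a minimal usage sketch, not taken from any of the patches above; remote_poke() and poke_any_online_cpu() are hypothetical names. With the kmalloc() fallback gone, a caller passing wait = 0 gets exactly the non-blocking behaviour it asked for, which is why the stale note is removed from the comment.)

#include <linux/cpumask.h>
#include <linux/smp.h>

static void remote_poke(void *info)
{
	/* Runs on the CPU that smp_call_function_any() selects. */
}

static void poke_any_online_cpu(void)
{
	/*
	 * wait = 0: queue the call and return; it is never silently
	 * upgraded to a blocking call on allocation failure anymore.
	 */
	smp_call_function_any(cpu_online_mask, remote_poke, NULL, 0);
}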