author    Thomas Gleixner <tglx@linutronix.de>  2010-02-24 09:50:22 +0100
committer Ingo Molnar <mingo@elte.hu>           2011-09-13 11:11:57 +0200
commit    8292c9e15c3b069459794a04f5e2cf0d5665ddc4 (patch)
tree      6dd98e626c2dc5fc38a7838295039226b301ee57
parent    ee30a7b2fc072f139dac44826860d2c1f422137c (diff)
locking, semaphores: Annotate inner lock as raw
There is no reason to have the spin_lock protecting the semaphore
preemptible on -rt. Annotate it as a raw_spinlock.

In mainline this change documents the low-level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

( On -rt this also solves lockdep complaining about the
  rt_mutex.wait_lock being not initialized. )

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
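For context: raw_spinlock_t is the variant that remains a true spinning
lock on PREEMPT_RT, whereas plain spinlock_t is substituted there by a
sleeping rt_mutex-based lock. A minimal sketch of the annotation pattern
the patch applies (the my_counter structure and my_counter_inc() are
hypothetical, not part of the patch):

	#include <linux/spinlock.h>

	/*
	 * Hypothetical example: a counter whose inner lock must stay a
	 * true spinning lock even on -rt, because it is taken with
	 * interrupts disabled.
	 */
	struct my_counter {
		raw_spinlock_t	lock;	/* stays a spinlock on -rt */
		unsigned int	count;
	};

	static struct my_counter ctr = {
		.lock	= __RAW_SPIN_LOCK_UNLOCKED(ctr.lock),
		.count	= 0,
	};

	static void my_counter_inc(struct my_counter *c)
	{
		unsigned long flags;

		/*
		 * raw_spin_lock_irqsave() disables interrupts and spins;
		 * on -rt it is not converted to a sleeping lock.
		 */
		raw_spin_lock_irqsave(&c->lock, flags);
		c->count++;
		raw_spin_unlock_irqrestore(&c->lock, flags);
	}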
 include/linux/semaphore.h |  4 ++--
 kernel/semaphore.c        | 28 ++++++++++++++--------------
 2 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa04966aa..dc368b8ce21 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -14,14 +14,14 @@
 
 /* Please don't access any members of this structure directly */
 struct semaphore {
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	unsigned int		count;
 	struct list_head	wait_list;
 };
 
 #define __SEMAPHORE_INITIALIZER(name, n)				\
 {									\
-	.lock		= __SPIN_LOCK_UNLOCKED((name).lock),		\
+	.lock		= __RAW_SPIN_LOCK_UNLOCKED((name).lock),	\
 	.count		= n,						\
 	.wait_list	= LIST_HEAD_INIT((name).wait_list),		\
 }
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ad..d831841e55a 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		__down(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_interruptible(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_killable(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
 	unsigned long flags;
 	int count;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	count = sem->count - 1;
 	if (likely(count >= 0))
 		sem->count = count;
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return (count < 0);
 }
@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_timeout(sem, jiffies);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(list_empty(&sem->wait_list)))
 		sem->count++;
 	else
 		__up(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(up);
@@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		if (timeout <= 0)
 			goto timed_out;
 		__set_task_state(task, state);
-		spin_unlock_irq(&sem->lock);
+		raw_spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
-		spin_lock_irq(&sem->lock);
+		raw_spin_lock_irq(&sem->lock);
 		if (waiter.up)
 			return 0;
 	}
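The last hunk also shows why the raw annotation is safe even though
semaphore waiters sleep: __down_common always drops sem->lock before
calling schedule_timeout() and re-takes it afterwards, so the raw lock is
only held across short, non-sleeping critical sections. A minimal sketch
of that wait-loop pattern (my_sem, my_waiter and my_down_wait are
hypothetical names, and set_current_state() stands in for the older
__set_task_state() used in the patch):

	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/spinlock.h>

	struct my_sem {
		raw_spinlock_t		lock;
		struct list_head	wait_list;
	};

	struct my_waiter {
		struct list_head	list;
		bool			up;	/* set by the up() path */
	};

	/* Called with sem->lock held and interrupts disabled. */
	static int my_down_wait(struct my_sem *sem)
	{
		struct my_waiter waiter = { .up = false };

		list_add_tail(&waiter.list, &sem->wait_list);
		for (;;) {
			/*
			 * Mark the task sleeping *before* dropping the
			 * lock, so a concurrent wakeup cannot be lost.
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
			raw_spin_unlock_irq(&sem->lock);  /* never sleep holding it */
			schedule();
			raw_spin_lock_irq(&sem->lock);
			if (waiter.up)	/* the up() path granted us the semaphore */
				return 0;
		}
	}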