Diffstat (limited to 'arch/powerpc/mm/hash_native_64.c')
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  19
1 file changed, 9 insertions(+), 10 deletions(-)
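
For readers skimming the hunks below: the patch converts native_tlbie_lock from spinlock_t to raw_spinlock_t, and switches the HPTE lock bit over to the dedicated acquire/release bitops. The raw variant matters because on PREEMPT_RT an ordinary spinlock_t can become a sleeping lock, while this lock guards a hardware tlbie sequence that must stay non-preemptible. A minimal sketch of the raw spinlock pattern, with illustrative (not in-tree) names:

#include <linux/spinlock.h>

/* Sketch, not part of the patch: a raw spinlock keeps busy-waiting
 * even on PREEMPT_RT, so the critical section never sleeps. */
static DEFINE_RAW_SPINLOCK(example_hw_lock);

static void example_serialized_hw_op(void)
{
        raw_spin_lock(&example_hw_lock);
        /* hardware sequence that must not be preempted or sleep */
        raw_spin_unlock(&example_hw_lock);
}
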
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a1b10..784a400e078 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -37,7 +37,7 @@
#define HPTE_LOCK_BIT 3
-static DEFINE_SPINLOCK(native_tlbie_lock);
+static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
if (use_local)
use_local = mmu_psize_defs[psize].tlbiel;
if (lock_tlbie && !use_local)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(va, psize, ssize);
@@ -114,7 +114,7 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
static inline void native_lock_hpte(struct hash_pte *hptep)
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
unsigned long *word = &hptep->v;
while (1) {
- if (!test_and_set_bit(HPTE_LOCK_BIT, word))
+ if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
break;
while(test_bit(HPTE_LOCK_BIT, word))
cpu_relax();
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
{
unsigned long *word = &hptep->v;
- asm volatile("lwsync":::"memory");
- clear_bit(HPTE_LOCK_BIT, word);
+ clear_bit_unlock(HPTE_LOCK_BIT, word);
}
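
The two bitops hunks above replace a full-barrier test_and_set_bit() and an open-coded lwsync-then-clear_bit() with the locking variants: test_and_set_bit_lock() provides acquire semantics on success and clear_bit_unlock() provides release semantics, which is both cheaper on powerpc and self-documenting. A sketch of the resulting bit-spinlock idiom, with illustrative names:

#include <linux/bitops.h>
#include <asm/processor.h>      /* cpu_relax() */

/* Sketch of the pattern used by native_lock_hpte()/native_unlock_hpte();
 * the helper names here are illustrative, not from the patch. */
static inline void example_bit_lock(int nr, unsigned long *word)
{
        /* acquire barrier only when the bit is actually taken */
        while (test_and_set_bit_lock(nr, word))
                /* spin with plain reads to avoid bouncing the cache line */
                while (test_bit(nr, word))
                        cpu_relax();
}

static inline void example_bit_unlock(int nr, unsigned long *word)
{
        /* release barrier: prior stores are visible before the bit clears */
        clear_bit_unlock(nr, word);
}
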
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
@@ -434,7 +433,7 @@ static void native_hpte_clear(void)
/* we take the tlbie lock and hold it. Some hardware will
* deadlock if we try to tlbie from two processors at once.
*/
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
slots = pteg_count * HPTES_PER_GROUP;
@@ -458,7 +457,7 @@ static void native_hpte_clear(void)
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
local_irq_restore(flags);
}
@@ -521,7 +520,7 @@ static void native_flush_hash_range(unsigned long number, int local)
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
+ raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
@@ -536,7 +535,7 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
+ raw_spin_unlock(&native_tlbie_lock);
}
local_irq_restore(flags);
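
Taken together, every broadcast-tlbie path now follows the same shape: the caller has already disabled interrupts, the raw lock is taken only on hardware lacking CPU_FTR_LOCKLESS_TLBIE (the comment in native_hpte_clear() notes that some machines deadlock on concurrent tlbies), and the ptesync / eieio; tlbsync; ptesync sequence brackets the invalidation. A condensed sketch of that shape, assuming only the helpers visible in the diff:

/* Sketch only: the serialization shape shared by tlbie(),
 * native_hpte_clear() and native_flush_hash_range() above. */
static void example_broadcast_invalidate(unsigned long va, int psize, int ssize)
{
        int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);

        asm volatile("ptesync" : : : "memory");
        __tlbie(va, psize, ssize);              /* broadcast invalidation */
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");

        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
}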