Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/Makefile      |  6
 arch/mips/mm/c-octeon.c    |  2
 arch/mips/mm/c-r3k.c       |  8
 arch/mips/mm/c-r4k.c       | 27
 arch/mips/mm/c-tx39.c      | 12
 arch/mips/mm/cerr-sb1.c    | 30
 arch/mips/mm/cex-gen.S     |  6
 arch/mips/mm/cex-oct.S     | 36
 arch/mips/mm/cex-sb1.S     |  8
 arch/mips/mm/dma-default.c |  2
 arch/mips/mm/fault.c       |  4
 arch/mips/mm/gup.c         |  2
 arch/mips/mm/init.c        |  4
 arch/mips/mm/ioremap.c     |  4
 arch/mips/mm/page.c        |  6
 arch/mips/mm/pgtable-64.c  |  4
 arch/mips/mm/sc-ip22.c     |  2
 arch/mips/mm/sc-r5k.c      |  4
 arch/mips/mm/tlb-r4k.c     |  2
 arch/mips/mm/tlbex.c       | 33
 arch/mips/mm/uasm.c        | 16
 21 files changed, 110 insertions(+), 108 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 90ceb963aaf..1dcec30ad1c 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -16,9 +16,9 @@ obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
-obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
-obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 6ec04daf423..8557fb55286 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -106,7 +106,7 @@ static void octeon_flush_icache_all(void)
* Called to flush all memory associated with a memory
* context.
*
- * @mm: Memory context to flush
+ * @mm: Memory context to flush
*/
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 031c4c2cdf2..704dc735a59 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -119,7 +119,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -176,7 +176,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -285,13 +285,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
write_c0_status(flags&~ST0_IEC);
/* Fill the TLB to avoid an exception with caches isolated. */
- asm( "lw\t$0, 0x000(%0)\n\t"
+ asm( "lw\t$0, 0x000(%0)\n\t"
"lw\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0f7d788e881..ecca559b8d7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -160,7 +160,7 @@ static void __cpuinit r4k_blast_dcache_setup(void)
"1:\n\t" \
)
#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
@@ -177,7 +177,7 @@ static inline void tx49_blast_icache32(void)
unsigned long end = start + current_cpu_data.icache.waysize;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -208,7 +208,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -637,7 +637,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
* for the cache instruction on MIPS processors and
* some processors, among them the RM5200 and RM7000
* QED processors will throw an address error for cache
- * hit ops with insufficient alignment. Solved by
+ * hit ops with insufficient alignment. Solved by
* aligning the address to cache line size.
*/
blast_inv_scache_range(addr, addr + size);
@@ -864,7 +864,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -923,7 +923,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -986,8 +986,8 @@ static void __cpuinit probe_pcache(void)
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
- c->icache.ways *
- c->icache.linesz;
+ c->icache.ways *
+ c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
if (config & 0x8) /* VI bit */
@@ -1006,8 +1006,8 @@ static void __cpuinit probe_pcache(void)
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
- c->dcache.ways *
- c->dcache.linesz;
+ c->dcache.ways *
+ c->dcache.linesz;
c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
c->options |= MIPS_CPU_PREFETCH;
@@ -1016,7 +1016,7 @@ static void __cpuinit probe_pcache(void)
/*
* Processor configuration sanity check for the R4000SC erratum
- * #5. With page sizes larger than 32kB there is no possibility
+ * #5. With page sizes larger than 32kB there is no possibility
* to get a VCE exception anymore so we don't care about this
* misconfiguration. The case is rather theoretical anyway;
* presumably no vendor is shipping his hardware in the "bad"
@@ -1057,6 +1057,7 @@ static void __cpuinit probe_pcache(void)
break;
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
@@ -1088,7 +1089,7 @@ static void __cpuinit probe_pcache(void)
break;
}
-#ifdef CONFIG_CPU_LOONGSON2
+#ifdef CONFIG_CPU_LOONGSON2
/*
* LOONGSON2 has 4 way icache, but when using indexed cache op,
* one op will act on all 4 ways
@@ -1228,7 +1229,7 @@ static void __cpuinit setup_scache(void)
#ifdef CONFIG_R5000_CPU_SCACHE
r5k_sc_init();
#endif
- return;
+ return;
case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
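
For reference, the sets/ways/linesz arithmetic reindented in probe_pcache() above reduces to a small helper; this standalone sketch (hypothetical names, __builtin_ctz standing in for the kernel's __ffs) reproduces the waybit computation:

#include <stdio.h>

static unsigned int cache_waybit(unsigned int sets, unsigned int ways,
				 unsigned int linesz)
{
	unsigned int size = sets * ways * linesz;

	/* lowest set bit of the per-way size */
	return __builtin_ctz(size / ways);
}

int main(void)
{
	/* e.g. 256 sets, 4 ways, 32-byte lines: 32K cache, waybit 13 */
	printf("%u\n", cache_waybit(256, 4, 32));
	return 0;
}
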
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 87d23cada6d..ba9da270289 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -33,9 +33,9 @@ extern int r3k_have_wired_reg; /* in r3k-tlb.c */
/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
"nop\n\t" \
"1:\n\t" \
".set pop" \
@@ -361,7 +361,7 @@ void __cpuinit tx39_cache_init(void)
/* TX39/H core (writethru direct-map cache) */
__flush_cache_vmap = tx39__flush_cache_vmap;
__flush_cache_vunmap = tx39__flush_cache_vunmap;
- flush_cache_all = tx39h_flush_icache_all;
+ flush_cache_all = tx39h_flush_icache_all;
__flush_cache_all = tx39h_flush_icache_all;
flush_cache_mm = (void *) tx39h_flush_icache_all;
flush_cache_range = (void *) tx39h_flush_icache_all;
@@ -409,8 +409,8 @@ void __cpuinit tx39_cache_init(void)
_dma_cache_inv = tx39_dma_cache_inv;
shm_align_mask = max_t(unsigned long,
- (dcache_size / current_cpu_data.dcache.ways) - 1,
- PAGE_SIZE - 1);
+ (dcache_size / current_cpu_data.dcache.ways) - 1,
+ PAGE_SIZE - 1);
break;
}
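
The shm_align_mask expression touched above encodes the usual anti-aliasing rule for virtually indexed caches: shared mappings must agree in every cache index bit, so the mask is the per-way size minus one, but never less than the page mask. A sketch under those assumptions:

static unsigned long shm_mask(unsigned long dcache_size,
			      unsigned long ways,
			      unsigned long page_size)
{
	unsigned long waysize = dcache_size / ways;

	/* max_t(waysize - 1, page_size - 1) for power-of-two sizes */
	return (waysize > page_size ? waysize : page_size) - 1;
}
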
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 3571090ba17..576add33bf5 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -27,7 +27,7 @@
/*
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
- * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
*/
#undef DUMP_L2_ECC_TAG_ON_ERROR
@@ -48,7 +48,7 @@
#define CP0_CERRI_EXTERNAL (1 << 26)
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
-#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
+#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
#define CP0_CERRD_MULTIPLE (1 << 31)
#define CP0_CERRD_TAG_STATE (1 << 30)
@@ -56,8 +56,8 @@
#define CP0_CERRD_DATA_SBE (1 << 28)
#define CP0_CERRD_DATA_DBE (1 << 27)
#define CP0_CERRD_EXTERNAL (1 << 26)
-#define CP0_CERRD_LOAD (1 << 25)
-#define CP0_CERRD_STORE (1 << 24)
+#define CP0_CERRD_LOAD (1 << 25)
+#define CP0_CERRD_STORE (1 << 24)
#define CP0_CERRD_FILLWB (1 << 23)
#define CP0_CERRD_COHERENCY (1 << 22)
#define CP0_CERRD_DUPTAG (1 << 21)
@@ -69,10 +69,10 @@
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
#define CP0_CERRD_TYPES \
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
-#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
-static uint32_t extract_ic(unsigned short addr, int data);
-static uint32_t extract_dc(unsigned short addr, int data);
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
static inline void breakout_errctl(unsigned int val)
{
@@ -209,11 +209,11 @@ asmlinkage void sb1_cache_error(void)
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
- printk(" c0_errorepc == %08x\n", eepc);
- printk(" c0_errctl == %08x", errctl);
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
- printk(" c0_cerr_i == %08x", cerr_i);
+ printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
@@ -229,7 +229,7 @@ asmlinkage void sb1_cache_error(void)
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
- printk(" c0_cerr_d == %08x", cerr_d);
+ printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
@@ -256,7 +256,7 @@ asmlinkage void sb1_cache_error(void)
/*
* Calling panic() when a fatal cache error occurs scrambles the
* state of the system (and the cache), making it difficult to
- * investigate after the fact. However, if you just stall the CPU,
+ * investigate after the fact. However, if you just stall the CPU,
* the other CPU may keep on running, which is typically very
* undesirable.
*/
@@ -411,7 +411,7 @@ static uint32_t extract_ic(unsigned short addr, int data)
" dmfc0 $1, $28, 1\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
- " .set pop \n"
+ " .set pop \n"
: "=r" (datahi), "=r" (insta), "=r" (instb)
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
@@ -441,8 +441,8 @@ static uint8_t dc_ecc(uint64_t dword)
{
uint64_t t;
uint32_t w;
- uint8_t p;
- int i;
+ uint8_t p;
+ int i;
p = 0;
for (i = 7; i >= 0; i--)
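
dc_ecc(), whose locals are realigned above, computes a per-dword ECC byte; each check bit is, in essence, the parity of a masked subset of the 64 data bits. A standalone parity fold of the kind such code builds on (a sketch, not the SB1 algorithm itself):

#include <stdint.h>

static inline uint8_t parity64(uint64_t x)
{
	/* fold halves together until a single bit remains */
	x ^= x >> 32;
	x ^= x >> 16;
	x ^= x >> 8;
	x ^= x >> 4;
	x ^= x >> 2;
	x ^= x >> 1;
	return (uint8_t)(x & 1);
}
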
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index e743622fd24..45dff5cd4b8 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -14,17 +14,17 @@
#include <asm/stackframe.h>
/*
- * Game over. Go to the button. Press gently. Swear where allowed by
+ * Game over. Go to the button. Press gently. Swear where allowed by
* legislation.
*/
LEAF(except_vec2_generic)
.set noreorder
.set noat
- .set mips0
+ .set mips0
/*
* This is a very bad place to be. Our cache error
* detection has triggered. If we have write-back data
- * in the cache, we may not be able to recover. As a
+ * in the cache, we may not be able to recover. As a
* first-order desperate measure, turn off KSEG0 cacheing.
*/
mfc0 k0,CP0_CONFIG
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
index 3db8553fcd3..9029092aa74 100644
--- a/arch/mips/mm/cex-oct.S
+++ b/arch/mips/mm/cex-oct.S
@@ -18,7 +18,7 @@
*/
LEAF(except_vec2_octeon)
- .set push
+ .set push
.set mips64r2
.set noreorder
.set noat
@@ -27,19 +27,19 @@
/* due to an errata we need to read the COP0 CacheErr (Dcache)
* before any cache/DRAM access */
- rdhwr k0, $0 /* get core_id */
- PTR_LA k1, cache_err_dcache
- sll k0, k0, 3
+ rdhwr k0, $0 /* get core_id */
+ PTR_LA k1, cache_err_dcache
+ sll k0, k0, 3
PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
- dmfc0 k0, CP0_CACHEERR, 1
- sd k0, (k1)
- dmtc0 $0, CP0_CACHEERR, 1
+ dmfc0 k0, CP0_CACHEERR, 1
+ sd k0, (k1)
+ dmtc0 $0, CP0_CACHEERR, 1
- /* check whether this is a nested exception */
- mfc0 k1, CP0_STATUS
- andi k1, k1, ST0_EXL
- beqz k1, 1f
+ /* check whether this is a nested exception */
+ mfc0 k1, CP0_STATUS
+ andi k1, k1, ST0_EXL
+ beqz k1, 1f
nop
j cache_parity_error_octeon_non_recoverable
nop
@@ -48,22 +48,22 @@
1: j handle_cache_err
nop
- .set pop
+ .set pop
END(except_vec2_octeon)
/* We need to jump to handle_cache_err so that the previous handler
* can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
- * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
+ * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
LEAF(handle_cache_err)
- .set push
- .set noreorder
- .set noat
+ .set push
+ .set noreorder
+ .set noat
SAVE_ALL
KMODE
- jal cache_parity_error_octeon_recoverable
+ jal cache_parity_error_octeon_recoverable
nop
- j ret_from_exception
+ j ret_from_exception
nop
.set pop
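
In C terms, the rdhwr/sll/PTR_ADDU/sd sequence retabbed above amounts to a per-core array store; the sll by 3 scales core_id to 8-byte slots. A sketch, assuming cache_err_dcache is an array of 64-bit slots, one per core:

static void save_dcache_err(unsigned long long *cache_err_dcache,
			    unsigned int core_id,
			    unsigned long long cacheerr)
{
	/* k1 = &cache_err_dcache[core_id], i.e. base + (core_id << 3) */
	cache_err_dcache[core_id] = cacheerr;
}
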
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 89c412bc4b6..fe1d887e8d7 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -24,9 +24,9 @@
#include <asm/cacheops.h>
#include <asm/sibyte/board.h>
-#define C0_ERRCTL $26 /* CP0: Error info */
-#define C0_CERR_I $27 /* CP0: Icache error */
-#define C0_CERR_D $27,1 /* CP0: Dcache error */
+#define C0_ERRCTL $26 /* CP0: Error info */
+#define C0_CERR_I $27 /* CP0: Icache error */
+#define C0_CERR_D $27,1 /* CP0: Dcache error */
/*
* Based on SiByte sample software cache-err/cerr.S
@@ -88,7 +88,7 @@ attempt_recovery:
/*
* k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any
* Dcache errors we can recover from will take more extensive
- * processing. For now, they are considered "unrecoverable".
+ * processing. For now, they are considered "unrecoverable".
* Note that 'DC' becoming set (outside of ERL mode) will
* cause 'IC' to clear; so if there's an Icache error, we'll
* only find out about it if we recover from this error and
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 3fab2046c8a..f9ef83829a5 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ddcec1e1a0c..0fead53d1c2 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -52,7 +52,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
#ifdef CONFIG_KPROBES
/*
- * This is to notify the fault handler of the kprobes. The
+ * This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
@@ -216,7 +216,7 @@ bad_area_nosemaphore:
}
no_context:
- /* Are we prepared to handle this kernel fault? */
+ /* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index dcfd573871c..d4ea5c9c4a9 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -249,7 +249,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
+ * Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index be9acb2b959..67929251286 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -66,7 +66,7 @@
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
* any price. Since page is never written to after the initialization we
* don't have to care about aliases on other CPUs.
@@ -380,7 +380,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
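
The zeroed pages mentioned above are kept in several cache colours; a later lookup (along the lines of the MIPS ZERO_PAGE() macro — names assumed here) picks the copy whose colour matches the user address, so a VCED/VECI alias can never arise:

static unsigned long coloured_zero_page(unsigned long empty_zero_page,
					unsigned long zero_page_mask,
					unsigned long vaddr)
{
	/* colour bits of the user address select the matching copy */
	return empty_zero_page + (vaddr & zero_page_mask);
}
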
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index cacfd31e8ec..7f840bc08ab 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -22,7 +22,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
phys_t end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
- | __WRITEABLE | flags);
+ | __WRITEABLE | flags);
address &= ~PMD_MASK;
end = address + size;
@@ -185,7 +185,7 @@ void __iounmap(const volatile void __iomem *addr)
if (!p)
printk(KERN_ERR "iounmap: bad address %p\n", addr);
- kfree(p);
+ kfree(p);
}
EXPORT_SYMBOL(__ioremap);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 8e666c55f4d..a29fba55b53 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -271,7 +271,7 @@ void __cpuinit build_clear_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
- * cache_line_size : 0;
+ * cache_line_size : 0;
while (off) {
build_clear_pref(&buf, -off);
off -= cache_line_size;
@@ -417,13 +417,13 @@ void __cpuinit build_copy_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_load_pref(&buf, -off);
off -= cache_line_size;
}
off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_store_pref(&buf, -off);
off -= cache_line_size;
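
The off expressions reindented above prime at most eight prefetches, one cache line apart, before the clear/copy loops start. Rewritten as a standalone helper (a sketch with hypothetical names):

static int initial_pref_off(int pref_bias, int cache_line_size)
{
	int lines;

	if (!cache_line_size)		/* no usable prefetch on this CPU */
		return 0;
	lines = pref_bias / cache_line_size;
	if (lines > 8)			/* min(8, ...) as in the code above */
		lines = 8;
	return lines * cache_line_size;
}
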
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ee331bbd8f8..e8adc0069d6 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -24,7 +24,7 @@ void pgd_init(unsigned long page)
entry = (unsigned long)invalid_pmd_table;
#endif
- p = (unsigned long *) page;
+ p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
do {
@@ -45,7 +45,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
- p = (unsigned long *) addr;
+ p = (unsigned long *) addr;
end = p + PTRS_PER_PMD;
do {
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index 1eb708ef75f..c6aaed934d5 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -159,7 +159,7 @@ static inline int __init indy_sc_probe(void)
}
/* XXX Check with wje if the Indy caches can differenciate between
- writeback + invalidate and just invalidate. */
+ writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8d90ff25b12..8bc67720e14 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -58,7 +58,7 @@ static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
static void r5k_sc_enable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
set_c0_config(R5K_CONF_SE);
@@ -68,7 +68,7 @@ static void r5k_sc_enable(void)
static void r5k_sc_disable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
blast_r5000_scache();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 2a7c9725b2a..493131c81a2 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -424,7 +424,7 @@ void __cpuinit tlb_init(void)
write_c0_pagegrain(pg);
}
- /* From this point on the ARC firmware is dead. */
+ /* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49ec72..820e6612d74 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
*
* Synthesize TLB refill handlers at runtime.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
* Copyright (C) 2011 MIPS Technologies, Inc.
@@ -212,7 +212,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
/*
* pgtable bits are assigned dynamically depending on processor feature
* and statically based on kernel configuration. This spits out the actual
- * values the kernel is using. Required to make sense from disassembled
+ * values the kernel is using. Required to make sense from disassembled
* TLB exception handlers.
*/
static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@ static u32 final_handler[64] __cpuinitdata;
* From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
* 2. A timing hazard exists for the TLBP instruction.
*
- * stalling_instruction
- * TLBP
+ * stalling_instruction
+ * TLBP
*
* The JTLB is being read for the TLBP throughout the stall generated by the
* previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@ static u32 final_handler[64] __cpuinitdata;
* The software work-around is to not allow the instruction preceding the TLBP
* to stall - make it an NOP or some other instruction guaranteed not to stall.
*
- * Errata 2 will not be fixed. This errata is also on the R5000.
+ * Errata 2 will not be fixed. This errata is also on the R5000.
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
@@ -581,6 +581,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -748,7 +749,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
*/
small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
- /* We can clobber tmp. It isn't used after this.*/
+ /* We can clobber tmp. It isn't used after this.*/
if (!small_sequence)
uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
@@ -830,12 +831,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
#elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
+# ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -955,7 +956,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -965,7 +966,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#else
/*
* smp_processor_id() << 3 is stored in CONTEXT.
- */
+ */
uasm_i_mfc0(p, ptr, C0_CONTEXT);
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1154,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1171,9 +1172,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_l_vmalloc_done(l, *p);
/*
- * tmp ptr
- * fall-through case = badvaddr *pgd_current
- * vmalloc case = badvaddr swapper_pg_dir
+ * tmp ptr
+ * fall-through case = badvaddr *pgd_current
+ * vmalloc case = badvaddr swapper_pg_dir
*/
if (vmalloc_branch_delay_filled)
@@ -1212,7 +1213,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* The in the LWX case we don't want to do the load in the
- * delay slot. It cannot issue in the same cycle and may be
+ * delay slot. It cannot issue in the same cycle and may be
* speculative and unneeded.
*/
if (use_lwx_insns())
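
The "1 0 1 0 1 << 6" comments retabbed above describe a constant trick: with the low bits already cleared, ori 0x540 followed by drotr 11 leaves 0xa800000000000000 in the top of the register — the cached XKPHYS base — without loading a 64-bit immediate. Arithmetic check as a standalone sketch:

#include <stdint.h>

static inline uint64_t ori_0x540_drotr11(uint64_t reg)
{
	reg |= 0x540;	/* binary 10101 << 6: bits 10, 8 and 6 */
	/* drotr reg, 11: the three bits land at 63, 61 and 59 (0xa8...) */
	return (reg >> 11) | (reg << (64 - 11));
}
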
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 39b89105622..942ff6c2eba 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -7,7 +7,7 @@
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
@@ -119,30 +119,30 @@ static struct insn insn_table[] __uasminitdata = {
{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
@@ -345,7 +345,7 @@ Ip_u2u1msbu3(op) \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
-#define I_u2u1msbdu3(op) \
+#define I_u2u1msbdu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, d-1, c); \
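
The M() initializers realigned in the insn_table hunk above pack the classic MIPS instruction fields (op 31:26, rs 25:21, rt 20:16, rd 15:11, re/shamt 10:6, func 5:0) into one word; a standalone sketch of the same packing:

#include <stdint.h>

static inline uint32_t mips_word(uint32_t op, uint32_t rs, uint32_t rt,
				 uint32_t rd, uint32_t re, uint32_t func)
{
	return (op << 26) | (rs << 21) | (rt << 16) |
	       (rd << 11) | (re << 6) | func;
}
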