author     Paul Mundt <lethal@linux-sh.org>   2006-11-27 12:06:26 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2006-12-06 10:45:39 +0900
commit     510c72ad2dd4e05e6908755f51ac89482c6eb987 (patch)
tree       fa2e9e9a674e38dd523d937329627560f0bd6b64 /arch/sh
parent     1dc41e58a553e612e3d0349bb60eef08f9462bde (diff)
sh: Fixup various PAGE_SIZE == 4096 assumptions.
There were a number of places that made evil PAGE_SIZE == 4k assumptions
that ended up breaking when trying to play with 8k and 64k page sizes;
this fixes those up.

The most significant change is the way we load THREAD_SIZE. Previously
this was done via:

	mov	#(THREAD_SIZE >> 8), reg
	shll8	reg

to avoid a memory access and allow the immediate load.

With a 64k PAGE_SIZE, we're out of range for the immediate load size
without resorting to special instructions available in later ISAs
(movi20s and so on). The "workaround" for this is to bump up the shift
to 10 and insert a shll2, which gives a bit more flexibility while still
being much cheaper than a memory access.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
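As an aside, here is a minimal C sketch (not part of the patch) of the
arithmetic behind the shift change: the SH "mov #imm, Rn" encoding only
holds a sign-extended 8-bit immediate, so THREAD_SIZE >> 8 stops fitting
once the thread stack reaches 64k, while THREAD_SIZE >> 10 still fits and
the full value can be rebuilt with shll8 followed by shll2. The
THREAD_SIZE values used below are assumptions chosen for illustration.

/* Illustrative only -- not from the patch. Checks whether the shifted
 * THREAD_SIZE constant fits the sign-extended 8-bit immediate used by
 * the SH "mov #imm, Rn" instruction. The THREAD_SIZE values below are
 * assumptions for the sake of the example (8k and 64k kernel stacks).
 */
#include <stdio.h>

static int fits_imm8(long v)
{
	return v >= -128 && v <= 127;	/* sign-extended 8-bit immediate range */
}

int main(void)
{
	const long thread_sizes[] = { 0x2000, 0x10000 };	/* assumed stack sizes */
	size_t i;

	for (i = 0; i < sizeof(thread_sizes) / sizeof(thread_sizes[0]); i++) {
		long ts = thread_sizes[i];

		/* old: mov #(THREAD_SIZE >> 8), reg ; shll8 reg */
		printf("THREAD_SIZE=%#lx: >>8 = %#lx (%s), ",
		       ts, ts >> 8, fits_imm8(ts >> 8) ? "fits" : "out of range");

		/* new: mov #(THREAD_SIZE >> 10), reg ; shll8 reg ; shll2 reg */
		printf(">>10 = %#lx (%s)\n",
		       ts >> 10, fits_imm8(ts >> 10) ? "fits" : "out of range");
	}
	return 0;
}

With an 8k stack both shifts fit; with a 64k stack only the shift-by-10
form does, which is why the patch bumps the shift and adds the extra shll2.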
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/kernel/cpu/sh3/entry.S    |  5
-rw-r--r--  arch/sh/kernel/head.S             |  5
-rw-r--r--  arch/sh/kernel/relocate_kernel.S  | 14
-rw-r--r--  arch/sh/mm/cache-sh4.c            |  4
-rw-r--r--  arch/sh/mm/clear_page.S           | 18
-rw-r--r--  arch/sh/mm/copy_page.S            | 16
-rw-r--r--  arch/sh/mm/init.c                 |  1
-rw-r--r--  arch/sh/mm/pg-dma.c               |  2
8 files changed, 31 insertions(+), 34 deletions(-)
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 5de99b49873..7ba3dcbe750 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -335,7 +335,7 @@ general_exception:
/* This code makes some assumptions to improve performance.
* Make sure they are stil true. */
#if PTRS_PER_PGD != PTRS_PER_PTE
-#error PDG and PTE sizes don't match
+#error PGD and PTE sizes don't match
#endif
/* gas doesn't flag impossible values for mov #immediate as an error */
@@ -547,8 +547,9 @@ ENTRY(handle_exception)
bt/s 1f ! It's a kernel to kernel transition.
mov r15, k0 ! save original stack to k0
/* User space to kernel */
- mov #(THREAD_SIZE >> 8), k1
+ mov #(THREAD_SIZE >> 10), k1
shll8 k1 ! k1 := THREAD_SIZE
+ shll2 k1
add current, k1
mov k1, r15 ! change to kernel stack
!
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
index b5ff2326461..6aca4bc6ec5 100644
--- a/arch/sh/kernel/head.S
+++ b/arch/sh/kernel/head.S
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page)
.long 0x00360000 /* INITRD_START */
.long 0x000a0000 /* INITRD_SIZE */
.long 0
- .balign 4096,0,4096
+ .balign PAGE_SIZE,0,PAGE_SIZE
.text
/*
@@ -70,8 +70,9 @@ ENTRY(_stext)
!
mov.l 2f, r0
mov r0, r15 ! Set initial r15 (stack pointer)
- mov #(THREAD_SIZE >> 8), r1
+ mov #(THREAD_SIZE >> 10), r1
shll8 r1 ! r1 = THREAD_SIZE
+ shll2 r1
sub r1, r0 !
#ifdef CONFIG_CPU_HAS_SR_RB
ldc r0, r7_bank ! ... and initial thread_info
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
index 8221b37c977..c66cb3209db 100644
--- a/arch/sh/kernel/relocate_kernel.S
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -7,11 +7,9 @@
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
-
#include <linux/linkage.h>
-
-#define PAGE_SIZE 4096 /* must be same value as in <asm/page.h> */
-
+#include <asm/addrspace.h>
+#include <asm/page.h>
.globl relocate_new_kernel
relocate_new_kernel:
@@ -20,8 +18,8 @@ relocate_new_kernel:
/* r6 = start_address */
/* r7 = vbr_reg */
- mov.l 10f,r8 /* 4096 */
- mov.l 11f,r9 /* 0xa0000000 */
+ mov.l 10f,r8 /* PAGE_SIZE */
+ mov.l 11f,r9 /* P2SEG */
/* stack setting */
add r8,r5
@@ -32,7 +30,7 @@ relocate_new_kernel:
0:
mov.l @r4+,r0 /* cmd = *ind++ */
-1: /* addr = (cmd | 0xa0000000) & 0xfffffff0 */
+1: /* addr = (cmd | P2SEG) & 0xfffffff0 */
mov r0,r2
or r9,r2
mov #-16,r1
@@ -92,7 +90,7 @@ relocate_new_kernel:
10:
.long PAGE_SIZE
11:
- .long 0xa0000000
+ .long P2SEG
relocate_new_kernel_end:
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 7e62ba071d6..ae531affccb 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -225,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start,
*/
if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
(start < CACHE_OC_ADDRESS_ARRAY))
- exec_offset = 0x20000000;
+ exec_offset = 0x20000000;
local_irq_save(flags);
__flush_cache_4096(start | SH_CACHE_ASSOC,
@@ -246,7 +246,7 @@ void flush_dcache_page(struct page *page)
/* Loop all the D-cache */
n = cpu_data->dcache.n_aliases;
- for (i = 0; i < n; i++, addr += PAGE_SIZE)
+ for (i = 0; i < n; i++, addr += 4096)
flush_cache_4096(addr, phys);
}
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S
index 7b96425ae27..8a706131e52 100644
--- a/arch/sh/mm/clear_page.S
+++ b/arch/sh/mm/clear_page.S
@@ -1,12 +1,12 @@
-/* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $
- *
+/*
* __clear_user_page, __clear_user, clear_page implementation of SuperH
*
* Copyright (C) 2001 Kaz Kojima
* Copyright (C) 2001, 2002 Niibe Yutaka
- *
+ * Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
+#include <asm/page.h>
/*
* clear_page_slow
@@ -18,11 +18,11 @@
/*
* r0 --- scratch
* r4 --- to
- * r5 --- to + 4096
+ * r5 --- to + PAGE_SIZE
*/
ENTRY(clear_page_slow)
mov r4,r5
- mov.w .Llimit,r0
+ mov.l .Llimit,r0
add r0,r5
mov #0,r0
!
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow)
!
rts
nop
-.Llimit: .word (4096-28)
+.Llimit: .long (PAGE_SIZE-28)
ENTRY(__clear_user)
!
@@ -164,10 +164,10 @@ ENTRY(__clear_user)
* r0 --- scratch
* r4 --- to
* r5 --- orig_to
- * r6 --- to + 4096
+ * r6 --- to + PAGE_SIZE
*/
ENTRY(__clear_user_page)
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
mov r4,r6
add r0,r6
mov #0,r0
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page)
!
rts
nop
-.L4096: .word 4096
+.Lpsz: .long PAGE_SIZE
#endif
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S
index 1addffe117c..397c94c9731 100644
--- a/arch/sh/mm/copy_page.S
+++ b/arch/sh/mm/copy_page.S
@@ -1,12 +1,12 @@
-/* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $
- *
+/*
* copy_page, __copy_user_page, __copy_user implementation of SuperH
*
* Copyright (C) 2001 Niibe Yutaka & Kaz Kojima
* Copyright (C) 2002 Toshinobu Sugioka
- *
+ * Copyright (C) 2006 Paul Mundt
*/
#include <linux/linkage.h>
+#include <asm/page.h>
/*
* copy_page_slow
@@ -18,7 +18,7 @@
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
* r9 --- not used
* r10 --- to
* r11 --- from
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow)
mov r4,r10
mov r5,r11
mov r5,r8
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
add r0,r8
!
1: mov.l @r11+,r0
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow)
/*
* r0, r1, r2, r3, r4, r5, r6, r7 --- scratch
- * r8 --- from + 4096
+ * r8 --- from + PAGE_SIZE
* r9 --- orig_to
* r10 --- to
* r11 --- from
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page)
mov r5,r11
mov r6,r9
mov r5,r8
- mov.w .L4096,r0
+ mov.l .Lpsz,r0
add r0,r8
!
1: ocbi @r9
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page)
rts
nop
#endif
-.L4096: .word 4096
+.Lpsz: .long PAGE_SIZE
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 462bfeac6d9..59f4cc18235 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -217,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
void __init mem_init(void)
{
- extern unsigned long empty_zero_page[1024];
int codesize, reservedpages, datasize, initsize;
int tmp;
extern unsigned long memory_start;
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c
index 1406d2e348c..bb23679369d 100644
--- a/arch/sh/mm/pg-dma.c
+++ b/arch/sh/mm/pg-dma.c
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from)
static void clear_page_dma(void *to)
{
- extern unsigned long empty_zero_page[1024];
-
/*
* We get invoked quite early on, if the DMAC hasn't been initialized
* yet, fall back on the slow manual implementation.