author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-14 14:27:45 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-14 14:27:45 -0800
commit		cebfa85eb86d92bf85d3b041c6b044184517a988
tree		be0a374556fe335ce96dfdb296c89537750d5868 /arch/mips/mm
parent		d42b3a2906a10b732ea7d7f849d49be79d242ef0
parent		241738bd51cb0efe58e6c570223153e970afe3ae
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "The MIPS bits for 3.8.  This also includes a bunch of fixes that were
  sitting in the linux-mips.org git tree for a long time.  This pull
  request contains updates to several OCTEON drivers and the board
  support code for BCM47XX, BCM63XX, XLP, XLR, XLS, lantiq, Loongson1B,
  updates to the SSB bus support, MIPS kexec code, and adds support for
  kdump.

  When pulling this, there are two expected merge conflicts in
  include/linux/bcma/bcma_driver_chipcommon.h which are trivial to
  resolve, just remove the conflict markers and keep both alternatives."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (90 commits)
  MIPS: PMC-Sierra Yosemite: Remove support.
  VIDEO: Newport Fix console crashes
  MIPS: wrppmc: Fix build of PCI code.
  MIPS: IP22/IP28: Fix build of EISA code.
  MIPS: RB532: Fix build of prom code.
  MIPS: PowerTV: Fix build.
  MIPS: IP27: Correct fucked grammar in ops-bridge.c
  MIPS: Highmem: Fix build error if CONFIG_DEBUG_HIGHMEM is disabled
  MIPS: Fix potencial corruption
  MIPS: Fix for warning from FPU emulation code
  MIPS: Handle COP3 Unusable exception as COP1X for FP emulation
  MIPS: Fix poweroff failure when HOTPLUG_CPU configured.
  MIPS: MT: Fix build with CONFIG_UIDGID_STRICT_TYPE_CHECKS=y
  MIPS: Remove unused smvp.h
  MIPS/EDAC: Improve OCTEON EDAC support.
  MIPS: OCTEON: Add definitions for OCTEON memory contoller registers.
  MIPS: OCTEON: Add OCTEON family definitions to octeon-model.h
  ata: pata_octeon_cf: Use correct byte order for DMA in when built little-endian.
  MIPS/OCTEON/ata: Convert pata_octeon_cf.c to use device tree.
  MIPS: Remove usage of CEVT_R4K_LIB config option.
  ...
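The conflict resolution mentioned above is mechanical. For readers who have not done it before, a conflicted hunk looks like the sketch below (the BCMA_CC_* names are placeholders, not the actual bcma_driver_chipcommon.h contents); "keep both alternatives" means deleting the three marker lines and retaining both declarations:

	<<<<<<< HEAD
	#define BCMA_CC_FOO		0x0001	/* added on one side of the merge */
	=======
	#define BCMA_CC_BAR		0x0002	/* added on the other side */
	>>>>>>> upstream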
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/c-octeon.c		67
-rw-r--r--	arch/mips/mm/c-r4k.c		23
-rw-r--r--	arch/mips/mm/highmem.c		3
-rw-r--r--	arch/mips/mm/page.c		9
-rw-r--r--	arch/mips/mm/pgtable-64.c	31
-rw-r--r--	arch/mips/mm/tlb-r4k.c		22
-rw-r--r--	arch/mips/mm/tlbex.c		123
7 files changed, 178 insertions(+), 100 deletions(-)
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 44e69e7a451..6ec04daf423 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2005-2007 Cavium Networks
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -28,6 +29,7 @@
#include <asm/octeon/octeon.h>
unsigned long long cache_err_dcache[NR_CPUS];
+EXPORT_SYMBOL_GPL(cache_err_dcache);
/**
* Octeon automatically flushes the dcache on tlb changes, so
@@ -284,39 +286,59 @@ void __cpuinit octeon_cache_init(void)
board_cache_error_setup = octeon_cache_error_setup;
}
-/**
+/*
* Handle a cache error exception
*/
+static RAW_NOTIFIER_HEAD(co_cache_error_chain);
-static void cache_parity_error_octeon(int non_recoverable)
+int register_co_cache_error_notifier(struct notifier_block *nb)
{
- unsigned long coreid = cvmx_get_core_num();
- uint64_t icache_err = read_octeon_c0_icacheerr();
-
- pr_err("Cache error exception:\n");
- pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
- if (icache_err & 1) {
- pr_err("CacheErr (Icache) == %llx\n",
- (unsigned long long)icache_err);
- write_octeon_c0_icacheerr(0);
- }
- if (cache_err_dcache[coreid] & 1) {
- pr_err("CacheErr (Dcache) == %llx\n",
- (unsigned long long)cache_err_dcache[coreid]);
- cache_err_dcache[coreid] = 0;
- }
+ return raw_notifier_chain_register(&co_cache_error_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);
+
+int unregister_co_cache_error_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
- if (non_recoverable)
- panic("Can't handle cache error: nested exception");
+static void co_cache_error_call_notifiers(unsigned long val)
+{
+ int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);
+ if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
+ u64 dcache_err;
+ unsigned long coreid = cvmx_get_core_num();
+ u64 icache_err = read_octeon_c0_icacheerr();
+
+ if (val) {
+ dcache_err = cache_err_dcache[coreid];
+ cache_err_dcache[coreid] = 0;
+ } else {
+ dcache_err = read_octeon_c0_dcacheerr();
+ }
+
+ pr_err("Core%lu: Cache error exception:\n", coreid);
+ pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
+ if (icache_err & 1) {
+ pr_err("CacheErr (Icache) == %llx\n",
+ (unsigned long long)icache_err);
+ write_octeon_c0_icacheerr(0);
+ }
+ if (dcache_err & 1) {
+ pr_err("CacheErr (Dcache) == %llx\n",
+ (unsigned long long)dcache_err);
+ }
+ }
}
-/**
+/*
* Called when the exception is recoverable
*/
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
- cache_parity_error_octeon(0);
+ co_cache_error_call_notifiers(0);
}
/**
@@ -325,5 +347,6 @@ asmlinkage void cache_parity_error_octeon_recoverable(void)
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
- cache_parity_error_octeon(1);
+ co_cache_error_call_notifiers(1);
+ panic("Can't handle cache error: nested exception");
}
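The point of this hunk is to let another subsystem claim OCTEON cache errors before the generic register dump runs; per the shortlog ("MIPS/EDAC: Improve OCTEON EDAC support."), the OCTEON EDAC driver is the intended consumer. A minimal sketch of such a consumer, assuming only the API added above (the my_* names are illustrative, not part of this diff):

	#include <linux/notifier.h>

	static int my_co_cache_error_event(struct notifier_block *this,
					   unsigned long event, void *ptr)
	{
		/* event is 1 on the non-recoverable path, 0 otherwise */
		/* ... report the error via EDAC counters, logs, etc. ... */
		return NOTIFY_STOP;	/* claim it; the default pr_err() dump is skipped */
	}

	static struct notifier_block my_co_cache_error_notifier = {
		.notifier_call = my_co_cache_error_event,
	};

	/* e.g. from the driver's probe routine: */
	register_co_cache_error_notifier(&my_co_cache_error_notifier);

Since NOTIFY_STOP is NOTIFY_OK with NOTIFY_STOP_MASK set, the test in co_cache_error_call_notifiers() sees NOTIFY_OK after masking and suppresses the fallback dump.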
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4c32ede464b..0f7d788e881 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -632,9 +632,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
if (size >= scache_size)
r4k_blast_scache();
else {
- unsigned long lsize = cpu_scache_line_size();
- unsigned long almask = ~(lsize - 1);
-
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
@@ -643,9 +640,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
* hit ops with insufficient alignment. Solved by
* aligning the address to cache line size.
*/
- cache_op(Hit_Writeback_Inv_SD, addr & almask);
- cache_op(Hit_Writeback_Inv_SD,
- (addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}
__sync();
@@ -655,12 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
if (cpu_has_safe_index_cacheops && size >= dcache_size) {
r4k_blast_dcache();
} else {
- unsigned long lsize = cpu_dcache_line_size();
- unsigned long almask = ~(lsize - 1);
-
R4600_HIT_CACHEOP_WAR_IMPL;
- cache_op(Hit_Writeback_Inv_D, addr & almask);
- cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
blast_inv_dcache_range(addr, addr + size);
}
@@ -947,7 +936,6 @@ static void __cpuinit probe_pcache(void)
case CPU_RM7000:
rm7k_erratum31();
- case CPU_RM9000:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 4;
@@ -958,9 +946,7 @@ static void __cpuinit probe_pcache(void)
c->dcache.ways = 4;
c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);
-#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
c->options |= MIPS_CPU_CACHE_CDEX_P;
-#endif
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -1245,7 +1231,6 @@ static void __cpuinit setup_scache(void)
return;
case CPU_RM7000:
- case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
rm7k_sc_init();
#endif
@@ -1348,10 +1333,10 @@ static int __init cca_setup(char *str)
{
get_option(&str, &cca);
- return 1;
+ return 0;
}
-__setup("cca=", cca_setup);
+early_param("cca", cca_setup);
static void __cpuinit coherency_setup(void)
{
@@ -1401,10 +1386,10 @@ static int __init setcoherentio(char *str)
{
coherentio = 1;
- return 1;
+ return 0;
}
-__setup("coherentio", setcoherentio);
+early_param("coherentio", setcoherentio);
#endif
static void __cpuinit r4k_cache_error_setup(void)
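A note on the two __setup()-to-early_param() conversions above: the return-value flips are not incidental. A __setup() handler returns 1 to mark the option as consumed, while an early_param() handler returns 0 on success, and early_param() handlers run during parse_early_param(), well before normal __setup() processing. Presumably the point is to have cca= and coherentio parsed before the cache coherency code consults them.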
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index aff57057a94..da815d29523 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,3 +1,4 @@
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
@@ -67,7 +68,7 @@ EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type;
+ int type __maybe_unused;
if (vaddr < FIXADDR_START) { // FIXME
pagefault_enable();
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 98f530e1821..8e666c55f4d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -140,15 +140,6 @@ static void __cpuinit set_prefetch_parameters(void)
pref_bias_copy_load = 256;
break;
- case CPU_RM9000:
- /*
- * As a workaround for erratum G105 which make the
- * PrepareForStore hint unusable we fall back to
- * StoreRetained on the RM9000. Once it is known which
- * versions of the RM9000 we'll be able to condition-
- * alize this.
- */
-
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 25407794edb..ee331bbd8f8 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -11,6 +11,7 @@
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
void pgd_init(unsigned long page)
{
@@ -61,6 +62,36 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
}
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ if (!pmd_trans_splitting(*pmdp)) {
+ pmd_t pmd = pmd_mksplitting(*pmdp);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ }
+}
+
+#endif
+
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = (page_to_pfn(page) << _PFN_SHIFT) | pgprot_val(prot);
+
+ return pmd;
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+ flush_tlb_all();
+}
+
void __init pagetable_init(void)
{
unsigned long vaddr;
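On the transparent-huge-page helpers added above: set_pmd_at() follows the PMD store with a full flush_tlb_all() rather than a targeted flush. That is the conservative choice, and it fits with the comment later in this series that for huge TLB entries the "pmd doesn't contain an address but instead contains the tlb pte" — no finer-grained invalidation is attempted here.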
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 88e79ad6f81..2a7c9725b2a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -295,7 +295,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
pudp = pud_offset(pgdp, address);
pmdp = pmd_offset(pudp, address);
idx = read_c0_index();
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* this could be a huge page */
if (pmd_huge(*pmdp)) {
unsigned long lo;
@@ -367,6 +367,26 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
EXIT_CRITICAL(flags);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+int __init has_transparent_hugepage(void)
+{
+ unsigned int mask;
+ unsigned long flags;
+
+ ENTER_CRITICAL(flags);
+ write_c0_pagemask(PM_HUGE_MASK);
+ back_to_back_c0_hazard();
+ mask = read_c0_pagemask();
+ write_c0_pagemask(PM_DEFAULT_MASK);
+
+ EXIT_CRITICAL(flags);
+
+ return mask == PM_HUGE_MASK;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
static int __cpuinitdata ntlb;
static int __init set_ntlb(char *str)
{
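The has_transparent_hugepage() probe above is a write-and-read-back feature test: it loads PM_HUGE_MASK into the CP0 PageMask register, inserts a hazard barrier, and reads the register back. A core that does not implement the huge-page mask bits will not retain them, so the final comparison against PM_HUGE_MASK doubles as the support check; PM_DEFAULT_MASK is restored before the critical section ends either way.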
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 2833dcb67b5..05613355627 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -158,7 +158,7 @@ enum label_id {
label_smp_pgtable_change,
label_r3000_write_probe_fail,
label_large_segbits_fault,
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
label_tlb_huge_update,
#endif
};
@@ -177,13 +177,15 @@ UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif
static int __cpuinitdata hazard_instance;
-static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
+static void __cpuinit uasm_bgezl_hazard(u32 **p,
+ struct uasm_reloc **r,
+ int instance)
{
switch (instance) {
case 0 ... 7:
@@ -194,7 +196,9 @@ static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
}
}
-static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
+static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
+ u32 **p,
+ int instance)
{
switch (instance) {
case 0 ... 7:
@@ -206,19 +210,59 @@ static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
}
/*
- * For debug purposes.
+ * pgtable bits are assigned dynamically depending on processor feature
+ * and statically based on kernel configuration. This spits out the actual
+ * values the kernel is using. Required to make sense of the disassembled
+ * TLB exception handlers.
*/
-static inline void dump_handler(const u32 *handler, int count)
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...) \
+ pr_debug("#define " fmt, ##__VA_ARGS__)
+
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+
+ pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+ pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
+ pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+ pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
+ pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+#endif
+ if (cpu_has_rixi) {
+#ifdef _PAGE_NO_EXEC_SHIFT
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+#endif
+#ifdef _PAGE_NO_READ_SHIFT
+ pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+#endif
+ }
+ pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+ pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+ pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+ pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
+ pr_debug("\n");
+}
+
+static inline void dump_handler(const char *symbol, const u32 *handler, int count)
{
int i;
+ pr_debug("LEAF(%s)\n", symbol);
+
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
for (i = 0; i < count; i++)
- pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);
+
- pr_debug("\t.set pop\n");
+ pr_debug("\t.set\tpop\n");
+ pr_debug("\tEND(%s)\n", symbol);
}
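Two small observations on the dump_handler() rework: the output now brackets each handler in LEAF()/END() and moves the instruction address into a trailing comment, so the pr_debug stream reads as reassemblable source, to be combined with the #define header emitted by output_pgtable_bits_defines() above. Everything goes through pr_debug(), so nothing is printed unless DEBUG or dynamic debug is enabled. Schematically, the dump for one handler would look like this (opcodes and addresses invented for illustration):

	LEAF(r4000_tlb_refill)
		.set push
		.set noreorder
		.word	0x401a4000		# 0xffffffff80000000
		.word	0x3c1b8046		# 0xffffffff80000004
		...
		.set	pop
		END(r4000_tlb_refill)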
/* The only general purpose registers allowed in TLB handlers. */
@@ -401,7 +445,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
memcpy((void *)ebase, tlb_handler, 0x80);
- dump_handler((u32 *)ebase, 32);
+ dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@@ -443,7 +487,6 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
case CPU_R4600:
case CPU_R4700:
case CPU_R5000:
- case CPU_R5000A:
case CPU_NEVADA:
uasm_i_nop(p);
uasm_i_tlbp(p);
@@ -517,7 +560,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
break;
case CPU_R5000:
- case CPU_R5000A:
case CPU_NEVADA:
uasm_i_nop(p); /* QED specifies 2 nops hazard */
uasm_i_nop(p); /* QED specifies 2 nops hazard */
@@ -565,24 +607,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
tlbw(p);
break;
- case CPU_RM9000:
- /*
- * When the JTLB is updated by tlbwi or tlbwr, a subsequent
- * use of the JTLB for instructions should not occur for 4
- * cpu cycles and use for data translations should not occur
- * for 3 cpu cycles.
- */
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- tlbw(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- uasm_i_ssnop(p);
- break;
-
case CPU_VR4111:
case CPU_VR4121:
case CPU_VR4122:
@@ -629,7 +653,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
}
}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static __cpuinit void build_restore_pagemask(u32 **p,
struct uasm_reloc **r,
@@ -755,7 +779,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#ifdef CONFIG_64BIT
/*
@@ -1200,7 +1224,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
/* Adjust the context during the load latency. */
build_adjust_context(p, tmp);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* In the LWX case we don't want to do the load in the
@@ -1209,7 +1233,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
*/
if (use_lwx_insns())
uasm_i_nop(p);
-#endif /* CONFIG_HUGETLB_PAGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
/* build_update_entries */
@@ -1312,7 +1336,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif
@@ -1322,7 +1346,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
uasm_l_leave(&l, p);
uasm_i_eret(&p); /* return from trap */
}
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
@@ -1367,7 +1391,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
uasm_copy_handler(relocs, labels, tlb_handler, p, f);
final_len = p - tlb_handler;
} else {
-#if defined(CONFIG_HUGETLB_PAGE)
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
const enum label_id ls = label_tlb_huge_update;
#else
const enum label_id ls = label_vmalloc;
@@ -1436,7 +1460,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
memcpy((void *)ebase, final_handler, 0x100);
- dump_handler((u32 *)ebase, 64);
+ dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
}
/*
@@ -1493,7 +1517,8 @@ static void __cpuinit build_r4000_setup_pgd(void)
pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
(unsigned int)(p - tlbmiss_handler_setup_pgd));
- dump_handler(tlbmiss_handler_setup_pgd,
+ dump_handler("tlbmiss_handler",
+ tlbmiss_handler_setup_pgd,
ARRAY_SIZE(tlbmiss_handler_setup_pgd));
}
#endif
@@ -1763,7 +1788,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl));
- dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+ dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
@@ -1793,7 +1818,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs));
- dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+ dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
@@ -1823,7 +1848,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm));
- dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+ dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
@@ -1842,7 +1867,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
#endif
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* For huge tlb entries, pmd doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
@@ -1958,7 +1983,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
build_make_valid(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when build_r4000_tlbchange_handler_head
* spots a huge page.
@@ -2030,7 +2055,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbl));
- dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
+ dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
@@ -2051,7 +2076,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
@@ -2077,7 +2102,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbs));
- dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
+ dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
@@ -2099,7 +2124,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
build_make_write(&p, &r, wr.r1, wr.r2);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/*
* This is the entry point when
* build_r4000_tlbchange_handler_head spots a huge page.
@@ -2125,7 +2150,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
(unsigned int)(p - handle_tlbm));
- dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
+ dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
@@ -2137,6 +2162,8 @@ void __cpuinit build_tlb_refill_handler(void)
*/
static int run_once = 0;
+ output_pgtable_bits_defines();
+
#ifdef CONFIG_64BIT
check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif