Diffstat (limited to 'arch/metag')
-rw-r--r--  arch/metag/Kconfig                       |  12
-rw-r--r--  arch/metag/Makefile                      |   2
-rw-r--r--  arch/metag/boot/dts/Makefile             |  10
-rw-r--r--  arch/metag/configs/meta1_defconfig       |   1
-rw-r--r--  arch/metag/configs/meta2_defconfig       |   1
-rw-r--r--  arch/metag/configs/meta2_smp_defconfig   |   1
-rw-r--r--  arch/metag/include/asm/hugetlb.h         |   1
-rw-r--r--  arch/metag/include/asm/metag_mem.h       |   3
-rw-r--r--  arch/metag/include/asm/thread_info.h     |   2
-rw-r--r--  arch/metag/include/uapi/asm/Kbuild       |   1
-rw-r--r--  arch/metag/include/uapi/asm/ech.h        |  15
-rw-r--r--  arch/metag/kernel/cachepart.c            |  16
-rw-r--r--  arch/metag/kernel/da.c                   |   2
-rw-r--r--  arch/metag/kernel/head.S                 |   8
-rw-r--r--  arch/metag/kernel/perf/perf_event.c      |  74
-rw-r--r--  arch/metag/kernel/process.c              |  37
-rw-r--r--  arch/metag/kernel/ptrace.c               |  34
-rw-r--r--  arch/metag/kernel/setup.c                |   1
-rw-r--r--  arch/metag/kernel/smp.c                  | 117
-rw-r--r--  arch/metag/kernel/traps.c                |   6
-rw-r--r--  arch/metag/mm/Kconfig                    |   8
-rw-r--r--  arch/metag/mm/init.c                     |  31
-rw-r--r--  arch/metag/oprofile/Makefile             |  17
-rw-r--r--  arch/metag/oprofile/backtrace.c          |  63
-rw-r--r--  arch/metag/oprofile/backtrace.h          |   6
-rw-r--r--  arch/metag/oprofile/common.c             |  66
26 files changed, 421 insertions(+), 114 deletions(-)
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index afc8973d148..dcd94406030 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -1,7 +1,3 @@
-config SYMBOL_PREFIX
- string
- default "_"
-
config METAG
def_bool y
select EMBEDDED
@@ -25,8 +21,10 @@ config METAG
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UNDERSCORE_SYMBOL_PREFIX
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select OF
@@ -54,9 +52,6 @@ config GENERIC_HWEIGHT
config GENERIC_CALIBRATE_DELAY
def_bool y
-config GENERIC_GPIO
- def_bool n
-
config NO_IOPORT
def_bool y
@@ -209,6 +204,9 @@ config METAG_PERFCOUNTER_IRQS
When disabled, Performance Counters information will be collected
based on Timer Interrupt.
+config HW_PERF_EVENTS
+ def_bool METAG_PERFCOUNTER_IRQS && PERF_EVENTS
+
config METAG_DA
bool "DA support"
help
diff --git a/arch/metag/Makefile b/arch/metag/Makefile
index 81bd6a1c748..b566116b171 100644
--- a/arch/metag/Makefile
+++ b/arch/metag/Makefile
@@ -49,6 +49,8 @@ core-y += arch/metag/mm/
libs-y += arch/metag/lib/
libs-y += arch/metag/tbx/
+drivers-$(CONFIG_OPROFILE) += arch/metag/oprofile/
+
boot := arch/metag/boot
boot_targets += uImage
diff --git a/arch/metag/boot/dts/Makefile b/arch/metag/boot/dts/Makefile
index e0b5afd8bde..dbd95217733 100644
--- a/arch/metag/boot/dts/Makefile
+++ b/arch/metag/boot/dts/Makefile
@@ -4,13 +4,17 @@ dtb-y += skeleton.dtb
builtindtb-y := skeleton
ifneq ($(CONFIG_METAG_BUILTIN_DTB_NAME),"")
- builtindtb-y := $(CONFIG_METAG_BUILTIN_DTB_NAME)
+ builtindtb-y := $(patsubst "%",%,$(CONFIG_METAG_BUILTIN_DTB_NAME))
endif
-obj-$(CONFIG_METAG_BUILTIN_DTB) += $(patsubst "%",%,$(builtindtb-y)).dtb.o
+
+dtb-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb
+obj-$(CONFIG_METAG_BUILTIN_DTB) += $(builtindtb-y).dtb.o
targets += dtbs
targets += $(dtb-y)
+.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
+
dtbs: $(addprefix $(obj)/, $(dtb-y))
-clean-files += *.dtb
+clean-files += *.dtb *.dtb.S
diff --git a/arch/metag/configs/meta1_defconfig b/arch/metag/configs/meta1_defconfig
index c35a75e8ecf..01cd67e4403 100644
--- a/arch/metag/configs/meta1_defconfig
+++ b/arch/metag/configs/meta1_defconfig
@@ -1,6 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
-CONFIG_LOG_BUF_SHIFT=13
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_KALLSYMS_ALL=y
diff --git a/arch/metag/configs/meta2_defconfig b/arch/metag/configs/meta2_defconfig
index fb314841018..643392ba7ed 100644
--- a/arch/metag/configs/meta2_defconfig
+++ b/arch/metag/configs/meta2_defconfig
@@ -1,7 +1,6 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=13
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_KALLSYMS_ALL=y
diff --git a/arch/metag/configs/meta2_smp_defconfig b/arch/metag/configs/meta2_smp_defconfig
index 6c7b777ac27..f3306737da2 100644
--- a/arch/metag/configs/meta2_smp_defconfig
+++ b/arch/metag/configs/meta2_smp_defconfig
@@ -1,7 +1,6 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=13
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_KALLSYMS_ALL=y
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
index f545477e61f..471f481e67f 100644
--- a/arch/metag/include/asm/hugetlb.h
+++ b/arch/metag/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
#define _ASM_METAG_HUGETLB_H
#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/metag/include/asm/metag_mem.h b/arch/metag/include/asm/metag_mem.h
index 3f7b54d8cca..aa5a076df43 100644
--- a/arch/metag/include/asm/metag_mem.h
+++ b/arch/metag/include/asm/metag_mem.h
@@ -700,6 +700,9 @@
#define SYSC_xCPARTG_AND_S 8
#define SYSC_xCPARTL_OR_BITS 0x000F0000 /* Ors into top 4 bits */
#define SYSC_xCPARTL_OR_S 16
+#ifdef METAC_2_1
+#define SYSC_DCPART_GCON_BIT 0x00100000 /* Coherent shared local */
+#endif /* METAC_2_1 */
#define SYSC_xCPARTG_OR_BITS 0x0F000000 /* Ors into top 4 bits */
#define SYSC_xCPARTG_OR_S 24
#define SYSC_CWRMODE_BIT 0x80000000 /* Write cache mode bit */
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 0ecd34d8b5f..7c4a3300614 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -150,6 +150,4 @@ static inline int kstack_end(void *addr)
#define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
_TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
-#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
-
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index 876c71f866d..84e09feb4d5 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -2,6 +2,7 @@
include include/uapi/asm-generic/Kbuild.asm
header-y += byteorder.h
+header-y += ech.h
header-y += ptrace.h
header-y += resource.h
header-y += sigcontext.h
diff --git a/arch/metag/include/uapi/asm/ech.h b/arch/metag/include/uapi/asm/ech.h
new file mode 100644
index 00000000000..ac94d1cf9be
--- /dev/null
+++ b/arch/metag/include/uapi/asm/ech.h
@@ -0,0 +1,15 @@
+#ifndef _UAPI_METAG_ECH_H
+#define _UAPI_METAG_ECH_H
+
+/*
+ * These bits can be set in the top half of the D0.8 register when DSP context
+ * switching is enabled, in order to support partial DSP context save/restore.
+ */
+
+#define TBICTX_XEXT_BIT 0x1000 /* Enable extended context save */
+#define TBICTX_XTDP_BIT 0x0800 /* DSP accumulators/RAM/templates */
+#define TBICTX_XHL2_BIT 0x0400 /* Hardware loops */
+#define TBICTX_XAXX_BIT 0x0200 /* Extended AX registers (A*.4-7) */
+#define TBICTX_XDX8_BIT 0x0100 /* Extended DX registers (D*.8-15) */
+
+#endif /* _UAPI_METAG_ECH_H */
diff --git a/arch/metag/kernel/cachepart.c b/arch/metag/kernel/cachepart.c
index 3a589dfb966..954548b1bea 100644
--- a/arch/metag/kernel/cachepart.c
+++ b/arch/metag/kernel/cachepart.c
@@ -24,15 +24,21 @@
unsigned int get_dcache_size(void)
{
unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
- return 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
- >> METAC_CORECFG2_DCSZ_S);
+ unsigned int sz = 0x1000 << ((config2 & METAC_CORECFG2_DCSZ_BITS)
+ >> METAC_CORECFG2_DCSZ_S);
+ if (config2 & METAC_CORECFG2_DCSMALL_BIT)
+ sz >>= 6;
+ return sz;
}
unsigned int get_icache_size(void)
{
unsigned int config2 = metag_in32(METAC_CORE_CONFIG2);
- return 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
- >> METAC_CORE_C2ICSZ_S);
+ unsigned int sz = 0x1000 << ((config2 & METAC_CORE_C2ICSZ_BITS)
+ >> METAC_CORE_C2ICSZ_S);
+ if (config2 & METAC_CORECFG2_ICSMALL_BIT)
+ sz >>= 6;
+ return sz;
}
unsigned int get_global_dcache_size(void)
@@ -61,7 +67,7 @@ static unsigned int get_thread_cache_size(unsigned int cache, int thread_id)
return 0;
#if PAGE_OFFSET >= LINGLOBAL_BASE
/* Checking for global cache */
- cache_size = (cache == DCACHE ? get_global_dache_size() :
+ cache_size = (cache == DCACHE ? get_global_dcache_size() :
get_global_icache_size());
offset = 8;
#else
diff --git a/arch/metag/kernel/da.c b/arch/metag/kernel/da.c
index 52aabb658fd..a35dbed6fff 100644
--- a/arch/metag/kernel/da.c
+++ b/arch/metag/kernel/da.c
@@ -5,12 +5,14 @@
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/da.h>
#include <asm/metag_mem.h>
bool _metag_da_present;
+EXPORT_SYMBOL_GPL(_metag_da_present);
int __init metag_da_probe(void)
{
diff --git a/arch/metag/kernel/head.S b/arch/metag/kernel/head.S
index 969dffabc03..713f71d1bdf 100644
--- a/arch/metag/kernel/head.S
+++ b/arch/metag/kernel/head.S
@@ -1,6 +1,7 @@
! Copyright 2005,2006,2007,2009 Imagination Technologies
#include <linux/init.h>
+#include <asm/metag_mem.h>
#include <generated/asm-offsets.h>
#undef __exit
@@ -48,6 +49,13 @@ __exit:
.global _secondary_startup
.type _secondary_startup,function
_secondary_startup:
+#if CONFIG_PAGE_OFFSET < LINGLOBAL_BASE
+ ! In case GCOn has just been turned on we need to fence any writes that
+ ! the boot thread might have performed prior to coherency taking effect.
+ MOVT D0Re0,#HI(LINSYSEVENT_WR_ATOMIC_UNLOCK)
+ MOV D1Re0,#0
+ SETD [D0Re0], D1Re0
+#endif
MOVT A0StP,#HI(_secondary_data_stack)
ADD A0StP,A0StP,#LO(_secondary_data_stack)
GETD A0StP,[A0StP]
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index a876d5ff389..366569425c5 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -22,9 +22,9 @@
#include <linux/slab.h>
#include <asm/core_reg.h>
-#include <asm/hwthread.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/processor.h>
#include "perf_event.h"
@@ -40,10 +40,10 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/* PMU admin */
const char *perf_pmu_name(void)
{
- if (metag_pmu)
- return metag_pmu->pmu.name;
+ if (!metag_pmu)
+ return NULL;
- return NULL;
+ return metag_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
@@ -171,6 +171,7 @@ static int metag_pmu_event_init(struct perf_event *event)
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
+ case PERF_TYPE_RAW:
err = _hw_perf_event_init(event);
break;
@@ -211,9 +212,10 @@ again:
/*
* Calculate the delta and add it to the counter.
*/
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;
local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
}
int metag_pmu_event_set_period(struct perf_event *event,
@@ -223,6 +225,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed */
+ if (unlikely(period != hwc->last_period))
+ left += period - hwc->last_period;
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
@@ -240,8 +246,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
if (left > (s64)metag_pmu->max_period)
left = metag_pmu->max_period;
- if (metag_pmu->write)
- metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+ if (metag_pmu->write) {
+ local64_set(&hwc->prev_count, -(s32)left);
+ metag_pmu->write(idx, -left & MAX_PERIOD);
+ }
perf_event_update_userpage(event);
@@ -549,6 +557,10 @@ static int _hw_perf_event_init(struct perf_event *event)
if (err)
return err;
break;
+
+ case PERF_TYPE_RAW:
+ mapping = attr->config;
+ break;
}
/* Return early if the event is unsupported */
@@ -610,15 +622,13 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
WARN_ONCE((config != 0x100),
"invalid configuration (%d) for counter (%d)\n",
config, idx);
-
- /* Reset the cycle count */
- __core_reg_set(TXTACTCYC, 0);
+ local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
goto unlock;
}
/* Check for a core internal or performance channel event. */
if (tmp) {
- void *perf_addr = (void *)PERF_COUNT(idx);
+ void *perf_addr;
/*
* Anything other than a cycle count will write the low-
@@ -632,9 +642,14 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
case 0xf0:
perf_addr = (void *)PERF_CHAN(idx);
break;
+
+ default:
+ perf_addr = NULL;
+ break;
}
- metag_out32((tmp & 0x0f), perf_addr);
+ if (perf_addr)
+ metag_out32((config & 0x0f), perf_addr);
/*
* Now we use the high nibble as the performance event to
@@ -643,13 +658,21 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
config = tmp >> 4;
}
- /*
- * Enabled counters start from 0. Early cores clear the count on
- * write but newer cores don't, so we make sure that the count is
- * set to 0.
- */
tmp = ((config & 0xf) << 28) |
- ((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+ ((1 << 24) << hard_processor_id());
+ if (metag_pmu->max_period)
+ /*
+ * Cores supporting overflow interrupts may have had the counter
+ * set to a specific value that needs preserving.
+ */
+ tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+ else
+ /*
+ * Older cores reset the counter on write, so prev_count needs
+ * resetting too so we can calculate a correct delta.
+ */
+ local64_set(&event->prev_count, 0);
+
metag_out32(tmp, PERF_COUNT(idx));
unlock:
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
@@ -693,9 +716,8 @@ static u64 metag_pmu_read_counter(int idx)
{
u32 tmp = 0;
- /* The act of reading the cycle counter also clears it */
if (METAG_INST_COUNTER == idx) {
- __core_reg_swap(TXTACTCYC, tmp);
+ tmp = __core_reg_get(TXTACTCYC);
goto out;
}
@@ -764,10 +786,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
/*
* Enable the counter again once core overflow processing has
- * completed.
+ * completed. Note the counter value may have been modified while it was
+ * inactive to set it up ready for the next interrupt.
*/
- if (!perf_event_overflow(event, &sampledata, regs))
+ if (!perf_event_overflow(event, &sampledata, regs)) {
+ __global_lock2(flags);
+ counter = (counter & 0xff000000) |
+ (metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
metag_out32(counter, PERF_COUNT(idx));
+ __global_unlock2(flags);
+ }
return IRQ_HANDLED;
}
@@ -830,7 +858,7 @@ static int __init init_hw_perf_events(void)
metag_pmu->max_period = 0;
}
- metag_pmu->name = "Meta 2";
+ metag_pmu->name = "meta2";
metag_pmu->version = version;
metag_pmu->pmu = pmu;
}
diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
index c6efe62e5b7..483dff986a2 100644
--- a/arch/metag/kernel/process.c
+++ b/arch/metag/kernel/process.c
@@ -22,6 +22,7 @@
#include <linux/pm.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
+#include <linux/smp.h>
#include <asm/core_reg.h>
#include <asm/user_gateway.h>
#include <asm/tcm.h>
@@ -31,7 +32,7 @@
/*
* Wait for the next interrupt and enable local interrupts
*/
-static inline void arch_idle(void)
+void arch_cpu_idle(void)
{
int tmp;
@@ -59,36 +60,12 @@ static inline void arch_idle(void)
: "r" (get_trigger_mask()));
}
-void cpu_idle(void)
-{
- set_thread_flag(TIF_POLLING_NRFLAG);
-
- while (1) {
- tick_nohz_idle_enter();
- rcu_idle_enter();
-
- while (!need_resched()) {
- /*
- * We need to disable interrupts here to ensure we don't
- * miss a wakeup call.
- */
- local_irq_disable();
- if (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id()))
- cpu_die();
-#endif
- arch_idle();
- } else {
- local_irq_enable();
- }
- }
-
- rcu_idle_exit();
- tick_nohz_idle_exit();
- schedule_preempt_disabled();
- }
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
}
+#endif
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -152,6 +129,8 @@ void show_regs(struct pt_regs *regs)
"D1.7 "
};
+ show_regs_print_info(KERN_INFO);
+
pr_info(" pt_regs @ %p\n", regs);
pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask);
pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags,
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 47a8828615a..7563628822b 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -288,10 +288,36 @@ static int metag_rp_state_set(struct task_struct *target,
return metag_rp_state_copyin(regs, pos, count, kbuf, ubuf);
}
+static int metag_tls_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ void __user *tls = target->thread.tls_ptr;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+
+static int metag_tls_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ void __user *tls;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ if (ret)
+ return ret;
+
+ target->thread.tls_ptr = tls;
+ return ret;
+}
+
enum metag_regset {
REGSET_GENERAL,
REGSET_CBUF,
REGSET_READPIPE,
+ REGSET_TLS,
};
static const struct user_regset metag_regsets[] = {
@@ -319,6 +345,14 @@ static const struct user_regset metag_regsets[] = {
.get = metag_rp_state_get,
.set = metag_rp_state_set,
},
+ [REGSET_TLS] = {
+ .core_note_type = NT_METAG_TLS,
+ .n = 1,
+ .size = sizeof(void *),
+ .align = sizeof(void *),
+ .get = metag_tls_get,
+ .set = metag_tls_set,
+ },
};
static const struct user_regset_view user_metag_view = {
diff --git a/arch/metag/kernel/setup.c b/arch/metag/kernel/setup.c
index 879246170ae..4f5726f1a55 100644
--- a/arch/metag/kernel/setup.c
+++ b/arch/metag/kernel/setup.c
@@ -124,6 +124,7 @@ struct machine_desc *machine_desc __initdata;
u8 cpu_2_hwthread_id[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = BAD_HWTHREAD_ID
};
+EXPORT_SYMBOL_GPL(cpu_2_hwthread_id);
/*
* Map a hardware thread ID to a Linux CPU number
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index 4b6d1f14df3..f443ec9a7cb 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -28,6 +28,8 @@
#include <asm/cachepart.h>
#include <asm/core_reg.h>
#include <asm/cpu.h>
+#include <asm/global_lock.h>
+#include <asm/metag_mem.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -37,6 +39,9 @@
#include <asm/hwthread.h>
#include <asm/traps.h>
+#define SYSC_DCPART(n) (SYSC_DCPART0 + SYSC_xCPARTn_STRIDE * (n))
+#define SYSC_ICPART(n) (SYSC_ICPART0 + SYSC_xCPARTn_STRIDE * (n))
+
DECLARE_PER_CPU(PTBI, pTBI);
void *secondary_data_stack;
@@ -99,6 +104,114 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
return 0;
}
+/**
+ * describe_cachepart_change: describe a change to cache partitions.
+ * @thread: Hardware thread number.
+ * @label: Label of cache type, e.g. "dcache" or "icache".
+ * @sz: Total size of the cache.
+ * @old: Old cache partition configuration (*CPART* register).
+ * @new: New cache partition configuration (*CPART* register).
+ *
+ * If the cache partition has changed, prints a message to the log describing
+ * those changes.
+ */
+static __cpuinit void describe_cachepart_change(unsigned int thread,
+ const char *label,
+ unsigned int sz,
+ unsigned int old,
+ unsigned int new)
+{
+ unsigned int lor1, land1, gor1, gand1;
+ unsigned int lor2, land2, gor2, gand2;
+ unsigned int diff = old ^ new;
+
+ if (!diff)
+ return;
+
+ pr_info("Thread %d: %s partition changed:", thread, label);
+ if (diff & (SYSC_xCPARTL_OR_BITS | SYSC_xCPARTL_AND_BITS)) {
+ lor1 = (old & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
+ lor2 = (new & SYSC_xCPARTL_OR_BITS) >> SYSC_xCPARTL_OR_S;
+ land1 = (old & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
+ land2 = (new & SYSC_xCPARTL_AND_BITS) >> SYSC_xCPARTL_AND_S;
+ pr_cont(" L:%#x+%#x->%#x+%#x",
+ (lor1 * sz) >> 4,
+ ((land1 + 1) * sz) >> 4,
+ (lor2 * sz) >> 4,
+ ((land2 + 1) * sz) >> 4);
+ }
+ if (diff & (SYSC_xCPARTG_OR_BITS | SYSC_xCPARTG_AND_BITS)) {
+ gor1 = (old & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
+ gor2 = (new & SYSC_xCPARTG_OR_BITS) >> SYSC_xCPARTG_OR_S;
+ gand1 = (old & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
+ gand2 = (new & SYSC_xCPARTG_AND_BITS) >> SYSC_xCPARTG_AND_S;
+ pr_cont(" G:%#x+%#x->%#x+%#x",
+ (gor1 * sz) >> 4,
+ ((gand1 + 1) * sz) >> 4,
+ (gor2 * sz) >> 4,
+ ((gand2 + 1) * sz) >> 4);
+ }
+ if (diff & SYSC_CWRMODE_BIT)
+ pr_cont(" %sWR",
+ (new & SYSC_CWRMODE_BIT) ? "+" : "-");
+ if (diff & SYSC_DCPART_GCON_BIT)
+ pr_cont(" %sGCOn",
+ (new & SYSC_DCPART_GCON_BIT) ? "+" : "-");
+ pr_cont("\n");
+}
+
+/**
+ * setup_smp_cache: ensure cache coherency for new SMP thread.
+ * @thread: New hardware thread number.
+ *
+ * Ensures that coherency is enabled and that the threads share the same cache
+ * partitions.
+ */
+static __cpuinit void setup_smp_cache(unsigned int thread)
+{
+ unsigned int this_thread, lflags;
+ unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
+ unsigned int icsz, icpart_old, icpart_new;
+
+ /*
+ * Copy over the current thread's cache partition configuration to the
+ * new thread so that they share cache partitions.
+ */
+ __global_lock2(lflags);
+ this_thread = hard_processor_id();
+ /* Share dcache partition */
+ dcpart_this = metag_in32(SYSC_DCPART(this_thread));
+ dcpart_old = metag_in32(SYSC_DCPART(thread));
+ dcpart_new = dcpart_this;
+#if PAGE_OFFSET < LINGLOBAL_BASE
+ /*
+ * For the local data cache to be coherent the threads must also have
+ * GCOn enabled.
+ */
+ dcpart_new |= SYSC_DCPART_GCON_BIT;
+ metag_out32(dcpart_new, SYSC_DCPART(this_thread));
+#endif
+ metag_out32(dcpart_new, SYSC_DCPART(thread));
+ /* Share icache partition too */
+ icpart_new = metag_in32(SYSC_ICPART(this_thread));
+ icpart_old = metag_in32(SYSC_ICPART(thread));
+ metag_out32(icpart_new, SYSC_ICPART(thread));
+ __global_unlock2(lflags);
+
+ /*
+ * Log if the cache partitions were altered so the user is aware of any
+ * potential unintentional cache wastage.
+ */
+ dcsz = get_dcache_size();
+ icsz = get_icache_size();
+ describe_cachepart_change(this_thread, "dcache", dcsz,
+ dcpart_this, dcpart_new);
+ describe_cachepart_change(thread, "dcache", dcsz,
+ dcpart_old, dcpart_new);
+ describe_cachepart_change(thread, "icache", icsz,
+ icpart_old, icpart_new);
+}
+
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
unsigned int thread = cpu_2_hwthread_id[cpu];
@@ -108,6 +221,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
flush_tlb_all();
+ setup_smp_cache(thread);
+
/*
* Tell the secondary CPU where to find its idle thread's stack.
*/
@@ -297,7 +412,7 @@ asmlinkage void secondary_start_kernel(void)
/*
* OK, it's off to the idle thread for us
*/
- cpu_idle();
+ cpu_startup_entry(CPUHP_ONLINE);
}
void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 8961f247b50..2ceeaae5b19 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -987,9 +987,3 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
show_trace(tsk, sp, NULL);
}
-
-void dump_stack(void)
-{
- show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
diff --git a/arch/metag/mm/Kconfig b/arch/metag/mm/Kconfig
index 975f2f4e3ec..03fb8f1555a 100644
--- a/arch/metag/mm/Kconfig
+++ b/arch/metag/mm/Kconfig
@@ -93,14 +93,6 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
-config MAX_ACTIVE_REGIONS
- int
- default "2" if SPARSEMEM
- default "1"
-
-config ARCH_POPULATES_NODE_MAP
- def_bool y
-
config ARCH_SELECT_MEMORY_MODEL
def_bool y
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index 504a398d5f8..d05b8455c44 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -380,14 +380,8 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalhigh_pages++;
- }
- totalram_pages += totalhigh_pages;
+ for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
num_physpages += totalhigh_pages;
#endif /* CONFIG_HIGHMEM */
@@ -412,32 +406,15 @@ void __init mem_init(void)
return;
}
-static void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
- unsigned long addr;
-
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- free_page(addr);
- totalram_pages++;
- }
- pr_info("Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-}
-
void free_initmem(void)
{
- free_init_pages("unused kernel memory",
- (unsigned long)(&__init_begin),
- (unsigned long)(&__init_end));
+ free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- end = end & PAGE_MASK;
- free_init_pages("initrd memory", start, end);
+ free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
diff --git a/arch/metag/oprofile/Makefile b/arch/metag/oprofile/Makefile
new file mode 100644
index 00000000000..c9639d4734d
--- /dev/null
+++ b/arch/metag/oprofile/Makefile
@@ -0,0 +1,17 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+oprofile-core-y += buffer_sync.o
+oprofile-core-y += cpu_buffer.o
+oprofile-core-y += event_buffer.o
+oprofile-core-y += oprof.o
+oprofile-core-y += oprofile_files.o
+oprofile-core-y += oprofile_stats.o
+oprofile-core-y += oprofilefs.o
+oprofile-core-y += timer_int.o
+oprofile-core-$(CONFIG_HW_PERF_EVENTS) += oprofile_perf.o
+
+oprofile-y += backtrace.o
+oprofile-y += common.o
+oprofile-y += $(addprefix ../../../drivers/oprofile/,$(oprofile-core-y))
+
+ccflags-y += -Werror
diff --git a/arch/metag/oprofile/backtrace.c b/arch/metag/oprofile/backtrace.c
new file mode 100644
index 00000000000..7cc3f37cb40
--- /dev/null
+++ b/arch/metag/oprofile/backtrace.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2010-2013 Imagination Technologies Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/oprofile.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
+
+#include "backtrace.h"
+
+static void user_backtrace_fp(unsigned long __user *fp, unsigned int depth)
+{
+ while (depth-- && access_ok(VERIFY_READ, fp, 8)) {
+ unsigned long addr;
+ unsigned long __user *fpnew;
+ if (__copy_from_user_inatomic(&addr, fp + 1, sizeof(addr)))
+ break;
+ addr -= 4;
+
+ oprofile_add_trace(addr);
+
+ /* stack grows up, so frame pointers must decrease */
+ if (__copy_from_user_inatomic(&fpnew, fp + 0, sizeof(fpnew)))
+ break;
+ if (fpnew >= fp)
+ break;
+ fp = fpnew;
+ }
+}
+
+static int kernel_backtrace_frame(struct stackframe *frame, void *data)
+{
+ unsigned int *depth = data;
+
+ oprofile_add_trace(frame->pc);
+
+ /* decrement depth and stop if we reach 0 */
+ if ((*depth)-- == 0)
+ return 1;
+
+ /* otherwise onto the next frame */
+ return 0;
+}
+
+void metag_backtrace(struct pt_regs * const regs, unsigned int depth)
+{
+ if (user_mode(regs)) {
+ unsigned long *fp = (unsigned long *)regs->ctx.AX[1].U0;
+ user_backtrace_fp((unsigned long __user __force *)fp, depth);
+ } else {
+ struct stackframe frame;
+ frame.fp = regs->ctx.AX[1].U0; /* A0FrP */
+ frame.sp = user_stack_pointer(regs); /* A0StP */
+ frame.lr = 0; /* from stack */
+ frame.pc = regs->ctx.CurrPC; /* PC */
+ walk_stackframe(&frame, &kernel_backtrace_frame, &depth);
+ }
+}
diff --git a/arch/metag/oprofile/backtrace.h b/arch/metag/oprofile/backtrace.h
new file mode 100644
index 00000000000..c0fcc4265ab
--- /dev/null
+++ b/arch/metag/oprofile/backtrace.h
@@ -0,0 +1,6 @@
+#ifndef _METAG_OPROFILE_BACKTRACE_H
+#define _METAG_OPROFILE_BACKTRACE_H
+
+void metag_backtrace(struct pt_regs * const regs, unsigned int depth);
+
+#endif
diff --git a/arch/metag/oprofile/common.c b/arch/metag/oprofile/common.c
new file mode 100644
index 00000000000..ba26152b3c0
--- /dev/null
+++ b/arch/metag/oprofile/common.c
@@ -0,0 +1,66 @@
+/*
+ * arch/metag/oprofile/common.c
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * Based on arch/sh/oprofile/common.c:
+ *
+ * Copyright (C) 2003 - 2010 Paul Mundt
+ *
+ * Based on arch/mips/oprofile/common.c:
+ *
+ * Copyright (C) 2004, 2005 Ralf Baechle
+ * Copyright (C) 2005 MIPS Technologies, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+
+#include "backtrace.h"
+
+#ifdef CONFIG_HW_PERF_EVENTS
+/*
+ * This will need to be reworked when multiple PMUs are supported.
+ */
+static char *metag_pmu_op_name;
+
+char *op_name_from_perf_id(void)
+{
+ return metag_pmu_op_name;
+}
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ ops->backtrace = metag_backtrace;
+
+ if (perf_num_counters() == 0)
+ return -ENODEV;
+
+ metag_pmu_op_name = kasprintf(GFP_KERNEL, "metag/%s",
+ perf_pmu_name());
+ if (unlikely(!metag_pmu_op_name))
+ return -ENOMEM;
+
+ return oprofile_perf_init(ops);
+}
+
+void oprofile_arch_exit(void)
+{
+ oprofile_perf_exit();
+ kfree(metag_pmu_op_name);
+}
+#else
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ ops->backtrace = metag_backtrace;
+ /* fall back to timer interrupt PC sampling */
+ return -ENODEV;
+}
+void oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */