Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/bL_switcher.h    67
-rw-r--r--  arch/arm/include/asm/cp15.h           16
-rw-r--r--  arch/arm/include/asm/cputype.h        61
-rw-r--r--  arch/arm/include/asm/glue-df.h        20
-rw-r--r--  arch/arm/include/asm/hardirq.h         2
-rw-r--r--  arch/arm/include/asm/kvm_arm.h         4
-rw-r--r--  arch/arm/include/asm/kvm_asm.h         2
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h   107
-rw-r--r--  arch/arm/include/asm/kvm_host.h       42
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h        67
-rw-r--r--  arch/arm/include/asm/kvm_vgic.h        1
-rw-r--r--  arch/arm/include/asm/mcpm.h            8
-rw-r--r--  arch/arm/include/asm/smp.h             2
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h       12
14 files changed, 357 insertions, 54 deletions
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 00000000000..d60e77d179a
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,67 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by: Nicolas Pitre, April 2012
+ * Copyright: (C) 2012 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id);
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE 0
+#define BL_NOTIFY_POST_ENABLE 1
+#define BL_NOTIFY_PRE_DISABLE 2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled. Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled(). These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
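
To illustrate how the hooks in the new header fit together, here is a minimal, hypothetical client sketch (the callback, its names, and the driver body are assumptions for illustration, not part of this patch): the notifier observes enable/disable transitions, while the get/put pair pins the current state around a critical section, per the contract documented above.

#include <linux/notifier.h>
#include <asm/bL_switcher.h>

static int my_bL_switcher_notify(struct notifier_block *nb,
				 unsigned long event, void *unused)
{
	switch (event) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		/* quiesce state that depends on the current switcher mode */
		break;
	case BL_NOTIFY_POST_ENABLE:
	case BL_NOTIFY_POST_DISABLE:
		/* re-evaluate per-CPU assumptions after the transition */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_bL_switcher_nb = {
	.notifier_call = my_bL_switcher_notify,
};

static int my_driver_probe(void)
{
	int ret = bL_switcher_register_notifier(&my_bL_switcher_nb);
	if (ret)
		return ret;

	if (bL_switcher_get_enabled()) {
		/* ... work that assumes the switcher stays enabled ... */
	}
	bL_switcher_put_enabled();	/* always pairs with the get above */
	return 0;
}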
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index ce4d01c03e6..cedd3721318 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -42,6 +42,8 @@
#define vectors_high() (0)
#endif
+#ifdef CONFIG_CPU_CP15
+
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
@@ -96,6 +98,18 @@ static inline void set_copro_access(unsigned int val)
isb();
}
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+/*
+ * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
+ * minds of the developers). Yielding 0 for machines without a cp15 (and making
+ * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ */
+#define cr_no_alignment UL(0)
+#define cr_alignment UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
#endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ad41ec2471e..7652712d1d1 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -38,6 +38,24 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_INTEL 0x69
+
+#define ARM_CPU_PART_ARM1136 0xB360
+#define ARM_CPU_PART_ARM1156 0xB560
+#define ARM_CPU_PART_ARM1176 0xB760
+#define ARM_CPU_PART_ARM11MPCORE 0xB020
+#define ARM_CPU_PART_CORTEX_A8 0xC080
+#define ARM_CPU_PART_CORTEX_A9 0xC090
+#define ARM_CPU_PART_CORTEX_A5 0xC050
+#define ARM_CPU_PART_CORTEX_A15 0xC0F0
+#define ARM_CPU_PART_CORTEX_A7 0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
+#define ARM_CPU_XSCALE_ARCH_V1 0x2000
+#define ARM_CPU_XSCALE_ARCH_V2 0x4000
+#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
@@ -50,6 +68,7 @@ extern unsigned int processor_id;
: "cc"); \
__val; \
})
+
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
@@ -59,29 +78,24 @@ extern unsigned int processor_id;
: "cc"); \
__val; \
})
-#else
-#define read_cpuid(reg) (processor_id)
-#define read_cpuid_ext(reg) 0
-#endif
-#define ARM_CPU_IMP_ARM 0x41
-#define ARM_CPU_IMP_INTEL 0x69
+#else /* ifdef CONFIG_CPU_CP15 */
-#define ARM_CPU_PART_ARM1136 0xB360
-#define ARM_CPU_PART_ARM1156 0xB560
-#define ARM_CPU_PART_ARM1176 0xB760
-#define ARM_CPU_PART_ARM11MPCORE 0xB020
-#define ARM_CPU_PART_CORTEX_A8 0xC080
-#define ARM_CPU_PART_CORTEX_A9 0xC090
-#define ARM_CPU_PART_CORTEX_A5 0xC050
-#define ARM_CPU_PART_CORTEX_A15 0xC0F0
-#define ARM_CPU_PART_CORTEX_A7 0xC070
+/*
+ * read_cpuid and read_cpuid_ext should only ever be called on machines that
+ * have cp15, so warn on other usages.
+ */
+#define read_cpuid(reg) \
+ ({ \
+ WARN_ON_ONCE(1); \
+ 0; \
+ })
-#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
-#define ARM_CPU_XSCALE_ARCH_V1 0x2000
-#define ARM_CPU_XSCALE_ARCH_V2 0x4000
-#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+#define read_cpuid_ext(reg) read_cpuid(reg)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+#ifdef CONFIG_CPU_CP15
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
@@ -92,6 +106,15 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
return read_cpuid(CPUID_ID);
}
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static inline unsigned int __attribute_const__ read_cpuid_id(void)
+{
+ return processor_id;
+}
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
{
return (read_cpuid_id() & 0xFF000000) >> 24;
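
A hedged sketch of how the relocated implementor/part macros combine with read_cpuid_id() (the 0xfff0 part-number mask and the helper name are illustrative assumptions, not introduced by this patch):

static inline bool cpu_is_cortex_a15(void)
{
	/* implementor field is MIDR[31:24] */
	if (read_cpuid_implementor() != ARM_CPU_IMP_ARM)
		return false;

	/* primary part number lives in MIDR[15:4], hence the 0xfff0 mask */
	return (read_cpuid_id() & 0xfff0) == ARM_CPU_PART_CORTEX_A15;
}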
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 8cacbcda76d..b6e9f2c108b 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -18,12 +18,12 @@
* ================
*
* We have the following to choose from:
- * arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
- * v5tej_early - ARMv5 with Thumb and Java early abort handler
+ * v5t_early - ARMv5 with Thumb early abort handler
+ * v5tj_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
@@ -39,19 +39,19 @@
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_LV4T
+#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v4t_late_abort
+# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV4
+#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v4_early_abort
+# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
@@ -63,19 +63,19 @@
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV5TJ
+#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v5tj_early_abort
+# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
-#ifdef CONFIG_CPU_ABRT_EV5T
+#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
-# define CPU_DABORT_HANDLER v5t_early_abort
+# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df6..3d7351c844a 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 7
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 7c3d813e15d..124623e5ef1 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@
#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)
+#define HSR_DABT_S1PTW (1U << 7)
+#define HSR_DABT_CM (1U << 8)
+#define HSR_DABT_EA (1U << 9)
+
#endif /* __ARM_KVM_ARM_H__ */
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index e4956f4e23e..18d50322a9e 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fd611996bfb..82b4babead2 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/kvm_arm.h>
-u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
-u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
+unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
-int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
+bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
return 1;
}
-static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
- return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
+ return &vcpu->arch.regs.usr_regs.ARM_pc;
}
-static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
+static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
- return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
+ return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
return reg == 15;
}
+static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hsr;
+}
+
+static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hxfar;
+}
+
+static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
+{
+ return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
+}
+
+static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.hyp_pc;
+}
+
+static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
+}
+
+static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
+}
+
+static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
+}
+
+static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
+{
+ return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
+}
+
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
+}
+
+static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+}
+
+/* Get Access Size from a data abort */
+static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+{
+ switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
+ case 0:
+ return 1;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ default:
+ kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
+ return -EFAULT;
+ }
+}
+
+/* This one is not specific to Data Abort */
+static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
+}
+
+static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
+}
+
+static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
+}
+
+static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
+}
+
+static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
+}
+
#endif /* __ARM_KVM_EMULATE_H__ */
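
For illustration, a minimal sketch of how an MMIO fault path could combine the new HSR accessors (the function, its parameters, and the error handling are hypothetical, not the patch's own decode path):

static int decode_mmio_dabt(struct kvm_vcpu *vcpu, phys_addr_t *fault_ipa,
			    int *len, bool *is_write, int *rd)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EINVAL;		/* ISS does not describe the access */

	/* page frame from HPFAR, page offset from HxFAR */
	*fault_ipa = kvm_vcpu_get_fault_ipa(vcpu) |
		     (kvm_vcpu_get_hfar(vcpu) & ~PAGE_MASK);

	*len = kvm_vcpu_dabt_get_as(vcpu);	/* 1, 2 or 4 bytes */
	if (*len < 0)
		return *len;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*rd = kvm_vcpu_dabt_get_rd(vcpu);
	return 0;
}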
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d1736a53b12..0c4e643d939 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
void *objects[KVM_NR_MEM_OBJS];
};
+struct kvm_vcpu_fault_info {
+ u32 hsr; /* Hyp Syndrome Register */
+ u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
+ u32 hpfar; /* Hyp IPA Fault Address Register */
+ u32 hyp_pc; /* PC when exception was taken from Hyp mode */
+};
+
+typedef struct vfp_hard_struct kvm_kernel_vfp_t;
+
struct kvm_vcpu_arch {
struct kvm_regs regs;
@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
u32 midr;
/* Exception Information */
- u32 hsr; /* Hyp Syndrome Register */
- u32 hxfar; /* Hyp Data/Inst Fault Address Register */
- u32 hpfar; /* Hyp IPA Fault Address Register */
+ struct kvm_vcpu_fault_info fault;
/* Floating point registers (VFP and Advanced SIMD/NEON) */
- struct vfp_hard_struct vfp_guest;
- struct vfp_hard_struct *vfp_host;
+ kvm_kernel_vfp_t vfp_guest;
+ kvm_kernel_vfp_t *vfp_host;
/* VGIC state */
struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
/* Interrupt related fields */
u32 irq_lines; /* IRQ and FIQ levels */
- /* Hyp exception information */
- u32 hyp_pc; /* PC when exception was taken from Hyp mode */
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
@@ -181,4 +185,26 @@ struct kvm_one_reg;
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
+
+static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+{
+ unsigned long pgd_low, pgd_high;
+
+ pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+ pgd_high = (pgd_ptr >> 32ULL);
+
+ /*
+ * Call initialization code, and switch to the full blown
+ * HYP code. The init code doesn't need to preserve these registers as
+ * r1-r3 and r12 are caller-save (scratch) registers according to the AAPCS.
+ * Note that we slightly misuse the prototype by casting pgd_low to
+ * a void *.
+ */
+ kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
#endif /* __ARM_KVM_HOST_H__ */
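
A hedged sketch of the expected caller (local names and the stack/vector setup are assumptions; kvm_mmu_get_httbr() is declared in asm/kvm_mmu.h):

static void cpu_init_hyp_mode_sketch(void *vectors, void *stack_page)
{
	unsigned long long pgd_ptr = kvm_mmu_get_httbr();
	unsigned long stack_ptr = (unsigned long)stack_page + PAGE_SIZE;
	unsigned long vector_ptr = (unsigned long)vectors;

	/* pgd_ptr may be a 64-bit physical address even on a 32-bit host */
	__cpu_init_hyp_mode(pgd_ptr, stack_ptr, vector_ptr);
}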
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 421a20b3487..970f3b5fa10 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__
+#include <asm/cacheflush.h>
+#include <asm/pgalloc.h>
+#include <asm/idmap.h>
+
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK (~0UL)
+#define HYP_PAGE_OFFSET PAGE_OFFSET
+#define KERN_TO_HYP(kva) (kva)
+
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
+static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
+{
+ pte_val(*pte) = new_pte;
+ /*
+ * flush_pmd_entry just takes a void pointer and cleans the necessary
+ * cache entries, so we can reuse the function for ptes.
+ */
+ flush_pmd_entry(pte);
+}
+
static inline bool kvm_is_write_fault(unsigned long hsr)
{
unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
return true;
}
+static inline void kvm_clean_pgd(pgd_t *pgd)
+{
+ clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
+}
+
+static inline void kvm_clean_pmd_entry(pmd_t *pmd)
+{
+ clean_pmd_entry(pmd);
+}
+
+static inline void kvm_clean_pte(pte_t *pte)
+{
+ clean_pte_table(pte);
+}
+
+static inline void kvm_set_s2pte_writable(pte_t *pte)
+{
+ pte_val(*pte) |= L_PTE_S2_RDWR;
+}
+
+struct kvm;
+
+static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+{
+ /*
+ * If we are going to insert an instruction page and the icache is
+ * either VIPT or PIPT, there is a potential problem where the host
+ * (or another VM) may have used the same page as this guest, and we
+ * read incorrect data from the icache. If we're using a PIPT cache,
+ * we can invalidate just that page, but if we are using a VIPT cache
+ * we need to invalidate the entire icache - damn shame - as written
+ * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+ *
+ * VIVT caches are tagged using both the ASID and the VMID and don't
+ * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ */
+ if (icache_is_pipt()) {
+ unsigned long hva = gfn_to_hva(kvm, gfn);
+ __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+ } else if (!icache_is_vivt_asid_tagged()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ }
+}
+
#endif /* __ARM_KVM_MMU_H__ */
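
An illustrative sketch of how a stage-2 fault handler might stitch these helpers together (PAGE_S2, the pfn_pte() usage and the function shape are assumptions for illustration):

static void stage2_install_page_sketch(struct kvm *kvm, pte_t *ptep,
				       pfn_t pfn, gfn_t gfn, bool writable)
{
	pte_t new_pte = pfn_pte(pfn, PAGE_S2);

	if (writable)
		kvm_set_s2pte_writable(&new_pte);

	/* keep the guest's icache coherent for the freshly mapped page */
	coherent_icache_guest_page(kvm, gfn);

	/* write the entry and clean it, as kvm_set_pte does above */
	kvm_set_pte(ptep, new_pte);
}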
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
index ab97207d9cd..343744e4809 100644
--- a/arch/arm/include/asm/kvm_vgic.h
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/kvm.h>
-#include <linux/kvm_host.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a..7626a7fd493 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@ extern void mcpm_entry_point(void);
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated. The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+ unsigned long poke_phys_addr, unsigned long poke_val);
+
+/*
* CPU/cluster power operations API for higher subsystems to use.
*/
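
A hypothetical usage sketch (function and variable names, and the value poked, are assumptions): the entry vector and the early poke are both programmed before the target CPU is powered up, so the very early assembly can publish a flag at a physical address.

static void power_up_cpu_sketch(unsigned cpu, unsigned cluster,
				unsigned long go_flag_phys)
{
	mcpm_set_entry_vector(cpu, cluster, mcpm_entry_point);

	/* have the early assembly write 1 to go_flag_phys before ungating */
	mcpm_set_early_poke(cpu, cluster, go_flag_phys, 1);

	/* ... then invoke the mcpm power-up API for (cpu, cluster) ... */
}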
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index d3a22bebe6c..610ccf33f5e 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -81,6 +81,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
struct smp_operations {
#ifdef CONFIG_SMP
/*
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 023bfeb367b..c1ee007523d 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
#define KVM_ARM_FIQ_spsr fiq_regs[7]
struct kvm_regs {
- struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
- __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
- __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
- __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */
- __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
- __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
+ struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
+ unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
+ unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
+ unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
+ unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
+ unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
};
/* Supported Processor Types */