Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/00README.txt        |   31
-rw-r--r--  arch/mips/kvm/Kconfig             |   49
-rw-r--r--  arch/mips/kvm/Makefile            |   13
-rw-r--r--  arch/mips/kvm/kvm_cb.c            |   14
-rw-r--r--  arch/mips/kvm/kvm_locore.S        |  650
-rw-r--r--  arch/mips/kvm/kvm_mips.c          |  958
-rw-r--r--  arch/mips/kvm/kvm_mips_comm.h     |   23
-rw-r--r--  arch/mips/kvm/kvm_mips_commpage.c |   37
-rw-r--r--  arch/mips/kvm/kvm_mips_dyntrans.c |  149
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c     | 1826
-rw-r--r--  arch/mips/kvm/kvm_mips_int.c      |  243
-rw-r--r--  arch/mips/kvm/kvm_mips_int.h      |   49
-rw-r--r--  arch/mips/kvm/kvm_mips_opcode.h   |   24
-rw-r--r--  arch/mips/kvm/kvm_mips_stats.c    |   82
-rw-r--r--  arch/mips/kvm/kvm_tlb.c           |  928
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c     |  482
-rw-r--r--  arch/mips/kvm/trace.h             |   46
17 files changed, 5604 insertions, 0 deletions
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 00000000000..51617e481aa
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
+KVM/MIPS Trap & Emulate Release Notes
+=====================================
+
+(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
+ Malta Board with FPGA based 34K
+ Sigma Designs TangoX board with a 24K based 8654 SoC.
+ Malta Board with 74K @ 1GHz
+
+(2) Both Guest kernel and Guest Userspace execute in UM.
+ Guest User address space: 0x00000000 -> 0x40000000
+ Guest Kernel Unmapped: 0x40000000 -> 0x60000000
+ Guest Kernel Mapped: 0x60000000 -> 0x80000000
+
+ Guest Usermode virtual memory is limited to 1GB. (An illustrative address-range check follows these notes.)
+
+(3) 16K Page Sizes: The Host kernel and the Guest kernel must use the same page size, currently at least 16K.
+ Note that due to cache aliasing issues, 4K page sizes are NOT supported.
+
+(4) No HugeTLB Support
+ Both the host kernel and Guest kernel should have the page size set to 16K.
+ HugeTLB support will be implemented in a future release.
+
+(5) KVM/MIPS does not support SMP Guests
+ A Linux-3.7-rc2 based SMP guest hangs due to the LL/TLBP/SC sequence in the generated
+ TLB handlers: the TLBP instruction causes a trap, and the LL/SC reservation is cleared
+ when we ERET back to the guest, so the guest hangs in an infinite loop.
+ This will be fixed in a future release.
+
+(6) Use Host FPU
+ Currently KVM/MIPS emulates a 24K CPU without an FPU.
+ This will be fixed in a future release.
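The address map in note (2) amounts to a simple range check on the guest virtual address. The sketch below is illustrative only; the enum and helper names are not part of this patch:

        /* Illustrative sketch: classify a guest virtual address per note (2) above. */
        enum guest_seg {
                GUEST_USER,             /* 0x00000000 - 0x3fffffff, guest user (1GB)  */
                GUEST_KSEG_UNMAPPED,    /* 0x40000000 - 0x5fffffff, guest kernel unmapped */
                GUEST_KSEG_MAPPED,      /* 0x60000000 - 0x7fffffff, guest kernel mapped   */
                GUEST_INVALID,
        };

        static enum guest_seg kvm_guest_region(unsigned long gva)
        {
                if (gva < 0x40000000UL)
                        return GUEST_USER;
                if (gva < 0x60000000UL)
                        return GUEST_KSEG_UNMAPPED;
                if (gva < 0x80000000UL)
                        return GUEST_KSEG_MAPPED;
                return GUEST_INVALID;   /* above 0x80000000 is not guest-visible here */
        }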
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 00000000000..2c15590e55f
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ depends on HAVE_KVM
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on HAVE_KVM
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ select KVM_MMIO
+ ---help---
+ Support for hosting Guest kernels.
+ Currently supported on MIPS32 processors.
+
+config KVM_MIPS_DYN_TRANS
+ bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+ depends on KVM
+ ---help---
+ When running in Trap & Emulate mode patch privileged
+ instructions to reduce the number of traps.
+
+ If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+ bool "Maintain counters for COP0 accesses"
+ depends on KVM
+ ---help---
+ Maintain statistics for Guest COP0 accesses.
+ A histogram of COP0 accesses is printed when the VM is
+ shutdown.
+
+ If unsure, say N.
+
+source drivers/vhost/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 00000000000..78d87bbc99d
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
+# Makefile for KVM support for MIPS
+#
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
+ kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
+ kvm_mips_dyntrans.o kvm_trap_emul.o
+
+obj-$(CONFIG_KVM) += kvm.o
+obj-y += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 00000000000..313c2e37b97
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 00000000000..dca2aa66599
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Main entry point for the guest, exception handling.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+
+#define _C_LABEL(x) x
+#define MIPSX(name) mips32_ ## name
+#define CALLFRAME_SIZ 32
+
+/*
+ * VECTOR
+ * exception vector entrypoint
+ */
+#define VECTOR(x, regmask) \
+ .ent _C_LABEL(x),0; \
+ EXPORT(x);
+
+#define VECTOR_END(x) \
+ EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID PT_BVADDR
+#define PT_HOST_USERLOCAL PT_EPC
+
+#define CP0_DDATA_LO $28,3
+#define CP0_EBASE $15,1
+
+#define CP0_INTCTL $12,1
+#define CP0_SRSCTL $12,2
+#define CP0_SRSMAP $12,3
+#define CP0_HWRENA $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
+
+#define RESUME_GUEST 0
+#define RESUME_HOST RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+
+FEXPORT(__kvm_mips_vcpu_run)
+ .set push
+ .set noreorder
+ .set noat
+
+ /* k0/k1 not being used in host kernel context */
+ addiu k1,sp, -PT_SIZE
+ LONG_S $0, PT_R0(k1)
+ LONG_S $1, PT_R1(k1)
+ LONG_S $2, PT_R2(k1)
+ LONG_S $3, PT_R3(k1)
+
+ LONG_S $4, PT_R4(k1)
+ LONG_S $5, PT_R5(k1)
+ LONG_S $6, PT_R6(k1)
+ LONG_S $7, PT_R7(k1)
+
+ LONG_S $8, PT_R8(k1)
+ LONG_S $9, PT_R9(k1)
+ LONG_S $10, PT_R10(k1)
+ LONG_S $11, PT_R11(k1)
+ LONG_S $12, PT_R12(k1)
+ LONG_S $13, PT_R13(k1)
+ LONG_S $14, PT_R14(k1)
+ LONG_S $15, PT_R15(k1)
+ LONG_S $16, PT_R16(k1)
+ LONG_S $17, PT_R17(k1)
+
+ LONG_S $18, PT_R18(k1)
+ LONG_S $19, PT_R19(k1)
+ LONG_S $20, PT_R20(k1)
+ LONG_S $21, PT_R21(k1)
+ LONG_S $22, PT_R22(k1)
+ LONG_S $23, PT_R23(k1)
+ LONG_S $24, PT_R24(k1)
+ LONG_S $25, PT_R25(k1)
+
+ /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
+
+ LONG_S $28, PT_R28(k1)
+ LONG_S $29, PT_R29(k1)
+ LONG_S $30, PT_R30(k1)
+ LONG_S $31, PT_R31(k1)
+
+ /* Save hi/lo */
+ mflo v0
+ LONG_S v0, PT_LO(k1)
+ mfhi v1
+ LONG_S v1, PT_HI(k1)
+
+ /* Save host status */
+ mfc0 v0, CP0_STATUS
+ LONG_S v0, PT_STATUS(k1)
+
+ /* Save host ASID, shove it into the BVADDR location */
+ mfc0 v1,CP0_ENTRYHI
+ andi v1, 0xff
+ LONG_S v1, PT_HOST_ASID(k1)
+
+ /* Save DDATA_LO, will be used to store pointer to vcpu */
+ mfc0 v1, CP0_DDATA_LO
+ LONG_S v1, PT_HOST_USERLOCAL(k1)
+
+ /* DDATA_LO has pointer to vcpu */
+ mtc0 a1,CP0_DDATA_LO
+
+ /* Offset into vcpu->arch */
+ addiu k1, a1, VCPU_HOST_ARCH
+
+ /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
+ LONG_S sp, VCPU_HOST_STACK(k1)
+
+ /* Save the kernel gp as well */
+ LONG_S gp, VCPU_HOST_GP(k1)
+
+ /* Setup status register for running the guest in UM, interrupts are disabled */
+ li k0,(ST0_EXL | KSU_USER| ST0_BEV)
+ mtc0 k0,CP0_STATUS
+ ehb
+
+ /* load up the new EBASE */
+ LONG_L k0, VCPU_GUEST_EBASE(k1)
+ mtc0 k0,CP0_EBASE
+
+ /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
+ * but make sure that timer interrupts are enabled
+ */
+ li k0,(ST0_EXL | KSU_USER | ST0_IE)
+ andi v0, v0, ST0_IM
+ or k0, k0, v0
+ mtc0 k0,CP0_STATUS
+ ehb
+
+
+ /* Set Guest EPC */
+ LONG_L t0, VCPU_PC(k1)
+ mtc0 t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+ /* Set the ASID for the Guest Kernel */
+ sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
+ /* addresses shift to 0x80000000 */
+ bltz t0, 1f /* If kernel */
+ addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
+ addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
+1:
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ LONG_L t2, TI_CPU($28) /* smp_processor_id */
+ sll t2, t2, 2 /* x4 */
+ addu t3, t1, t2
+ LONG_L k0, (t3)
+ andi k0, k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Disable RDHWR access */
+ mtc0 zero, CP0_HWRENA
+
+ /* Now load up the Guest Context from VCPU */
+ LONG_L $1, VCPU_R1(k1)
+ LONG_L $2, VCPU_R2(k1)
+ LONG_L $3, VCPU_R3(k1)
+
+ LONG_L $4, VCPU_R4(k1)
+ LONG_L $5, VCPU_R5(k1)
+ LONG_L $6, VCPU_R6(k1)
+ LONG_L $7, VCPU_R7(k1)
+
+ LONG_L $8, VCPU_R8(k1)
+ LONG_L $9, VCPU_R9(k1)
+ LONG_L $10, VCPU_R10(k1)
+ LONG_L $11, VCPU_R11(k1)
+ LONG_L $12, VCPU_R12(k1)
+ LONG_L $13, VCPU_R13(k1)
+ LONG_L $14, VCPU_R14(k1)
+ LONG_L $15, VCPU_R15(k1)
+ LONG_L $16, VCPU_R16(k1)
+ LONG_L $17, VCPU_R17(k1)
+ LONG_L $18, VCPU_R18(k1)
+ LONG_L $19, VCPU_R19(k1)
+ LONG_L $20, VCPU_R20(k1)
+ LONG_L $21, VCPU_R21(k1)
+ LONG_L $22, VCPU_R22(k1)
+ LONG_L $23, VCPU_R23(k1)
+ LONG_L $24, VCPU_R24(k1)
+ LONG_L $25, VCPU_R25(k1)
+
+ /* k0/k1 loaded up later */
+
+ LONG_L $28, VCPU_R28(k1)
+ LONG_L $29, VCPU_R29(k1)
+ LONG_L $30, VCPU_R30(k1)
+ LONG_L $31, VCPU_R31(k1)
+
+ /* Restore hi/lo */
+ LONG_L k0, VCPU_LO(k1)
+ mtlo k0
+
+ LONG_L k0, VCPU_HI(k1)
+ mthi k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+ /* Restore the guest's k0/k1 registers */
+ LONG_L k0, VCPU_R26(k1)
+ LONG_L k1, VCPU_R27(k1)
+
+ /* Jump to guest */
+ eret
+ .set pop
+
+VECTOR(MIPSX(exception), unknown)
+/*
+ * Find out what mode we came from and jump to the proper handler.
+ */
+ .set push
+ .set noat
+ .set noreorder
+ mtc0 k0, CP0_ERROREPC #01: Save guest k0
+ ehb #02:
+
+ mfc0 k0, CP0_EBASE #03: Get EBASE
+ srl k0, k0, 10 #04: Get rid of CPUNum
+ sll k0, k0, 10 #05
+ LONG_S k1, 0x3000(k0) #06: Save k1 @ offset 0x3000
+ addiu k0, k0, 0x2000 #07: Exception handler is installed @ offset 0x2000
+ j k0 #08: jump to the function
+ nop #09: branch delay slot
+ .set pop
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+ .set push
+ .set noat
+ .set noreorder
+
+ /* Get the VCPU pointer from DDATA_LO */
+ mfc0 k1, CP0_DDATA_LO
+ addiu k1, k1, VCPU_HOST_ARCH
+
+ /* Start saving Guest context to VCPU */
+ LONG_S $0, VCPU_R0(k1)
+ LONG_S $1, VCPU_R1(k1)
+ LONG_S $2, VCPU_R2(k1)
+ LONG_S $3, VCPU_R3(k1)
+ LONG_S $4, VCPU_R4(k1)
+ LONG_S $5, VCPU_R5(k1)
+ LONG_S $6, VCPU_R6(k1)
+ LONG_S $7, VCPU_R7(k1)
+ LONG_S $8, VCPU_R8(k1)
+ LONG_S $9, VCPU_R9(k1)
+ LONG_S $10, VCPU_R10(k1)
+ LONG_S $11, VCPU_R11(k1)
+ LONG_S $12, VCPU_R12(k1)
+ LONG_S $13, VCPU_R13(k1)
+ LONG_S $14, VCPU_R14(k1)
+ LONG_S $15, VCPU_R15(k1)
+ LONG_S $16, VCPU_R16(k1)
+ LONG_S $17, VCPU_R17(k1)
+ LONG_S $18, VCPU_R18(k1)
+ LONG_S $19, VCPU_R19(k1)
+ LONG_S $20, VCPU_R20(k1)
+ LONG_S $21, VCPU_R21(k1)
+ LONG_S $22, VCPU_R22(k1)
+ LONG_S $23, VCPU_R23(k1)
+ LONG_S $24, VCPU_R24(k1)
+ LONG_S $25, VCPU_R25(k1)
+
+ /* Guest k0/k1 saved later */
+
+ LONG_S $28, VCPU_R28(k1)
+ LONG_S $29, VCPU_R29(k1)
+ LONG_S $30, VCPU_R30(k1)
+ LONG_S $31, VCPU_R31(k1)
+
+ /* We need to save hi/lo and restore them on
+ * the way out
+ */
+ mfhi t0
+ LONG_S t0, VCPU_HI(k1)
+
+ mflo t0
+ LONG_S t0, VCPU_LO(k1)
+
+ /* Finally save guest k0/k1 to VCPU */
+ mfc0 t0, CP0_ERROREPC
+ LONG_S t0, VCPU_R26(k1)
+
+ /* Get GUEST k1 and save it in VCPU */
+ la t1, ~0x2ff
+ mfc0 t0, CP0_EBASE
+ and t0, t0, t1
+ LONG_L t0, 0x3000(t0)
+ LONG_S t0, VCPU_R27(k1)
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ mfc0 a1, CP0_DDATA_LO
+ move s1, a1
+
+ /* Restore run (vcpu->run) */
+ LONG_L a0, VCPU_RUN(a1)
+ /* Save pointer to run in s0, will be saved by the compiler */
+ move s0, a0
+
+
+ /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
+ mfc0 k0,CP0_EPC
+ LONG_S k0, VCPU_PC(k1)
+
+ mfc0 k0, CP0_BADVADDR
+ LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+ mfc0 k0, CP0_CAUSE
+ LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
+
+ mfc0 k0, CP0_ENTRYHI
+ LONG_S k0, VCPU_HOST_ENTRYHI(k1)
+
+ /* Now restore the host state just enough to run the handlers */
+
+ /* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ mfc0 v0, CP0_STATUS
+
+ .set at
+ or k0, v0, ST0_BEV
+ .set noat
+
+ mtc0 k0, CP0_STATUS
+ ehb
+
+ LONG_L k0, VCPU_HOST_EBASE(k1)
+ mtc0 k0,CP0_EBASE
+
+
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ .set at
+ and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+ or v0, v0, ST0_CU0
+ .set noat
+ mtc0 v0, CP0_STATUS
+ ehb
+
+ /* Load up host GP */
+ LONG_L gp, VCPU_HOST_GP(k1)
+
+ /* Need a stack before we can jump to "C" */
+ LONG_L sp, VCPU_HOST_STACK(k1)
+
+ /* Saved host state */
+ addiu sp,sp, -PT_SIZE
+
+ /* XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
+
+ /* Restore host DDATA_LO */
+ LONG_L k0, PT_HOST_USERLOCAL(sp)
+ mtc0 k0, CP0_DDATA_LO
+
+ /* Restore RDHWR access */
+ la k0, 0x2000000F
+ mtc0 k0, CP0_HWRENA
+
+ /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+ /* XXXKYMA: not sure if this is safe, how large is the stack?? */
+ /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
+ la t9,kvm_mips_handle_exit
+ jalr.hb t9
+ addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
+
+ /* Return from handler; make sure interrupts are disabled */
+ di
+ ehb
+
+ /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
+ * while we were handling the exception from the guest, reload k1
+ */
+ move k1, s1
+ addiu k1, k1, VCPU_HOST_ARCH
+
+ /* Check return value, should tell us if we are returning to the host (handle I/O etc)
+ * or resuming the guest
+ */
+ andi t0, v0, RESUME_HOST
+ bnez t0, __kvm_mips_return_to_host
+ nop
+
+__kvm_mips_return_to_guest:
+ /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+ mtc0 s1, CP0_DDATA_LO
+
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ LONG_L t0, VCPU_GUEST_EBASE(k1)
+
+ /* Switch EBASE back to the one used by KVM */
+ mfc0 v1, CP0_STATUS
+ .set at
+ or k0, v1, ST0_BEV
+ .set noat
+ mtc0 k0, CP0_STATUS
+ ehb
+ mtc0 t0,CP0_EBASE
+
+ /* Setup status register for running guest in UM */
+ .set at
+ or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+ and v1, v1, ~ST0_CU0
+ .set noat
+ mtc0 v1, CP0_STATUS
+ ehb
+
+
+ /* Set Guest EPC */
+ LONG_L t0, VCPU_PC(k1)
+ mtc0 t0, CP0_EPC
+
+ /* Set the ASID for the Guest Kernel */
+ sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
+ /* addresses shift to 0x80000000 */
+ bltz t0, 1f /* If kernel */
+ addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
+ addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
+1:
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ LONG_L t2, TI_CPU($28) /* smp_processor_id */
+ sll t2, t2, 2 /* x4 */
+ addu t3, t1, t2
+ LONG_L k0, (t3)
+ andi k0, k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Disable RDHWR access */
+ mtc0 zero, CP0_HWRENA
+
+ /* load the guest context from VCPU and return */
+ LONG_L $0, VCPU_R0(k1)
+ LONG_L $1, VCPU_R1(k1)
+ LONG_L $2, VCPU_R2(k1)
+ LONG_L $3, VCPU_R3(k1)
+ LONG_L $4, VCPU_R4(k1)
+ LONG_L $5, VCPU_R5(k1)
+ LONG_L $6, VCPU_R6(k1)
+ LONG_L $7, VCPU_R7(k1)
+ LONG_L $8, VCPU_R8(k1)
+ LONG_L $9, VCPU_R9(k1)
+ LONG_L $10, VCPU_R10(k1)
+ LONG_L $11, VCPU_R11(k1)
+ LONG_L $12, VCPU_R12(k1)
+ LONG_L $13, VCPU_R13(k1)
+ LONG_L $14, VCPU_R14(k1)
+ LONG_L $15, VCPU_R15(k1)
+ LONG_L $16, VCPU_R16(k1)
+ LONG_L $17, VCPU_R17(k1)
+ LONG_L $18, VCPU_R18(k1)
+ LONG_L $19, VCPU_R19(k1)
+ LONG_L $20, VCPU_R20(k1)
+ LONG_L $21, VCPU_R21(k1)
+ LONG_L $22, VCPU_R22(k1)
+ LONG_L $23, VCPU_R23(k1)
+ LONG_L $24, VCPU_R24(k1)
+ LONG_L $25, VCPU_R25(k1)
+
+ /* k0/k1 loaded later */
+ LONG_L $28, VCPU_R28(k1)
+ LONG_L $29, VCPU_R29(k1)
+ LONG_L $30, VCPU_R30(k1)
+ LONG_L $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+ LONG_L k0, VCPU_HI(k1)
+ mthi k0
+
+ LONG_L k0, VCPU_LO(k1)
+ mtlo k0
+
+ LONG_L k0, VCPU_R26(k1)
+ LONG_L k1, VCPU_R27(k1)
+
+ eret
+
+__kvm_mips_return_to_host:
+ /* EBASE is already pointing to Linux */
+ LONG_L k1, VCPU_HOST_STACK(k1)
+ addiu k1,k1, -PT_SIZE
+
+ /* Restore host DDATA_LO */
+ LONG_L k0, PT_HOST_USERLOCAL(k1)
+ mtc0 k0, CP0_DDATA_LO
+
+ /* Restore host ASID */
+ LONG_L k0, PT_HOST_ASID(sp)
+ andi k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Load context saved on the host stack */
+ LONG_L $0, PT_R0(k1)
+ LONG_L $1, PT_R1(k1)
+
+ /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
+ sra k0, v0, 2
+ move $2, k0
+
+ LONG_L $3, PT_R3(k1)
+ LONG_L $4, PT_R4(k1)
+ LONG_L $5, PT_R5(k1)
+ LONG_L $6, PT_R6(k1)
+ LONG_L $7, PT_R7(k1)
+ LONG_L $8, PT_R8(k1)
+ LONG_L $9, PT_R9(k1)
+ LONG_L $10, PT_R10(k1)
+ LONG_L $11, PT_R11(k1)
+ LONG_L $12, PT_R12(k1)
+ LONG_L $13, PT_R13(k1)
+ LONG_L $14, PT_R14(k1)
+ LONG_L $15, PT_R15(k1)
+ LONG_L $16, PT_R16(k1)
+ LONG_L $17, PT_R17(k1)
+ LONG_L $18, PT_R18(k1)
+ LONG_L $19, PT_R19(k1)
+ LONG_L $20, PT_R20(k1)
+ LONG_L $21, PT_R21(k1)
+ LONG_L $22, PT_R22(k1)
+ LONG_L $23, PT_R23(k1)
+ LONG_L $24, PT_R24(k1)
+ LONG_L $25, PT_R25(k1)
+
+ /* Host k0/k1 were not saved */
+
+ LONG_L $28, PT_R28(k1)
+ LONG_L $29, PT_R29(k1)
+ LONG_L $30, PT_R30(k1)
+
+ LONG_L k0, PT_HI(k1)
+ mthi k0
+
+ LONG_L k0, PT_LO(k1)
+ mtlo k0
+
+ /* Restore RDHWR access */
+ la k0, 0x2000000F
+ mtc0 k0, CP0_HWRENA
+
+
+ /* Restore RA, which is the address we will return to */
+ LONG_L ra, PT_R31(k1)
+ j ra
+ nop
+
+ .set pop
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+ ####
+ ##### The exception handlers.
+ #####
+ .word _C_LABEL(MIPSX(GuestException)) # 0
+ .word _C_LABEL(MIPSX(GuestException)) # 1
+ .word _C_LABEL(MIPSX(GuestException)) # 2
+ .word _C_LABEL(MIPSX(GuestException)) # 3
+ .word _C_LABEL(MIPSX(GuestException)) # 4
+ .word _C_LABEL(MIPSX(GuestException)) # 5
+ .word _C_LABEL(MIPSX(GuestException)) # 6
+ .word _C_LABEL(MIPSX(GuestException)) # 7
+ .word _C_LABEL(MIPSX(GuestException)) # 8
+ .word _C_LABEL(MIPSX(GuestException)) # 9
+ .word _C_LABEL(MIPSX(GuestException)) # 10
+ .word _C_LABEL(MIPSX(GuestException)) # 11
+ .word _C_LABEL(MIPSX(GuestException)) # 12
+ .word _C_LABEL(MIPSX(GuestException)) # 13
+ .word _C_LABEL(MIPSX(GuestException)) # 14
+ .word _C_LABEL(MIPSX(GuestException)) # 15
+ .word _C_LABEL(MIPSX(GuestException)) # 16
+ .word _C_LABEL(MIPSX(GuestException)) # 17
+ .word _C_LABEL(MIPSX(GuestException)) # 18
+ .word _C_LABEL(MIPSX(GuestException)) # 19
+ .word _C_LABEL(MIPSX(GuestException)) # 20
+ .word _C_LABEL(MIPSX(GuestException)) # 21
+ .word _C_LABEL(MIPSX(GuestException)) # 22
+ .word _C_LABEL(MIPSX(GuestException)) # 23
+ .word _C_LABEL(MIPSX(GuestException)) # 24
+ .word _C_LABEL(MIPSX(GuestException)) # 25
+ .word _C_LABEL(MIPSX(GuestException)) # 26
+ .word _C_LABEL(MIPSX(GuestException)) # 27
+ .word _C_LABEL(MIPSX(GuestException)) # 28
+ .word _C_LABEL(MIPSX(GuestException)) # 29
+ .word _C_LABEL(MIPSX(GuestException)) # 30
+ .word _C_LABEL(MIPSX(GuestException)) # 31
+
+
+/* This routine makes changes to the instruction stream effective to the hardware.
+ * It should be called after the instruction stream is written.
+ * On return, the new instructions are effective.
+ * Inputs:
+ * a0 = Start address of new instruction stream
+ * a1 = Size, in bytes, of new instruction stream
+ */
+
+#define HW_SYNCI_Step $1
+LEAF(MIPSX(SyncICache))
+ .set push
+ .set mips32r2
+ beq a1, zero, 20f
+ nop
+ addu a1, a0, a1
+ rdhwr v0, HW_SYNCI_Step
+ beq v0, zero, 20f
+ nop
+
+10:
+ synci 0(a0)
+ addu a0, a0, v0
+ sltu v1, a0, a1
+ bne v1, zero, 10b
+ nop
+ sync
+20:
+ jr.hb ra
+ nop
+ .set pop
+END(MIPSX(SyncICache))
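A note on the ASID selection above: __kvm_mips_load_asid and __kvm_mips_return_to_guest pick between the guest kernel and guest user ASID with an "sll t0, t0, 1; bltz t0" pair, which works because guest kernel addresses start at 0x40000000, so bit 30 becomes the sign bit after the shift. A rough C equivalent for illustration only; is_guest_kernel_addr is not a helper in this patch:

        /* Illustrative C equivalent of the "sll 1; bltz" test in the ASID path. */
        static int is_guest_kernel_addr(unsigned long epc)
        {
                /*
                 * On 32-bit MIPS, guest kernel space begins at 0x40000000.
                 * Shifting left by one moves bit 30 into the sign bit, so a
                 * negative result means the guest was executing kernel code
                 * (use the kernel ASID); otherwise use the user ASID.
                 */
                return (long)(epc << 1) < 0;
        }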
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 00000000000..e0dad028979
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "wait", VCPU_STAT(wait_exits) },
+ { "cache", VCPU_STAT(cache_exits) },
+ { "signal", VCPU_STAT(signal_exits) },
+ { "interrupt", VCPU_STAT(int_exits) },
+ { "cop_unusable", VCPU_STAT(cop_unusable_exits) },
+ { "tlbmod", VCPU_STAT(tlbmod_exits) },
+ { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+ { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+ { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+ { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+ { "syscall", VCPU_STAT(syscall_exits) },
+ { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+ { "break_inst", VCPU_STAT(break_inst_exits) },
+ { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+ { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ {NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+ for_each_possible_cpu(i) {
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ vcpu->arch.guest_user_asid[i] = 0;
+ }
+ return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
+ * are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ int *r = (int *)rtn;
+ *r = 0;
+ return;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+ unsigned long wired;
+
+ /* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
+ wired = read_c0_wired();
+ write_c0_wired(wired + 1);
+ mtc0_tlbw_hazard();
+ kvm->arch.commpage_tlb = wired;
+
+ kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+ kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+ struct kvm *kvm = (struct kvm *)arg;
+
+ kvm_mips_init_tlbs(kvm);
+ kvm_mips_callbacks->vm_init(kvm);
+
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+ if (atomic_inc_return(&kvm_mips_instance) == 1) {
+ kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+ __func__);
+ on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+ }
+
+
+ return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+ struct kvm_vcpu *vcpu;
+
+ /* Put the pages we reserved for the guest pmap */
+ for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+ if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+ kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ }
+
+ if (kvm->arch.guest_pmap)
+ kfree(kvm->arch.guest_pmap);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvm_arch_vcpu_free(vcpu);
+ }
+
+ mutex_lock(&kvm->lock);
+
+ for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+ kvm->vcpus[i] = NULL;
+
+ atomic_set(&kvm->online_vcpus, 0);
+
+ mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+ /* Restore wired count */
+ write_c0_wired(0);
+ mtc0_tlbw_hazard();
+ /* Clear out all the TLBs */
+ kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvm_mips_free_vcpus(kvm);
+
+ /* If this is the last instance, restore wired count */
+ if (atomic_dec_return(&kvm_mips_instance) == 0) {
+ kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+ __func__);
+ on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+ }
+}
+
+long
+kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+{
+ return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change)
+{
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ enum kvm_mr_change change)
+{
+ unsigned long npages = 0;
+ int i, err = 0;
+
+ kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+ __func__, kvm, mem->slot, mem->guest_phys_addr,
+ mem->memory_size, mem->userspace_addr);
+
+ /* Setup Guest PMAP table */
+ if (!kvm->arch.guest_pmap) {
+ if (mem->slot == 0)
+ npages = mem->memory_size >> PAGE_SHIFT;
+
+ if (npages) {
+ kvm->arch.guest_pmap_npages = npages;
+ kvm->arch.guest_pmap =
+ kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+ if (!kvm->arch.guest_pmap) {
+ kvm_err("Failed to allocate guest PMAP");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ kvm_info
+ ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+ npages, kvm->arch.guest_pmap);
+
+ /* Now setup the page table */
+ for (i = 0; i < npages; i++) {
+ kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+ }
+ }
+ }
+out:
+ return;
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ extern char mips32_exception[], mips32_exceptionEnd[];
+ extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+ int err, size, offset;
+ void *gebase;
+ int i;
+
+ struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+ if (!vcpu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+
+ if (err)
+ goto out_free_cpu;
+
+ kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+ /* Allocate space for host mode exception handlers that handle
+ * guest mode exits
+ */
+ if (cpu_has_veic || cpu_has_vint) {
+ size = 0x200 + VECTORSPACING * 64;
+ } else {
+ size = 0x200;
+ }
+
+ /* Save Linux EBASE */
+ vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+ gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+ if (!gebase) {
+ err = -ENOMEM;
+ goto out_free_cpu;
+ }
+ kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+ ALIGN(size, PAGE_SIZE), gebase);
+
+ /* Save new ebase */
+ vcpu->arch.guest_ebase = gebase;
+
+ /* Copy L1 Guest Exception handler to correct offset */
+
+ /* TLB Refill, EXL = 0 */
+ memcpy(gebase, mips32_exception,
+ mips32_exceptionEnd - mips32_exception);
+
+ /* General Exception Entry point */
+ memcpy(gebase + 0x180, mips32_exception,
+ mips32_exceptionEnd - mips32_exception);
+
+ /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+ for (i = 0; i < 8; i++) {
+ kvm_debug("L1 Vectored handler @ %p\n",
+ gebase + 0x200 + (i * VECTORSPACING));
+ memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+ mips32_exceptionEnd - mips32_exception);
+ }
+
+ /* General handler, relocate to unmapped space for sanity's sake */
+ offset = 0x2000;
+ kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+ gebase + offset,
+ mips32_GuestExceptionEnd - mips32_GuestException);
+
+ memcpy(gebase + offset, mips32_GuestException,
+ mips32_GuestExceptionEnd - mips32_GuestException);
+
+ /* Invalidate the icache for these ranges */
+ mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+
+ /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+ if (!vcpu->arch.kseg0_commpage) {
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
+ kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+ kvm_mips_commpage_init(vcpu);
+
+ /* Init */
+ vcpu->arch.last_sched_cpu = -1;
+
+ /* Start off the timer */
+ kvm_mips_emulate_count(vcpu);
+
+ return vcpu;
+
+out_free_gebase:
+ kfree(gebase);
+
+out_free_cpu:
+ kfree(vcpu);
+
+out:
+ return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+ kvm_vcpu_uninit(vcpu);
+
+ kvm_mips_dump_stats(vcpu);
+
+ if (vcpu->arch.guest_ebase)
+ kfree(vcpu->arch.guest_ebase);
+
+ if (vcpu->arch.kseg0_commpage)
+ kfree(vcpu->arch.kseg0_commpage);
+
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_vcpu_free(vcpu);
+}
+
+int
+kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int r = 0;
+ sigset_t sigsaved;
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+ kvm_mips_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ }
+
+ /* Check if we have any exceptions/interrupts pending */
+ kvm_mips_deliver_interrupts(vcpu,
+ kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+ local_irq_disable();
+ kvm_guest_enter();
+
+ r = __kvm_mips_vcpu_run(run, vcpu);
+
+ kvm_guest_exit();
+ local_irq_enable();
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ return r;
+}
+
+int
+kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+ struct kvm_vcpu *dvcpu = NULL;
+
+ if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+ kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+ (int)intr);
+
+ if (irq->cpu == -1)
+ dvcpu = vcpu;
+ else
+ dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+ if (intr == 2 || intr == 3 || intr == 4) {
+ kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+ } else if (intr == -2 || intr == -3 || intr == -4) {
+ kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+ } else {
+ kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+ irq->cpu, irq->irq);
+ return -EINVAL;
+ }
+
+ dvcpu->arch.wait = 0;
+
+ if (waitqueue_active(&dvcpu->wq)) {
+ wake_up_interruptible(&dvcpu->wq);
+ }
+
+ return 0;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+long
+kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+ int intr;
+
+ switch (ioctl) {
+ case KVM_NMI:
+ /* Treat the NMI as a CPU reset */
+ r = kvm_mips_reset_vcpu(vcpu);
+ break;
+ case KVM_INTERRUPT:
+ {
+ struct kvm_mips_interrupt irq;
+ r = -EFAULT;
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ goto out;
+
+ intr = (int)irq.irq;
+
+ kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+ irq.irq);
+
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ return r;
+}
+
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ struct kvm_memory_slot *memslot;
+ unsigned long ga, ga_end;
+ int is_dirty = 0;
+ int r;
+ unsigned long n;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ if (r)
+ goto out;
+
+ /* If nothing is dirty, don't bother messing with page tables. */
+ if (is_dirty) {
+ memslot = &kvm->memslots->memslots[log->slot];
+
+ ga = memslot->base_gfn << PAGE_SHIFT;
+ ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+ printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+ ga_end);
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+ memset(memslot->dirty_bitmap, 0, n);
+ }
+
+ r = 0;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ long r;
+
+ switch (ioctl) {
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ int ret;
+
+ if (kvm_mips_callbacks) {
+ kvm_err("kvm: module already exists\n");
+ return -EEXIST;
+ }
+
+ ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+
+ return ret;
+}
+
+void kvm_arch_exit(void)
+{
+ kvm_mips_callbacks = NULL;
+}
+
+int
+kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int
+kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+ return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_COALESCED_MMIO:
+ r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ return r;
+
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct mips_coproc *cop0;
+
+ if (!vcpu)
+ return -1;
+
+ printk("VCPU Register Dump:\n");
+ printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
+ printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+ for (i = 0; i < 32; i += 4) {
+ printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+ vcpu->arch.gprs[i],
+ vcpu->arch.gprs[i + 1],
+ vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+ }
+ printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+ printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+ cop0 = vcpu->arch.cop0;
+ printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+ kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+
+ printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ vcpu->arch.gprs[i] = regs->gprs[i];
+
+ vcpu->arch.hi = regs->hi;
+ vcpu->arch.lo = regs->lo;
+ vcpu->arch.pc = regs->pc;
+
+ return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ regs->gprs[i] = vcpu->arch.gprs[i];
+
+ regs->hi = vcpu->arch.hi;
+ regs->lo = vcpu->arch.lo;
+ regs->pc = vcpu->arch.pc;
+
+ return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+}
+
+void kvm_mips_comparecount_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ kvm_mips_callbacks->queue_timer_int(vcpu);
+
+ vcpu->arch.wait = 0;
+ if (waitqueue_active(&vcpu->wq)) {
+ wake_up_interruptible(&vcpu->wq);
+ }
+}
+
+/*
+ * low level hrtimer wake routine.
+ */
+enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+ kvm_mips_comparecount_func((unsigned long) vcpu);
+ hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+ ktime_set(0, MS_TO_NS(10)));
+ return HRTIMER_RESTART;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ kvm_mips_callbacks->vcpu_init(vcpu);
+ hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+ kvm_mips_init_shadow_tlb(vcpu);
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ return;
+}
+
+int
+kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+{
+ return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static
+void kvm_mips_set_c0_status(void)
+{
+ uint32_t status = read_c0_status();
+
+ if (cpu_has_fpu)
+ status |= (ST0_CU1);
+
+ if (cpu_has_dsp)
+ status |= (ST0_MX);
+
+ write_c0_status(status);
+ ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ uint32_t cause = vcpu->arch.host_cp0_cause;
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ /* Set a default exit reason */
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+ kvm_mips_set_c0_status();
+
+ local_irq_enable();
+
+ kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+ cause, opc, run, vcpu);
+
+ /* Do a privilege check; if in UM, most of these exit conditions end up
+ * causing an exception to be delivered to the Guest Kernel
+ */
+ er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+ if (er == EMULATE_PRIV_FAIL) {
+ goto skip_emul;
+ } else if (er == EMULATE_FAIL) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ goto skip_emul;
+ }
+
+ switch (exccode) {
+ case T_INT:
+ kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+ ++vcpu->stat.int_exits;
+ trace_kvm_exit(vcpu, INT_EXITS);
+
+ if (need_resched()) {
+ cond_resched();
+ }
+
+ ret = RESUME_GUEST;
+ break;
+
+ case T_COP_UNUSABLE:
+ kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+ ++vcpu->stat.cop_unusable_exits;
+ trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+ ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+ /* XXXKYMA: Might need to return to user space */
+ if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+ ret = RESUME_HOST;
+ }
+ break;
+
+ case T_TLB_MOD:
+ ++vcpu->stat.tlbmod_exits;
+ trace_kvm_exit(vcpu, TLBMOD_EXITS);
+ ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+ break;
+
+ case T_TLB_ST_MISS:
+ kvm_debug
+ ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+ badvaddr);
+
+ ++vcpu->stat.tlbmiss_st_exits;
+ trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+ ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+ break;
+
+ case T_TLB_LD_MISS:
+ kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+
+ ++vcpu->stat.tlbmiss_ld_exits;
+ trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+ ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+ break;
+
+ case T_ADDR_ERR_ST:
+ ++vcpu->stat.addrerr_st_exits;
+ trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+ ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+ break;
+
+ case T_ADDR_ERR_LD:
+ ++vcpu->stat.addrerr_ld_exits;
+ trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+ ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+ break;
+
+ case T_SYSCALL:
+ ++vcpu->stat.syscall_exits;
+ trace_kvm_exit(vcpu, SYSCALL_EXITS);
+ ret = kvm_mips_callbacks->handle_syscall(vcpu);
+ break;
+
+ case T_RES_INST:
+ ++vcpu->stat.resvd_inst_exits;
+ trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+ break;
+
+ case T_BREAK:
+ ++vcpu->stat.break_inst_exits;
+ trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+ ret = kvm_mips_callbacks->handle_break(vcpu);
+ break;
+
+ default:
+ kvm_err
+ ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
+ exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+ kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ }
+
+skip_emul:
+ local_irq_disable();
+
+ if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+ kvm_mips_deliver_interrupts(vcpu, cause);
+
+ if (!(ret & RESUME_HOST)) {
+ /* Only check for signals if not already exiting to userspace */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ ret = (-EINTR << 2) | RESUME_HOST;
+ ++vcpu->stat.signal_exits;
+ trace_kvm_exit(vcpu, SIGNAL_EXITS);
+ }
+ }
+
+ return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+ int ret;
+
+ ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+ if (ret)
+ return ret;
+
+ /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
+ * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
+ * to avoid the possibility of double faulting. The issue is that the TLB code
+ * references routines that are part of the KVM module,
+ * which are only available once the module is loaded.
+ */
+ kvm_mips_gfn_to_pfn = gfn_to_pfn;
+ kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+ kvm_mips_is_error_pfn = is_error_pfn;
+
+ pr_info("KVM/MIPS Initialized\n");
+ return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+ kvm_exit();
+
+ kvm_mips_gfn_to_pfn = NULL;
+ kvm_mips_release_pfn_clean = NULL;
+ kvm_mips_is_error_pfn = NULL;
+
+ pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
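kvm_mips_handle_exit() returns (errcode << 2 | RESUME_FLAG_HOST | RESUME_FLAG_NV), which is why locore.S recovers the error code with an arithmetic shift (sra k0, v0, 2) and why the signal path above builds (-EINTR << 2) | RESUME_HOST. A minimal sketch of that packing follows; the helper names are illustrative and not part of this patch:

        /* Sketch of the handle_exit return convention; helper names are illustrative. */
        #define RESUME_FLAG_HOST        (1 << 1)        /* return to host (userspace)? */

        static inline int make_exit_ret(int errcode, int flags)
        {
                /* e.g. (-EINTR << 2) | RESUME_FLAG_HOST, as in the signal-pending path */
                return (errcode << 2) | flags;
        }

        static inline int exit_ret_errcode(int ret)
        {
                /* arithmetic shift, matching "sra k0, v0, 2" in __kvm_mips_return_to_host */
                return ret >> 2;
        }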
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 00000000000..a4a8c85cc8f
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: commpage: mapped into guest kernel space
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+ struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 00000000000..3873b1ecc40
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* commpage, currently used for Virtual COP0 registers.
+* Mapped into the guest kernel @ 0x0.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_comm.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+ memset(page, 0, sizeof(struct kvm_mips_commpage));
+
+ /* Specific init values for fields */
+ vcpu->arch.cop0 = &page->cop0;
+ memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
+
+ return;
+}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 00000000000..96528e2d1ea
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+
+#include "kvm_mips_comm.h"
+
+#define SYNCI_TEMPLATE 0x041f0000
+#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE 0x8c000000
+#define CLEAR_TEMPLATE 0x00000020
+#define SW_TEMPLATE 0xac000000
+
+int
+kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu)
+{
+ int result = 0;
+ unsigned long kseg0_opc;
+ uint32_t synci_inst = 0x0;
+
+ /* Replace the CACHE instruction, with a NOP */
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+
+ return result;
+}
+
+/*
+ * Address based CACHE instructions are transformed into synci(s). A little heavy
+ * for just D-cache invalidates, but avoids an expensive trap
+ */
+int
+kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+ struct kvm_vcpu *vcpu)
+{
+ int result = 0;
+ unsigned long kseg0_opc;
+ uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+ base = (inst >> 21) & 0x1f;
+ offset = inst & 0xffff;
+ synci_inst |= (base << 21);
+ synci_inst |= offset;
+
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+
+ return result;
+}
+
+int
+kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+ int32_t rt, rd, sel;
+ uint32_t mfc0_inst;
+ unsigned long kseg0_opc, flags;
+
+ rt = (inst >> 16) & 0x1f;
+ rd = (inst >> 11) & 0x1f;
+ sel = inst & 0x7;
+
+ if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+ mfc0_inst = CLEAR_TEMPLATE;
+ mfc0_inst |= ((rt & 0x1f) << 16);
+ } else {
+ mfc0_inst = LW_TEMPLATE;
+ mfc0_inst |= ((rt & 0x1f) << 16);
+ mfc0_inst |=
+ offsetof(struct mips_coproc,
+ reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+ cop0);
+ }
+
+ if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+ mips32_SyncICache((unsigned long) opc, 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int
+kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+ int32_t rt, rd, sel;
+ uint32_t mtc0_inst = SW_TEMPLATE;
+ unsigned long kseg0_opc, flags;
+
+ rt = (inst >> 16) & 0x1f;
+ rd = (inst >> 11) & 0x1f;
+ sel = inst & 0x7;
+
+ mtc0_inst |= ((rt & 0x1f) << 16);
+ mtc0_inst |=
+ offsetof(struct mips_coproc,
+ reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+ if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+ kseg0_opc =
+ CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+ (vcpu, (unsigned long) opc));
+ memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+ mips32_SyncICache(kseg0_opc, 32);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+ mips32_SyncICache((unsigned long) opc, 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
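The translations above all follow the same pattern: start from an instruction template, OR in the register fields taken from the trapping instruction, and, for the mfc0/mtc0 cases, use the commpage offset of the virtual COP0 register as the 16-bit immediate. Below is a worked example of the field packing for the CACHE-to-SYNCI case; it is an illustrative sketch, not code from this patch:

        /*
         * Illustrative only: build a SYNCI replacement for a guest CACHE
         * instruction, reusing its base register and 16-bit offset, e.g.
         * a CACHE with base t0 and offset 8 becomes "synci 8(t0)".
         */
        #include <stdint.h>

        static uint32_t make_synci(uint32_t cache_inst)
        {
                uint32_t base   = (cache_inst >> 21) & 0x1f;    /* rs/base field */
                uint32_t offset = cache_inst & 0xffff;          /* 16-bit immediate */

                /* SYNCI_TEMPLATE (0x041f0000) is REGIMM with rt = 0x1f (synci). */
                return 0x041f0000 | (base << 21) | offset;
        }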
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 00000000000..2b2bac9a40a
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1826 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Instruction/Exception emulation
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+#include "kvm_mips_comm.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address, performing branch simulation if required.
+ * This function should be called only when the instruction being emulated is in a branch delay slot.
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+ unsigned long instpc)
+{
+ unsigned int dspcontrol;
+ union mips_instruction insn;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ long epc = instpc;
+ long nextpc = KVM_INVALID_INST;
+
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+ if (insn.word == KVM_INVALID_INST)
+ return KVM_INVALID_INST;
+
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ arch->gprs[insn.r_format.rd] = epc + 8;
+ /* Fall through */
+ case jr_op:
+ nextpc = arch->gprs[insn.r_format.rs];
+ break;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ if ((long)arch->gprs[insn.i_format.rs] < 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgez_op:
+ case bgezl_op:
+ if ((long)arch->gprs[insn.i_format.rs] >= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bltzal_op:
+ case bltzall_op:
+ arch->gprs[31] = epc + 8;
+ if ((long)arch->gprs[insn.i_format.rs] < 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgezal_op:
+ case bgezall_op:
+ arch->gprs[31] = epc + 8;
+ if ((long)arch->gprs[insn.i_format.rs] >= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+ case bposge32_op:
+ if (!cpu_has_dsp)
+ goto sigill;
+
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ nextpc = epc;
+ break;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ arch->gprs[31] = instpc + 8;
+ case j_op:
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ epc |= (insn.j_format.target << 2);
+ nextpc = epc;
+ break;
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beq_op:
+ case beql_op:
+ if (arch->gprs[insn.i_format.rs] ==
+ arch->gprs[insn.i_format.rt])
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bne_op:
+ case bnel_op:
+ if (arch->gprs[insn.i_format.rs] !=
+ arch->gprs[insn.i_format.rt])
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ /* rt field assumed to be zero */
+ if ((long)arch->gprs[insn.i_format.rs] <= 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ case bgtz_op:
+ case bgtzl_op:
+ /* rt field assumed to be zero */
+ if ((long)arch->gprs[insn.i_format.rs] > 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ nextpc = epc;
+ break;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+ printk("%s: unsupported cop1_op\n", __func__);
+ break;
+ }
+
+ return nextpc;
+
+unaligned:
+ printk("%s: unaligned epc\n", __func__);
+ return nextpc;
+
+sigill:
+ printk("%s: DSP branch but not DSP ASE\n", __func__);
+ return nextpc;
+}
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+ unsigned long branch_pc;
+ enum emulation_result er = EMULATE_DONE;
+
+ if (cause & CAUSEF_BD) {
+ branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+ if (branch_pc == KVM_INVALID_INST) {
+ er = EMULATE_FAIL;
+ } else {
+ vcpu->arch.pc = branch_pc;
+ kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+ }
+ } else
+ vcpu->arch.pc += 4;
+
+ kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+ return er;
+}
+
+/* Every time the compare register is written to, we need to decide when to fire
+ * the timer that represents timer ticks to the GUEST.
+ *
+ */
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+
+ /* If COUNT is enabled */
+ if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
+ hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+ hrtimer_start(&vcpu->arch.comparecount_timer,
+ ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
+ } else {
+ hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+
+ if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ kvm_read_c0_guest_epc(cop0));
+ kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+ } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+ kvm_clear_c0_guest_status(cop0, ST0_ERL);
+ vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ } else {
+ printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ vcpu->arch.pc);
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+
+ kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+ vcpu->arch.pending_exceptions);
+
+ ++vcpu->stat.wait_exits;
+ trace_kvm_exit(vcpu, WAIT_EXITS);
+ if (!vcpu->arch.pending_exceptions) {
+ vcpu->arch.wait = 1;
+ kvm_vcpu_block(vcpu);
+
+ /* If we are runnable, then definitely go off to user space to check if any
+ * I/O interrupts are pending.
+ */
+ if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+ vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+ }
+ }
+
+ return er;
+}
+
+/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
+ * this, if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_FAIL;
+ uint32_t pc = vcpu->arch.pc;
+
+ printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+ return er;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int index = kvm_read_c0_guest_index(cop0);
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_mips_tlb *tlb = NULL;
+ uint32_t pc = vcpu->arch.pc;
+
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ printk("%s: illegal index: %d\n", __func__, index);
+ printk
+ ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0),
+ kvm_read_c0_guest_pagemask(cop0));
+ index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+#if 1
+	/* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+ tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+ tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+ kvm_debug
+ ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
+ kvm_read_c0_guest_pagemask(cop0));
+
+ return er;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ struct kvm_mips_tlb *tlb = NULL;
+ uint32_t pc = vcpu->arch.pc;
+ int index;
+
+#if 1
+ get_random_bytes(&index, sizeof(index));
+ index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+#else
+ index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
+#endif
+
+ if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+ printk("%s: illegal index: %d\n", __func__, index);
+ return EMULATE_FAIL;
+ }
+
+ tlb = &vcpu->arch.guest_tlb[index];
+
+#if 1
+	/* Probe the shadow host TLB for the entry being overwritten; if one matches, invalidate it */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+#endif
+
+ tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+ tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+ tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+ kvm_debug
+ ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+ pc, index, kvm_read_c0_guest_entryhi(cop0),
+ kvm_read_c0_guest_entrylo0(cop0),
+ kvm_read_c0_guest_entrylo1(cop0));
+
+ return er;
+}
+
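+/* Probe the guest TLB for an entry matching the guest EntryHi and write the result to the guest Index register */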
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ long entryhi = kvm_read_c0_guest_entryhi(cop0);
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t pc = vcpu->arch.pc;
+ int index = -1;
+
+ index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+ kvm_write_c0_guest_index(cop0, index);
+
+ kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+ index);
+
+ return er;
+}
+
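+/*
+ * Emulate a trapped COP0 instruction: TLB ops, ERET and WAIT when the CO bit
+ * is set, otherwise MFC0/MTC0/MFMC0/WRPGPR accesses to the guest COP0 context.
+ */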
+enum emulation_result
+kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ enum emulation_result er = EMULATE_DONE;
+ int32_t rt, rd, copz, sel, co_bit, op;
+ uint32_t pc = vcpu->arch.pc;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL) {
+ return er;
+ }
+
+ copz = (inst >> 21) & 0x1f;
+ rt = (inst >> 16) & 0x1f;
+ rd = (inst >> 11) & 0x1f;
+ sel = inst & 0x7;
+ co_bit = (inst >> 25) & 1;
+
+ /* Verify that the register is valid */
+ if (rd > MIPS_CP0_DESAVE) {
+ printk("Invalid rd: %d\n", rd);
+ er = EMULATE_FAIL;
+ goto done;
+ }
+
+ if (co_bit) {
+ op = (inst) & 0xff;
+
+ switch (op) {
+ case tlbr_op: /* Read indexed TLB entry */
+ er = kvm_mips_emul_tlbr(vcpu);
+ break;
+ case tlbwi_op: /* Write indexed */
+ er = kvm_mips_emul_tlbwi(vcpu);
+ break;
+ case tlbwr_op: /* Write random */
+ er = kvm_mips_emul_tlbwr(vcpu);
+ break;
+ case tlbp_op: /* TLB Probe */
+ er = kvm_mips_emul_tlbp(vcpu);
+ break;
+ case rfe_op:
+ printk("!!!COP0_RFE!!!\n");
+ break;
+ case eret_op:
+ er = kvm_mips_emul_eret(vcpu);
+ goto dont_update_pc;
+ break;
+ case wait_op:
+ er = kvm_mips_emul_wait(vcpu);
+ break;
+ }
+ } else {
+ switch (copz) {
+ case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ /* Get reg */
+ if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+ /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
+ vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
+ } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+ vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+			} else {
+ vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+ }
+
+ kvm_debug
+ ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+ pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+ break;
+
+ case dmfc_op:
+ vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+ break;
+
+ case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[rd][sel]++;
+#endif
+ if ((rd == MIPS_CP0_TLB_INDEX)
+ && (vcpu->arch.gprs[rt] >=
+ KVM_MIPS_GUEST_TLB_SIZE)) {
+ printk("Invalid TLB Index: %ld",
+ vcpu->arch.gprs[rt]);
+ er = EMULATE_FAIL;
+ break;
+ }
+#define C0_EBASE_CORE_MASK 0xff
+ if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+ /* Preserve CORE number */
+ kvm_change_c0_guest_ebase(cop0,
+ ~(C0_EBASE_CORE_MASK),
+ vcpu->arch.gprs[rt]);
+ printk("MTCz, cop0->reg[EBASE]: %#lx\n",
+ kvm_read_c0_guest_ebase(cop0));
+ } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+ uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+ if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
+ &&
+ (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
+ != nasid)) {
+
+ kvm_debug
+ ("MTCz, change ASID from %#lx to %#lx\n",
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
+ ASID_MASK(vcpu->arch.gprs[rt]));
+
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+ }
+ kvm_write_c0_guest_entryhi(cop0,
+ vcpu->arch.gprs[rt]);
+ }
+ /* Are we writing to COUNT */
+ else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+				/* Linux doesn't seem to write into COUNT; silently
+				 * ignore the write for now (re-enable EMULATE_FAIL
+				 * below to catch it if this ever changes)
+				 */
+ /*er = EMULATE_FAIL; */
+ goto done;
+ } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+ kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+ pc, kvm_read_c0_guest_compare(cop0),
+ vcpu->arch.gprs[rt]);
+
+ /* If we are writing to COMPARE */
+ /* Clear pending timer interrupt, if any */
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ kvm_write_c0_guest_compare(cop0,
+ vcpu->arch.gprs[rt]);
+ } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ kvm_write_c0_guest_status(cop0,
+ vcpu->arch.gprs[rt]);
+ /* Make sure that CU1 and NMI bits are never set */
+ kvm_clear_c0_guest_status(cop0,
+ (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+ } else {
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+ }
+
+ kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+ rd, sel, cop0->reg[rd][sel]);
+ break;
+
+ case dmtc_op:
+ printk
+ ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+ vcpu->arch.pc, rt, rd, sel);
+ er = EMULATE_FAIL;
+ break;
+
+ case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+ if (rt != 0) {
+ vcpu->arch.gprs[rt] =
+ kvm_read_c0_guest_status(cop0);
+ }
+ /* EI */
+ if (inst & 0x20) {
+ kvm_debug("[%#lx] mfmcz_op: EI\n",
+ vcpu->arch.pc);
+ kvm_set_c0_guest_status(cop0, ST0_IE);
+ } else {
+ kvm_debug("[%#lx] mfmcz_op: DI\n",
+ vcpu->arch.pc);
+ kvm_clear_c0_guest_status(cop0, ST0_IE);
+ }
+
+ break;
+
+ case wrpgpr_op:
+ {
+ uint32_t css =
+ cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+ uint32_t pss =
+ (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] == 0 */
+ if (css || pss) {
+ er = EMULATE_FAIL;
+ break;
+ }
+ kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+ vcpu->arch.gprs[rt]);
+ vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+ }
+ break;
+ default:
+ printk
+ ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+ vcpu->arch.pc, copz);
+ er = EMULATE_FAIL;
+ break;
+ }
+ }
+
+done:
+ /*
+ * Rollback PC only if emulation was unsuccessful
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
+ }
+
+dont_update_pc:
+ /*
+ * This is for special instructions whose emulation
+ * updates the PC, so do not overwrite the PC under
+ * any circumstances
+ */
+
+ return er;
+}
+
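+/*
+ * Emulate a store (SB/SH/SW) to an MMIO address: translate the faulting
+ * address, fill in run->mmio and hand the write off to userspace.
+ */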
+enum emulation_result
+kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DO_MMIO;
+ int32_t op, base, rt, offset;
+ uint32_t bytes;
+ void *data = run->mmio.data;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ rt = (inst >> 16) & 0x1f;
+ base = (inst >> 21) & 0x1f;
+ offset = inst & 0xffff;
+ op = (inst >> 26) & 0x3f;
+
+ switch (op) {
+ case sb_op:
+ bytes = 1;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ *(u8 *) data = vcpu->arch.gprs[rt];
+ kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+ *(uint8_t *) data);
+
+ break;
+
+ case sw_op:
+ bytes = 4;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(uint32_t *) data);
+ break;
+
+ case sh_op:
+ bytes = 2;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+ *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+ kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+ vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+ vcpu->arch.gprs[rt], *(uint32_t *) data);
+ break;
+
+ default:
+		printk("Store not yet supported\n");
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ /*
+ * Rollback PC if emulation was unsuccessful
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
+ }
+
+ return er;
+}
+
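+/*
+ * Emulate a load (LB/LBU/LH/LHU/LW) from an MMIO address: set up run->mmio
+ * for userspace; the data is written back to the destination GPR by
+ * kvm_mips_complete_mmio_load() once the read completes.
+ */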
+enum emulation_result
+kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DO_MMIO;
+ int32_t op, base, rt, offset;
+ uint32_t bytes;
+
+ rt = (inst >> 16) & 0x1f;
+ base = (inst >> 21) & 0x1f;
+ offset = inst & 0xffff;
+ op = (inst >> 26) & 0x3f;
+
+ vcpu->arch.pending_load_cause = cause;
+ vcpu->arch.io_gpr = rt;
+
+ switch (op) {
+ case lw_op:
+ bytes = 4;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+ break;
+
+ case lh_op:
+ case lhu_op:
+ bytes = 2;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+
+ if (op == lh_op)
+ vcpu->mmio_needed = 2;
+ else
+ vcpu->mmio_needed = 1;
+
+ break;
+
+ case lbu_op:
+ case lb_op:
+ bytes = 1;
+ if (bytes > sizeof(run->mmio.data)) {
+ kvm_err("%s: bad MMIO length: %d\n", __func__,
+ run->mmio.len);
+ er = EMULATE_FAIL;
+ break;
+ }
+ run->mmio.phys_addr =
+ kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
+ host_cp0_badvaddr);
+ if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+ vcpu->mmio_is_write = 0;
+
+ if (op == lb_op)
+ vcpu->mmio_needed = 2;
+ else
+ vcpu->mmio_needed = 1;
+
+ break;
+
+ default:
+		printk("Load not yet supported\n");
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
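+/* Synchronize the instruction cache for the host KSEG0 alias of a guest virtual address */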
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+ unsigned long offset = (va & ~PAGE_MASK);
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pa;
+ gfn_t gfn;
+ pfn_t pfn;
+
+ gfn = va >> PAGE_SHIFT;
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ return -1;
+ }
+ pfn = kvm->arch.guest_pmap[gfn];
+ pa = (pfn << PAGE_SHIFT) | offset;
+
+ printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
+
+ mips32_SyncICache(CKSEG0ADDR(pa), 32);
+ return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV 0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
+#define MIPS_CACHE_OP_IMP 0x3
+#define MIPS_CACHE_OP_HIT_INV 0x4
+#define MIPS_CACHE_OP_FILL_WB_INV 0x5
+#define MIPS_CACHE_OP_HIT_HB 0x6
+#define MIPS_CACHE_OP_FETCH_LOCK 0x7
+
+#define MIPS_CACHE_ICACHE 0x0
+#define MIPS_CACHE_DCACHE 0x1
+#define MIPS_CACHE_SEC 0x3
+
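+/*
+ * Emulate a guest CACHE instruction: index invalidate ops blast the whole
+ * host I-cache or D-cache, while hit ops fault the guest mapping into the
+ * shadow host TLB if needed and then operate on the host cache line.
+ */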
+enum emulation_result
+kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ extern void (*r4k_blast_dcache) (void);
+ extern void (*r4k_blast_icache) (void);
+ enum emulation_result er = EMULATE_DONE;
+ int32_t offset, cache, op_inst, op, base;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ unsigned long va;
+ unsigned long curr_pc;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ base = (inst >> 21) & 0x1f;
+ op_inst = (inst >> 16) & 0x1f;
+ offset = inst & 0xffff;
+ cache = (inst >> 16) & 0x3;
+ op = (inst >> 18) & 0x7;
+
+ va = arch->gprs[base] + offset;
+
+ kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+
+	/* Index invalidate ops are issued by Linux on startup to invalidate the
+	 * caches entirely by stepping through all the ways/indexes, so just
+	 * blast the whole host cache instead of emulating each index.
+	 */
+ if (op == MIPS_CACHE_OP_INDEX_INV) {
+ kvm_debug
+ ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+ arch->gprs[base], offset);
+
+ if (cache == MIPS_CACHE_DCACHE)
+ r4k_blast_dcache();
+ else if (cache == MIPS_CACHE_ICACHE)
+ r4k_blast_icache();
+ else {
+ printk("%s: unsupported CACHE INDEX operation\n",
+ __func__);
+ return EMULATE_FAIL;
+ }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+ goto done;
+ }
+
+ preempt_disable();
+ if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ }
+ } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ int index;
+
+ /* If an entry already exists then skip */
+ if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
+ goto skip_fault;
+ }
+
+		/* If the address is not in the guest TLB, then give the guest a
+		 * fault; the resulting handler will do the right thing
+		 */
+ index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+
+ if (index < 0) {
+ vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+ vcpu->arch.host_cp0_badvaddr = va;
+ er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+ vcpu);
+ preempt_enable();
+ goto dont_update_pc;
+ } else {
+ struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+			/* Check if the entry is valid; if not, set up a TLB invalid exception to the guest */
+ if (!TLB_IS_VALID(*tlb, va)) {
+ er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+ run, vcpu);
+ preempt_enable();
+ goto dont_update_pc;
+ } else {
+ /* We fault an entry from the guest tlb to the shadow host TLB */
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+ NULL,
+ NULL);
+ }
+ }
+ } else {
+ printk
+ ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto dont_update_pc;
+
+ }
+
+skip_fault:
+	/* XXXKYMA: Only the subset of cache ops used by Linux is supported */
+ if (cache == MIPS_CACHE_DCACHE
+ && (op == MIPS_CACHE_OP_FILL_WB_INV
+ || op == MIPS_CACHE_OP_HIT_INV)) {
+ flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+		/* Replace the CACHE instruction with a SYNCI; not an exact equivalent, but it avoids a trap */
+ kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+ } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+ flush_dcache_line(va);
+ flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+ /* Replace the CACHE instruction, with a SYNCI */
+ kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+ } else {
+ printk
+ ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
+ cache, op, base, arch->gprs[base], offset);
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto dont_update_pc;
+ }
+
+ preempt_enable();
+
+ dont_update_pc:
+ /*
+ * Rollback PC
+ */
+ vcpu->arch.pc = curr_pc;
+ done:
+ return er;
+}
+
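+/*
+ * Top-level instruction emulator: fetch the trapping instruction (from the
+ * branch delay slot if Cause.BD is set) and dispatch to the COP0, load/store
+ * or CACHE emulators.
+ */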
+enum emulation_result
+kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t inst;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD) {
+ opc += 1;
+ }
+
+ inst = kvm_get_inst(opc, vcpu);
+
+ switch (((union mips_instruction)inst).r_format.opcode) {
+ case cop0_op:
+ er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+ break;
+ case sb_op:
+ case sh_op:
+ case sw_op:
+ er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+ break;
+ case lb_op:
+ case lbu_op:
+ case lhu_op:
+ case lh_op:
+ case lw_op:
+ er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+ break;
+
+ case cache_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, CACHE_EXITS);
+ er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+ break;
+
+ default:
+ printk("Instruction emulation not supported (%p/%#x)\n", opc,
+ inst);
+ kvm_arch_vcpu_dump_regs(vcpu);
+ er = EMULATE_FAIL;
+ break;
+ }
+
+ return er;
+}
+
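+/* Deliver a SYSCALL exception to the guest at the general exception vector (KVM_GUEST_KSEG0 + 0x180) */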
+enum emulation_result
+kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_SYSCALL << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ printk("Trying to deliver SYSCALL when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
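+/*
+ * Deliver a TLB refill (load) exception to the guest: save EPC, set EXL and
+ * vector to the TLB refill handler (offset 0x0), or to the general exception
+ * vector (offset 0x180) if EXL was already set.
+ */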
+enum emulation_result
+kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+			ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* set pc to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+ } else {
+ kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
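+/* Deliver a TLB invalid (load) exception to the guest at the general exception vector */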
+enum emulation_result
+kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi =
+ (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+ arch->pc);
+
+ /* set pc to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
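+/* Deliver a TLB refill (store) exception to the guest, mirroring the load variant above */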
+enum emulation_result
+kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x0;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
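+/* Deliver a TLB invalid (store) exception to the guest at the general exception vector */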
+enum emulation_result
+kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result
+kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+
+#ifdef DEBUG
+	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+			ASID_MASK(kvm_read_c0_guest_entryhi(vcpu->arch.cop0));
+	int index;
+
+	/*
+	 * If the address is not in the guest TLB, then we are in trouble
+	 */
+	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+ if (index < 0) {
+ /* XXXKYMA Invalidate and retry */
+ kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+ kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+ __func__, entryhi);
+ kvm_mips_dump_guest_tlbs(vcpu);
+ kvm_mips_dump_host_tlbs();
+ return EMULATE_FAIL;
+ }
+#endif
+
+ er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+ return er;
+}
+
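+/* Deliver a TLB Modified exception to the guest at the general exception vector */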
+enum emulation_result
+kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+ arch->pc);
+
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ } else {
+ kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+ arch->pc);
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ }
+
+ kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+ /* setup badvaddr, context and entryhi registers for the guest */
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+ /* XXXKYMA: is the context register used by linux??? */
+ kvm_write_c0_guest_entryhi(cop0, entryhi);
+ /* Blow away the shadow host TLBs */
+ kvm_mips_flush_host_tlb(1);
+
+ return er;
+}
+
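+/* Deliver a Coprocessor Unusable exception (CE = 1) to the guest for FPU use */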
+enum emulation_result
+kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ }
+
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+ kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+ return er;
+}
+
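+/* Deliver a Reserved Instruction exception to the guest */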
+enum emulation_result
+kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_RES_INST << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ kvm_err("Trying to deliver RI when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
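+/* Deliver a Breakpoint exception to the guest */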
+enum emulation_result
+kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (T_BREAK << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ } else {
+ printk("Trying to deliver BP when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE 0x03e00000
+#define RT 0x001f0000
+#define OFFSET 0x0000ffff
+#define LL 0xc0000000
+#define SC 0xe0000000
+#define SPEC0 0x00000000
+#define SPEC3 0x7c000000
+#define RD 0x0000f800
+#define FUNC 0x0000003f
+#define SYNC 0x0000000f
+#define RDHWR 0x0000003b
+
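+/*
+ * Handle a Reserved Instruction exception: emulate RDHWR (CPU number, SYNCI
+ * step, Count, Count resolution, UserLocal); anything else fails emulation.
+ */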
+enum emulation_result
+kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long curr_pc;
+ uint32_t inst;
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ /*
+ * Fetch the instruction.
+ */
+ if (cause & CAUSEF_BD)
+ opc += 1;
+
+ inst = kvm_get_inst(opc, vcpu);
+
+ if (inst == KVM_INVALID_INST) {
+ printk("%s: Cannot get inst @ %p\n", __func__, opc);
+ return EMULATE_FAIL;
+ }
+
+ if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+ int rd = (inst & RD) >> 11;
+ int rt = (inst & RT) >> 16;
+ switch (rd) {
+ case 0: /* CPU number */
+ arch->gprs[rt] = 0;
+ break;
+ case 1: /* SYNCI length */
+ arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ break;
+ case 2: /* Read count register */
+			printk("RDHWR: Count register\n");
+ arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
+ break;
+ case 3: /* Count register resolution */
+ switch (current_cpu_data.cputype) {
+ case CPU_20KC:
+ case CPU_25KF:
+ arch->gprs[rt] = 1;
+ break;
+ default:
+ arch->gprs[rt] = 2;
+ }
+ break;
+ case 29:
+#if 1
+ arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+#else
+ /* UserLocal not implemented */
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+#endif
+ break;
+
+ default:
+ printk("RDHWR not supported\n");
+ er = EMULATE_FAIL;
+ break;
+ }
+ } else {
+ printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ er = EMULATE_FAIL;
+ }
+
+ /*
+ * Rollback PC only if emulation was unsuccessful
+ */
+ if (er == EMULATE_FAIL) {
+ vcpu->arch.pc = curr_pc;
+ }
+ return er;
+}
+
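+/*
+ * Complete an MMIO load once userspace has filled in run->mmio.data: copy the
+ * data into the saved destination GPR, extending as required, and advance the
+ * PC past the load.
+ */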
+enum emulation_result
+kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+ enum emulation_result er = EMULATE_DONE;
+ unsigned long curr_pc;
+
+ if (run->mmio.len > sizeof(*gpr)) {
+		printk("Bad MMIO length: %d\n", run->mmio.len);
+ er = EMULATE_FAIL;
+ goto done;
+ }
+
+ /*
+ * Update PC and hold onto current PC in case there is
+ * an error and we want to rollback the PC
+ */
+ curr_pc = vcpu->arch.pc;
+ er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+ if (er == EMULATE_FAIL)
+ return er;
+
+ switch (run->mmio.len) {
+ case 4:
+ *gpr = *(int32_t *) run->mmio.data;
+ break;
+
+ case 2:
+		/* mmio_needed == 2 means a signed load (LH), otherwise zero-extend (LHU) */
+		if (vcpu->mmio_needed == 2)
+			*gpr = *(int16_t *) run->mmio.data;
+		else
+			*gpr = *(uint16_t *) run->mmio.data;
+
+ break;
+ case 1:
+ if (vcpu->mmio_needed == 2)
+ *gpr = *(int8_t *) run->mmio.data;
+ else
+ *gpr = *(u8 *) run->mmio.data;
+ break;
+ }
+
+ if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+ kvm_debug
+ ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+ vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+ vcpu->mmio_needed);
+
+done:
+ return er;
+}
+
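+/* Deliver an exception to the guest using the exception code from the host Cause value */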
+static enum emulation_result
+kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ enum emulation_result er = EMULATE_DONE;
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (exccode << CAUSEB_EXCCODE));
+
+ /* Set PC to the exception entry point */
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+ kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+ kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+ exccode, kvm_read_c0_guest_epc(cop0),
+ kvm_read_c0_guest_badvaddr(cop0));
+ } else {
+ printk("Trying to deliver EXC when EXL is already set\n");
+ er = EMULATE_FAIL;
+ }
+
+ return er;
+}
+
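+/*
+ * Check whether an exception taken while the guest was in user mode is really
+ * a privilege violation (e.g. an access to guest kernel space); if so,
+ * convert it into the appropriate guest exception.
+ */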
+enum emulation_result
+kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+ int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+ if (usermode) {
+ switch (exccode) {
+ case T_INT:
+ case T_SYSCALL:
+ case T_BREAK:
+ case T_RES_INST:
+ break;
+
+ case T_COP_UNUSABLE:
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+ er = EMULATE_PRIV_FAIL;
+ break;
+
+ case T_TLB_MOD:
+ break;
+
+ case T_TLB_LD_MISS:
+			/* If we are accessing guest kernel space, send an address error exception to the guest */
+ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+ printk("%s: LD MISS @ %#lx\n", __func__,
+ badvaddr);
+ cause &= ~0xff;
+ cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+ er = EMULATE_PRIV_FAIL;
+ }
+ break;
+
+ case T_TLB_ST_MISS:
+			/* If we are accessing guest kernel space, send an address error exception to the guest */
+ if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+ printk("%s: ST MISS @ %#lx\n", __func__,
+ badvaddr);
+ cause &= ~0xff;
+ cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+ er = EMULATE_PRIV_FAIL;
+ }
+ break;
+
+ case T_ADDR_ERR_ST:
+ printk("%s: address error ST @ %#lx\n", __func__,
+ badvaddr);
+ if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+ cause &= ~0xff;
+ cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+ }
+ er = EMULATE_PRIV_FAIL;
+ break;
+ case T_ADDR_ERR_LD:
+ printk("%s: address error LD @ %#lx\n", __func__,
+ badvaddr);
+ if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+ cause &= ~0xff;
+ cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+ }
+ er = EMULATE_PRIV_FAIL;
+ break;
+ default:
+ er = EMULATE_PRIV_FAIL;
+ break;
+ }
+ }
+
+ if (er == EMULATE_PRIV_FAIL) {
+ kvm_mips_emulate_exc(cause, opc, run, vcpu);
+ }
+ return er;
+}
+
+/* User Address (UA) fault: this can happen if
+ * (1) the TLB entry is not present/valid in both the guest and the shadow host
+ *     TLBs, in which case we pass the fault on to the guest kernel and let it
+ *     handle it;
+ * (2) the TLB entry is present in the guest TLB but not in the shadow host
+ *     TLB, in which case we inject the entry from the guest TLB into the
+ *     shadow host TLB.
+ */
+enum emulation_result
+kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
+ struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er = EMULATE_DONE;
+ uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ unsigned long va = vcpu->arch.host_cp0_badvaddr;
+ int index;
+
+ kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+ vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+	/* KVM would not have got the exception if this entry was valid in the
+	 * shadow host TLB. Check the guest TLB; if the entry is not there, send
+	 * the guest an exception. The guest exception handler should then inject
+	 * an entry into the guest TLB.
+	 */
+ index = kvm_mips_guest_tlb_lookup(vcpu,
+ (va & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi
+ (vcpu->arch.cop0)));
+ if (index < 0) {
+ if (exccode == T_TLB_LD_MISS) {
+ er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+ } else if (exccode == T_TLB_ST_MISS) {
+ er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+ } else {
+ printk("%s: invalid exc code: %d\n", __func__, exccode);
+ er = EMULATE_FAIL;
+ }
+ } else {
+ struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+ /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
+ if (!TLB_IS_VALID(*tlb, va)) {
+ if (exccode == T_TLB_LD_MISS) {
+ er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+ vcpu);
+ } else if (exccode == T_TLB_ST_MISS) {
+ er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+ vcpu);
+ } else {
+ printk("%s: invalid exc code: %d\n", __func__,
+ exccode);
+ er = EMULATE_FAIL;
+ }
+ } else {
+#ifdef DEBUG
+ kvm_debug
+ ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+ tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+ /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+ NULL);
+ }
+ }
+
+ return er;
+}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 00000000000..1e5de16afe2
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupt delivery
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_int.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+	/* Set the Cause bits to reflect the pending timer interrupt;
+	 * the EXC code will be set when we are actually
+	 * delivering the interrupt.
+	 */
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+ /* Queue up an INT exception for the core */
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void
+kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+
+	/* Set the Cause bits to reflect the pending I/O interrupt;
+	 * the EXC code will be set when we are actually
+	 * delivering the interrupt.
+	 */
+ switch (intr) {
+ case 2:
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+ /* Queue up an INT exception for the core */
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+ break;
+
+ case 3:
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+ break;
+
+ case 4:
+ kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+ kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+void
+kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq)
+{
+ int intr = (int)irq->irq;
+ switch (intr) {
+ case -2:
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+ break;
+
+ case -3:
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+ break;
+
+ case -4:
+ kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+ kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+ break;
+
+ default:
+ break;
+ }
+
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int
+kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause)
+{
+ int allowed = 0;
+ uint32_t exccode;
+
+ struct kvm_vcpu_arch *arch = &vcpu->arch;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ switch (priority) {
+ case MIPS_EXC_INT_TIMER:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ case MIPS_EXC_INT_IO:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ case MIPS_EXC_INT_IPI_1:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ case MIPS_EXC_INT_IPI_2:
+ if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+ && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+ && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+ allowed = 1;
+ exccode = T_INT;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ /* Are we allowed to deliver the interrupt ??? */
+ if (allowed) {
+
+ if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+ /* save old pc */
+ kvm_write_c0_guest_epc(cop0, arch->pc);
+ kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+ if (cause & CAUSEF_BD)
+ kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+ else
+ kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+ kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+ } else
+ kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+ kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+ (exccode << CAUSEB_EXCCODE));
+
+ /* XXXSL Set PC to the interrupt exception entry point */
+ if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+ arch->pc = KVM_GUEST_KSEG0 + 0x200;
+ else
+ arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
+ }
+
+ return allowed;
+}
+
+int
+kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause)
+{
+ return 1;
+}
+
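+/* Walk the pending and pending-clear bitmaps in priority order, clearing or
+ * delivering each interrupt via the registered callbacks.
+ */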
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+ unsigned int priority;
+
+ if (!(*pending) && !(*pending_clr))
+ return;
+
+ priority = __ffs(*pending_clr);
+ while (priority <= MIPS_EXC_MAX) {
+ if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+ if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+ break;
+ }
+
+ priority = find_next_bit(pending_clr,
+ BITS_PER_BYTE * sizeof(*pending_clr),
+ priority + 1);
+ }
+
+ priority = __ffs(*pending);
+ while (priority <= MIPS_EXC_MAX) {
+ if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+ if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+ return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 00000000000..20da7d29eed
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Interrupts
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/* MIPS exception priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities
+ */
+
+#define MIPS_EXC_RESET 0
+#define MIPS_EXC_SRESET 1
+#define MIPS_EXC_DEBUG_ST 2
+#define MIPS_EXC_DEBUG 3
+#define MIPS_EXC_DDB 4
+#define MIPS_EXC_NMI 5
+#define MIPS_EXC_MCHK 6
+#define MIPS_EXC_INT_TIMER 7
+#define MIPS_EXC_INT_IO 8
+#define MIPS_EXC_EXECUTE 9
+#define MIPS_EXC_INT_IPI_1 10
+#define MIPS_EXC_INT_IPI_2 11
+#define MIPS_EXC_MAX 12
+/* XXXSL More to follow */
+
+#define C_TI (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+ uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 00000000000..86d3b4cc348
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+/*
+ * Define opcode values not defined in <asm/inst.h>
+ */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define mfmcz_op 0x0b /* 01011 */
+#define wrpgpr_op 0x0e /* 01110 */
+
+/* COP0 opcodes (only if COP0 and CO=1): */
+#define wait_op 0x20 /* 100000 */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 00000000000..075904bcac1
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: COP0 access histogram
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/kvm_host.h>
+
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+ "WAIT",
+ "CACHE",
+ "Signal",
+ "Interrupt",
+ "COP0/1 Unusable",
+ "TLB Mod",
+ "TLB Miss (LD)",
+ "TLB Miss (ST)",
+ "Address Err (ST)",
+ "Address Error (LD)",
+ "System Call",
+ "Reserved Inst",
+ "Break Inst",
+ "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+ "Index",
+ "Random",
+ "EntryLo0",
+ "EntryLo1",
+ "Context",
+ "PG Mask",
+ "Wired",
+ "HWREna",
+ "BadVAddr",
+ "Count",
+ "EntryHI",
+ "Compare",
+ "Status",
+ "Cause",
+ "EXC PC",
+ "PRID",
+ "Config",
+ "LLAddr",
+ "Watch Lo",
+ "Watch Hi",
+ "X Context",
+ "Reserved",
+ "Impl Dep",
+ "Debug",
+ "DEPC",
+ "PerfCnt",
+ "ErrCtl",
+ "CacheErr",
+ "TagLo",
+ "TagHi",
+ "ErrorEPC",
+ "DESAVE"
+};
+
+int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+ int i, j;
+
+ printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+ for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+ for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+ if (vcpu->arch.cop0->stat[i][j])
+ printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+ vcpu->arch.cop0->stat[i][j]);
+ }
+ }
+#endif
+
+ return 0;
+}
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 00000000000..89511a9258d
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,928 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
+* TLB handlers run from KSEG0
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB 0
+#define KVM_GUEST_SP_TLB 1
+
+#define PRIx64 "llx"
+
+/* Invalidate TLB entries by loading EntryHi with a unique (unmatchable) KSEG0 address */
+#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+ return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+}
+
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+ return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+}
+
+inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->arch.commpage_tlb;
+}
+
+
+/*
+ * Structure defining a TLB entry data set.
+ */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+ unsigned long old_entryhi;
+ unsigned long old_pagemask;
+ struct kvm_mips_tlb tlb;
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+
+ printk("HOST TLBs:\n");
+ printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+
+ for (i = 0; i < current_cpu_data.tlbsize; i++) {
+ write_c0_index(i);
+ mtc0_tlbw_hazard();
+
+ tlb_read();
+ tlbw_use_hazard();
+
+ tlb.tlb_hi = read_c0_entryhi();
+ tlb.tlb_lo0 = read_c0_entrylo0();
+ tlb.tlb_lo1 = read_c0_entrylo1();
+ tlb.tlb_mask = read_c0_pagemask();
+
+ printk("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ i, tlb.tlb_hi);
+ printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+ (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo0 >> 3) & 7);
+ printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+ (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ }
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+ local_irq_restore(flags);
+}
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct kvm_mips_tlb tlb;
+ int i;
+
+ printk("Guest TLBs:\n");
+ printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ tlb = vcpu->arch.guest_tlb[i];
+ printk("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ i, tlb.tlb_hi);
+ printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+ (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo0 >> 3) & 7);
+ printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+ (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ }
+}
+
+void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
+{
+ int i;
+ volatile struct kvm_mips_tlb tlb;
+
+ printk("Shadow TLBs:\n");
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
+ printk("TLB%c%3d Hi 0x%08lx ",
+ (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ i, tlb.tlb_hi);
+ printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+ (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo0 >> 3) & 7);
+ printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+ (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+ (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+ (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+ (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ }
+}
+
+static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+ pfn_t pfn;
+
+ if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+ return;
+
+ pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+ if (kvm_mips_is_error_pfn(pfn)) {
+ panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ }
+
+ kvm->arch.guest_pmap[gfn] = pfn;
+ return;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long gva)
+{
+ gfn_t gfn;
+ uint32_t offset = gva & ~PAGE_MASK;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+ kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+ __builtin_return_address(0), gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+ gva);
+ return KVM_INVALID_PAGE;
+ }
+ kvm_mips_map_page(vcpu->kvm, gfn);
+ return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+/* set flush_dcache_mask == 0 if no dcache flush required */
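+/* Probe the host TLB for an entry matching entryhi and overwrite it in place,
+ * or pick a random index if no matching entry is present.
+ */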
+int
+kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+ unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
+{
+ unsigned long flags;
+ unsigned long old_entryhi;
+ volatile int idx;
+
+ local_irq_save(flags);
+
+
+ old_entryhi = read_c0_entryhi();
+ write_c0_entryhi(entryhi);
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ if (idx > current_cpu_data.tlbsize) {
+ kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ if (idx < 0) {
+ idx = read_c0_random() % current_cpu_data.tlbsize;
+ write_c0_index(idx);
+ mtc0_tlbw_hazard();
+ }
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+#ifdef DEBUG
+ if (debug) {
+ kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
+ "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+ vcpu->arch.pc, idx, read_c0_entryhi(),
+ read_c0_entrylo0(), read_c0_entrylo1());
+ }
+#endif
+
+ /* Flush D-cache */
+ if (flush_dcache_mask) {
+ if (entrylo0 & MIPS3_PG_V) {
+ ++vcpu->stat.flush_dcache_exits;
+ flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
+ }
+ if (entrylo1 & MIPS3_PG_V) {
+ ++vcpu->stat.flush_dcache_exits;
+ flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
+ (0x1 << PAGE_SHIFT));
+ }
+ }
+
+ /* Restore old ASID */
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+ local_irq_restore(flags);
+ return 0;
+}
+
+
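+/*
+ * Handle a host TLB miss on a guest KSEG0 address: map the even/odd guest
+ * page pair containing @badvaddr and write a matching host TLB entry using
+ * the guest kernel ASID.
+ */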
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ gfn_t gfn;
+ pfn_t pfn0, pfn1;
+ unsigned long vaddr = 0;
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ int even;
+ struct kvm *kvm = vcpu->kvm;
+ const int flush_dcache_mask = 0;
+
+
+ if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+ kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+ even = !(gfn & 0x1);
+ vaddr = badvaddr & (PAGE_MASK << 1);
+
+ kvm_mips_map_page(vcpu->kvm, gfn);
+ kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+
+ if (even) {
+ pfn0 = kvm->arch.guest_pmap[gfn];
+ pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+ } else {
+ pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+ pfn1 = kvm->arch.guest_pmap[gfn];
+ }
+
+ entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+ (0x1 << 1);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+ (0x1 << 1);
+
+ return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ flush_dcache_mask);
+}
+
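+/*
+ * Install a host TLB entry for the guest commpage, using the TLB index
+ * returned by kvm_mips_get_commpage_asid(), so that guest kernel accesses
+ * to the commpage no longer fault.
+ */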
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ pfn_t pfn0, pfn1;
+ unsigned long flags, old_entryhi = 0, vaddr = 0;
+ unsigned long entrylo0 = 0, entrylo1 = 0;
+
+
+ pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+ pfn1 = 0;
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
+ (0x1 << 1);
+ entrylo1 = 0;
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ vaddr = badvaddr & (PAGE_MASK << 1);
+ write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+ mtc0_tlbw_hazard();
+ write_c0_entrylo0(entrylo0);
+ mtc0_tlbw_hazard();
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+#ifdef DEBUG
+ kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+ vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+ read_c0_entrylo0(), read_c0_entrylo1());
+#endif
+
+ /* Restore old ASID */
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+ local_irq_restore(flags);
+
+ return 0;
+}
+
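+/*
+ * Fault in the host pages backing a guest TLB entry and write an equivalent
+ * host TLB entry, preserving the guest's D and V bits.  The resolved host
+ * physical addresses are optionally returned via @hpa0/@hpa1.
+ */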
+int
+kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
+{
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ pfn_t pfn0, pfn1;
+
+
+ if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+ pfn0 = 0;
+ pfn1 = 0;
+ } else {
+ kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
+ kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+
+ pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
+ pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
+ }
+
+ if (hpa0)
+ *hpa0 = pfn0 << PAGE_SHIFT;
+
+ if (hpa1)
+ *hpa1 = pfn1 << PAGE_SHIFT;
+
+ /* Get attributes from the Guest TLB */
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+ (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+ (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+#ifdef DEBUG
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo0, tlb->tlb_lo1);
+#endif
+
+ return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ tlb->tlb_mask);
+}
+
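+/*
+ * Search the guest TLB for an entry matching @entryhi (VPN2 and ASID, taking
+ * the page mask and global bit into account).  Returns the matching index,
+ * or -1 if no entry matches.
+ */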
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+ int i;
+ int index = -1;
+ struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+
+ for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+ if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
+ (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+ index = i;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ if (index >= 0)
+ kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+ __func__, entryhi, index, tlb[index].tlb_lo0, tlb[index].tlb_lo1);
+#endif
+
+ return index;
+}
+
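+/*
+ * Probe the host TLB for @vaddr using the guest kernel or user ASID
+ * (depending on the current guest mode) and return the probed index,
+ * which is negative if there is no match.
+ */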
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+ unsigned long old_entryhi, flags;
+ volatile int idx;
+
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
+ else
+ write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ /* Restore old ASID */
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+
+#ifdef DEBUG
+ kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+#endif
+
+ return idx;
+}
+
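+/*
+ * Invalidate the host TLB entry, if any, that maps @va under the guest user
+ * ASID.
+ */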
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+ int idx;
+ unsigned long flags, old_entryhi;
+
+ local_irq_save(flags);
+
+
+ old_entryhi = read_c0_entryhi();
+
+ write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+ mtc0_tlbw_hazard();
+
+ tlb_probe();
+ tlb_probe_hazard();
+ idx = read_c0_index();
+
+ if (idx >= current_cpu_data.tlbsize)
+ BUG();
+
+ if (idx >= 0) {
+ write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo0(0);
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ }
+
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+
+#ifdef DEBUG
+ if (idx >= 0) {
+ kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+ (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
+ }
+#endif
+
+ return 0;
+}
+
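+/* Invalidate the host TLB entry at @index. */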
+/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+ unsigned long flags, old_entryhi;
+
+ if (index >= current_cpu_data.tlbsize)
+ BUG();
+
+ local_irq_save(flags);
+
+
+ old_entryhi = read_c0_entryhi();
+
+ write_c0_entryhi(UNIQUE_ENTRYHI(index));
+ mtc0_tlbw_hazard();
+
+ write_c0_index(index);
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo0(0);
+ mtc0_tlbw_hazard();
+
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_entryhi);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
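+/*
+ * Invalidate all host TLB entries.  When @skip_kseg0 is set, entries that
+ * map guest KSEG0 addresses are read back first and left untouched.
+ */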
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+ unsigned long flags;
+ unsigned long old_entryhi, entryhi;
+ unsigned long old_pagemask;
+ int entry = 0;
+ int maxentry = current_cpu_data.tlbsize;
+
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+
+ /* Blast 'em all away. */
+ for (entry = 0; entry < maxentry; entry++) {
+
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+
+ if (skip_kseg0) {
+ tlb_read();
+ tlbw_use_hazard();
+
+ entryhi = read_c0_entryhi();
+
+ /* Don't blow away guest kernel entries */
+ if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
+ continue;
+ }
+ }
+
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ mtc0_tlbw_hazard();
+ write_c0_entrylo0(0);
+ mtc0_tlbw_hazard();
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ mtc0_tlbw_hazard();
+ }
+
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+ tlbw_use_hazard();
+
+ local_irq_restore(flags);
+}
+
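+/*
+ * Hand out a new ASID for the guest MMU context.  When the ASID space wraps,
+ * the local TLB (and a virtually tagged I-cache, if present) is flushed and
+ * a new ASID cycle begins.
+ */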
+void
+kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned long asid = asid_cache(cpu);
+
+ if (!(ASID_MASK(ASID_INC(asid)))) {
+ if (cpu_has_vtag_icache) {
+ flush_icache_all();
+ }
+
+ kvm_local_flush_tlb_all(); /* start new asid cycle */
+
+ if (!asid) /* fix version if needed */
+ asid = ASID_FIRST_VERSION;
+ }
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
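+/* Save the current host TLB contents into this VCPU's per-CPU shadow TLB. */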
+void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ unsigned long old_entryhi;
+ unsigned long old_pagemask;
+ int entry = 0;
+ int cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+ old_entryhi = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_read();
+ tlbw_use_hazard();
+
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
+ }
+
+ write_c0_entryhi(old_entryhi);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+
+ local_irq_restore(flags);
+
+}
+
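+/* Reload the host TLB from this VCPU's per-CPU shadow TLB copy. */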
+void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry;
+ int cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+ old_ctx = read_c0_entryhi();
+
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
+ mtc0_tlbw_hazard();
+ write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
+ write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
+ local_irq_restore(flags);
+}
+
+
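+/* Invalidate every entry in the local host TLB. */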
+void kvm_local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned long old_ctx;
+ int entry = 0;
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+
+ /* Blast 'em all away. */
+ while (entry < current_cpu_data.tlbsize) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ entry++;
+ }
+ tlbw_use_hazard();
+ write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
+
+ local_irq_restore(flags);
+}
+
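+/* Initialize each per-CPU shadow TLB entry to a unique, invalid value. */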
+void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+{
+ int cpu, entry;
+
+ for_each_possible_cpu(cpu) {
+ for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
+ UNIQUE_ENTRYHI(entry);
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
+ read_c0_pagemask();
+#ifdef DEBUG
+ kvm_debug
+ ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
+ cpu, entry,
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
+ vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
+#endif
+ }
+ }
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ unsigned long flags;
+ int newasid = 0;
+
+#ifdef DEBUG
+ kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+#endif
+
+ /* Allocate new kernel and user ASIDs if needed */
+
+ local_irq_save(flags);
+
+ if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+ ASID_VERSION_MASK) {
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+ vcpu->arch.guest_user_asid[cpu] =
+ vcpu->arch.guest_user_mm.context.asid[cpu];
+ newasid++;
+
+ kvm_info("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+ vcpu->arch.guest_user_asid[cpu]);
+ }
+
+ if (vcpu->arch.last_sched_cpu != cpu) {
+ kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ }
+
+ /* Only reload shadow host TLB if new ASIDs haven't been allocated */
+#if 0
+ if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
+ kvm_mips_flush_host_tlb(0);
+ kvm_shadow_tlb_load(vcpu);
+ }
+#endif
+
+ if (!newasid) {
+ /* If we preempted while the guest was executing, then reload the pre-empted ASID */
+ if (current->flags & PF_VCPU) {
+ write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+ ehb();
+ }
+ } else {
+ /* New ASIDs were allocated for the VM */
+
+ /* Were we in guest context? If so, the pre-empted ASID is no longer
+ * valid; set it according to the guest's current mode (Kernel/User).
+ */
+ if (current->flags & PF_VCPU) {
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi(ASID_MASK(vcpu->arch.guest_kernel_asid[cpu]));
+ else
+ write_c0_entryhi(ASID_MASK(vcpu->arch.guest_user_asid[cpu]));
+ ehb();
+ }
+ }
+
+ local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ uint32_t cpu;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+
+ vcpu->arch.preempt_entryhi = read_c0_entryhi();
+ vcpu->arch.last_sched_cpu = cpu;
+
+#if 0
+ if ((atomic_read(&kvm_mips_instance) > 1)) {
+ kvm_shadow_tlb_put(vcpu);
+ }
+#endif
+
+ if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+ ASID_VERSION_MASK)) {
+ kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
+ cpu_context(cpu, current->mm));
+ drop_mmu_context(current->mm, cpu);
+ }
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+ ehb();
+
+ local_irq_restore(flags);
+}
+
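+/*
+ * Fetch the guest instruction at @opc.  Mapped guest addresses are looked up
+ * in the host TLB first and, if absent, faulted in from the guest TLB;
+ * guest KSEG0 addresses are translated directly to a host physical address.
+ * Returns KVM_INVALID_INST if the address cannot be resolved.
+ */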
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long paddr, flags;
+ uint32_t inst;
+ int index;
+
+ if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+ KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+ if (index >= 0) {
+ inst = *(opc);
+ } else {
+ index = kvm_mips_guest_tlb_lookup(vcpu,
+ ((unsigned long) opc & VPN2_MASK) |
+ ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+ if (index < 0) {
+ kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, vcpu, read_c0_entryhi());
+ kvm_mips_dump_host_tlbs();
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.guest_tlb[index], NULL, NULL);
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+ (unsigned long) opc);
+ inst = *(uint32_t *) CKSEG0ADDR(paddr);
+ } else {
+ kvm_err("%s: illegal address: %p\n", __func__, opc);
+ return KVM_INVALID_INST;
+ }
+
+ return inst;
+}
+
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL(kvm_shadow_tlb_put);
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL(kvm_shadow_tlb_load);
+EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 00000000000..466aeef044b
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "kvm_mips_opcode.h"
+#include "kvm_mips_int.h"
+
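+/*
+ * Translate a guest virtual address to a guest physical address.  Only
+ * unmapped CKSEG0/CKSEG1 addresses can be translated here; anything else
+ * yields KVM_INVALID_ADDR.
+ */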
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+ gpa_t gpa;
+ uint32_t kseg = KSEGX(gva);
+
+ if ((kseg == CKSEG0) || (kseg == CKSEG1))
+ gpa = CPHYSADDR(gva);
+ else {
+ printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+ kvm_mips_dump_host_tlbs();
+ gpa = KVM_INVALID_ADDR;
+ }
+
+#ifdef DEBUG
+ kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+#endif
+
+ return gpa;
+}
+
+
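+/*
+ * Coprocessor Unusable exit: a CU1 (FPU) fault is reflected back to the
+ * guest as an FPU exception; any other coprocessor unusable fault is
+ * handled by emulating the trapping instruction.
+ */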
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
+ er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+ } else
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ case EMULATE_WAIT:
+ run->exit_reason = KVM_EXIT_INTR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+ kvm_debug
+ ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+#endif
+ er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
+ * using HIGHMEM. Need to address this in a HIGHMEM kernel
+ */
+ printk
+ ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ printk("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+ && KVM_GUEST_KERNEL_MODE(vcpu)) {
+ if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+ kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+#endif
+ er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /* All KSEG0 faults are handled by KVM, as the guest kernel does not
+ * expect to ever get them
+ */
+ if (kvm_mips_handle_kseg0_tlb_fault
+ (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else {
+ kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+ && KVM_GUEST_KERNEL_MODE(vcpu)) {
+ if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+ || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+#ifdef DEBUG
+ kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
+ vcpu->arch.pc, badvaddr);
+#endif
+
+ /* User Address (UA) fault, this could happen if
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ * case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ * case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+
+ er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ if (kvm_mips_handle_kseg0_tlb_fault
+ (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ } else {
+ printk("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ kvm_arch_vcpu_dump_regs(vcpu);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (KVM_GUEST_KERNEL_MODE(vcpu)
+ && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+#ifdef DEBUG
+ kvm_debug("Emulate Store to MMIO space\n");
+#endif
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ printk("Emulate Store to MMIO space failed\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ }
+ } else {
+ printk
+ ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+#ifdef DEBUG
+ kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+#endif
+ er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+ if (er == EMULATE_FAIL) {
+ printk("Emulate Load from MMIO space failed\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ } else {
+ run->exit_reason = KVM_EXIT_MMIO;
+ ret = RESUME_HOST;
+ }
+ } else {
+ printk
+ ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ cause, opc, badvaddr);
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ er = EMULATE_FAIL;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+ if (er == EMULATE_DONE)
+ ret = RESUME_GUEST;
+ else {
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ }
+ return ret;
+}
+
+static int
+kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
+ kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
+ kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
+ kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
+ kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
+
+ kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
+ kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
+ kvm_write_c0_guest_pagemask(cop0,
+ regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
+ kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
+ kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
+
+ return 0;
+}
+
+static int
+kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+ regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
+ regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
+ regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
+
+ regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
+ regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
+ kvm_read_c0_guest_pagemask(cop0);
+ regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
+ regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
+
+ regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
+ regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
+
+ return 0;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ uint32_t config1;
+ int vcpu_id = vcpu->vcpu_id;
+
+ /*
+ * Arch specific stuff: set up the config registers so that the guest
+ * comes up as expected. For now we simulate a MIPS 24kc.
+ */
+ kvm_write_c0_guest_prid(cop0, 0x00019300);
+ kvm_write_c0_guest_config(cop0,
+ MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT));
+
+ /* Read the cache characteristics from the host Config1 Register */
+ config1 = (read_c0_config1() & ~0x7f);
+
+ /* Set up MMU size */
+ config1 &= ~(0x3f << 25);
+ config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+ /* We unset some bits that we aren't emulating */
+ config1 &=
+ ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+ (1 << CP0C1_WR) | (1 << CP0C1_CA));
+ kvm_write_c0_guest_config1(cop0, config1);
+
+ kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+ /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+ kvm_write_c0_guest_config3(cop0,
+ MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
+ CP0C3_ULRI));
+
+ /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+ kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+ /* Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) */
+ kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+ /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
+ kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+ return 0;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+ /* exit handlers */
+ .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+ .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+ .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+ .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+ .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+ .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+ .handle_syscall = kvm_trap_emul_handle_syscall,
+ .handle_res_inst = kvm_trap_emul_handle_res_inst,
+ .handle_break = kvm_trap_emul_handle_break,
+
+ .vm_init = kvm_trap_emul_vm_init,
+ .vcpu_init = kvm_trap_emul_vcpu_init,
+ .vcpu_setup = kvm_trap_emul_vcpu_setup,
+ .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+ .queue_timer_int = kvm_mips_queue_timer_int_cb,
+ .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+ .queue_io_int = kvm_mips_queue_io_int_cb,
+ .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+ .irq_deliver = kvm_mips_irq_deliver_cb,
+ .irq_clear = kvm_mips_irq_clear_cb,
+ .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
+ .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+ *install_callbacks = &kvm_trap_emul_callbacks;
+ return 0;
+}
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 00000000000..bc9e0f406c0
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
+/*
+* This file is subject to the terms and conditions of the GNU General Public
+* License. See the file "COPYING" in the main directory of this archive
+* for more details.
+*
+* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+* Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoints for VM exits
+ */
+extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+
+TRACE_EVENT(kvm_exit,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
+ TP_ARGS(vcpu, reason),
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(unsigned int, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("[%s]PC: 0x%08lx",
+ kvm_mips_exit_types_str[__entry->reason],
+ __entry->vcpu->arch.pc)
+);
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>