Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/arch_memory_mapping.c |  34
-rw-r--r--  target-i386/cc_helper.c           |  10
-rw-r--r--  target-i386/cpu.c                 | 767
-rw-r--r--  target-i386/cpu.h                 | 109
-rw-r--r--  target-i386/helper.c              | 176
-rw-r--r--  target-i386/helper.h              |   6
-rw-r--r--  target-i386/kvm.c                 | 347
-rw-r--r--  target-i386/kvm_i386.h            |  22
-rw-r--r--  target-i386/seg_helper.c          |   4
-rw-r--r--  target-i386/smm_helper.c          |   4
-rw-r--r--  target-i386/svm_helper.c          |   6
-rw-r--r--  target-i386/translate.c           | 484
12 files changed, 1334 insertions(+), 635 deletions(-)
diff --git a/target-i386/arch_memory_mapping.c b/target-i386/arch_memory_mapping.c
index 8e5a56a..41f9d1c 100644
--- a/target-i386/arch_memory_mapping.c
+++ b/target-i386/arch_memory_mapping.c
@@ -16,10 +16,10 @@
#include "memory_mapping.h"
/* PAE Paging or IA-32e Paging */
-static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
+static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
int32_t a20_mask, target_ulong start_line_addr)
{
- target_phys_addr_t pte_addr, start_paddr;
+ hwaddr pte_addr, start_paddr;
uint64_t pte;
target_ulong start_vaddr;
int i;
@@ -46,10 +46,10 @@ static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
/* 32-bit Paging */
static void walk_pte2(MemoryMappingList *list,
- target_phys_addr_t pte_start_addr, int32_t a20_mask,
+ hwaddr pte_start_addr, int32_t a20_mask,
target_ulong start_line_addr)
{
- target_phys_addr_t pte_addr, start_paddr;
+ hwaddr pte_addr, start_paddr;
uint32_t pte;
target_ulong start_vaddr;
int i;
@@ -75,10 +75,10 @@ static void walk_pte2(MemoryMappingList *list,
}
/* PAE Paging or IA-32e Paging */
-static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
+static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
int32_t a20_mask, target_ulong start_line_addr)
{
- target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
+ hwaddr pde_addr, pte_start_addr, start_paddr;
uint64_t pde;
target_ulong line_addr, start_vaddr;
int i;
@@ -112,10 +112,10 @@ static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
/* 32-bit Paging */
static void walk_pde2(MemoryMappingList *list,
- target_phys_addr_t pde_start_addr, int32_t a20_mask,
+ hwaddr pde_start_addr, int32_t a20_mask,
bool pse)
{
- target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
+ hwaddr pde_addr, pte_start_addr, start_paddr;
uint32_t pde;
target_ulong line_addr, start_vaddr;
int i;
@@ -149,9 +149,9 @@ static void walk_pde2(MemoryMappingList *list,
/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list,
- target_phys_addr_t pdpe_start_addr, int32_t a20_mask)
+ hwaddr pdpe_start_addr, int32_t a20_mask)
{
- target_phys_addr_t pdpe_addr, pde_start_addr;
+ hwaddr pdpe_addr, pde_start_addr;
uint64_t pdpe;
target_ulong line_addr;
int i;
@@ -173,10 +173,10 @@ static void walk_pdpe2(MemoryMappingList *list,
#ifdef TARGET_X86_64
/* IA-32e Paging */
static void walk_pdpe(MemoryMappingList *list,
- target_phys_addr_t pdpe_start_addr, int32_t a20_mask,
+ hwaddr pdpe_start_addr, int32_t a20_mask,
target_ulong start_line_addr)
{
- target_phys_addr_t pdpe_addr, pde_start_addr, start_paddr;
+ hwaddr pdpe_addr, pde_start_addr, start_paddr;
uint64_t pdpe;
target_ulong line_addr, start_vaddr;
int i;
@@ -210,9 +210,9 @@ static void walk_pdpe(MemoryMappingList *list,
/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list,
- target_phys_addr_t pml4e_start_addr, int32_t a20_mask)
+ hwaddr pml4e_start_addr, int32_t a20_mask)
{
- target_phys_addr_t pml4e_addr, pdpe_start_addr;
+ hwaddr pml4e_addr, pdpe_start_addr;
uint64_t pml4e;
target_ulong line_addr;
int i;
@@ -242,20 +242,20 @@ int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env)
if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
- target_phys_addr_t pml4e_addr;
+ hwaddr pml4e_addr;
pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
walk_pml4e(list, pml4e_addr, env->a20_mask);
} else
#endif
{
- target_phys_addr_t pdpe_addr;
+ hwaddr pdpe_addr;
pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
walk_pdpe2(list, pdpe_addr, env->a20_mask);
}
} else {
- target_phys_addr_t pde_addr;
+ hwaddr pde_addr;
bool pse;
pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
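Note: the target_phys_addr_t -> hwaddr change throughout this file is a
mechanical rename. hwaddr is assumed to be QEMU's plain 64-bit physical
address type, roughly:

    typedef uint64_t hwaddr;   /* physical address, independent of target_ulong */

so none of the page-table walkers above change behaviour.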
diff --git a/target-i386/cc_helper.c b/target-i386/cc_helper.c
index 07892f9..9422003 100644
--- a/target-i386/cc_helper.c
+++ b/target-i386/cc_helper.c
@@ -353,6 +353,16 @@ void helper_sti(CPUX86State *env)
env->eflags |= IF_MASK;
}
+void helper_clac(CPUX86State *env)
+{
+ env->eflags &= ~AC_MASK;
+}
+
+void helper_stac(CPUX86State *env)
+{
+ env->eflags |= AC_MASK;
+}
+
#if 0
/* vm86plus instructions */
void helper_cli_vm(CPUX86State *env)
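Note: helper_clac()/helper_stac() back the new CLAC/STAC instructions, which
clear/set the EFLAGS.AC bit consumed by SMAP. AC is bit 18 of EFLAGS, so
AC_MASK is expected to be:

    #define AC_MASK (1 << 18)   /* EFLAGS.AC: alignment check / SMAP override */

Both instructions are privileged (CPL 0); the decode in translate.c is
expected to enforce that, leaving the helpers to just toggle the flag.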
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 423e009..c6c2ca0 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -37,6 +37,13 @@
#include <linux/kvm_para.h>
#endif
+#include "sysemu.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen.h"
+#include "hw/sysbus.h"
+#include "hw/apic_internal.h"
+#endif
+
/* feature flags taken from "Intel Processor Identification and the CPUID
* Instruction" and AMD's "CPUID Specification". In cases of disagreement
* between feature naming conventions, aliases may be added.
@@ -59,34 +66,43 @@ static const char *ext_feature_name[] = {
NULL, "pcid", "dca", "sse4.1|sse4_1",
"sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
"tsc-deadline", "aes", "xsave", "osxsave",
- "avx", NULL, NULL, "hypervisor",
+ "avx", "f16c", "rdrand", "hypervisor",
};
+/* Feature names that are already defined in feature_name[] but are also set
+ * in CPUID[8000_0001].EDX on AMD CPUs do not get entries in
+ * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
+ * if and only if the CPU vendor is AMD.
+ */
static const char *ext2_feature_name[] = {
- "fpu", "vme", "de", "pse",
- "tsc", "msr", "pae", "mce",
- "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
- "mtrr", "pge", "mca", "cmov",
- "pat", "pse36", NULL, NULL /* Linux mp */,
- "nx|xd", NULL, "mmxext", "mmx",
- "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
+ NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+ NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+ NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
+ NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+ "nx|xd", NULL, "mmxext", NULL /* mmx */,
+ NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
NULL, "lm|i64", "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
"lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
"cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
"3dnowprefetch", "osvw", "ibs", "xop",
- "skinit", "wdt", NULL, NULL,
- "fma4", NULL, "cvt16", "nodeid_msr",
- NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL,
+ "skinit", "wdt", NULL, "lwp",
+ "fma4", "tce", NULL, "nodeid_msr",
+ NULL, "tbm", "topoext", "perfctr_core",
+ "perfctr_nb", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
};
static const char *kvm_feature_name[] = {
- "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
+ "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
};
static const char *svm_feature_name[] = {
@@ -100,6 +116,13 @@ static const char *svm_feature_name[] = {
NULL, NULL, NULL, NULL,
};
+static const char *cpuid_7_0_ebx_feature_name[] = {
+ "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
+ NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+};
+
/* collects per-function cpuid data
*/
typedef struct model_features_t {
@@ -113,6 +136,25 @@ typedef struct model_features_t {
int check_cpuid = 0;
int enforce_cpuid = 0;
+#if defined(CONFIG_KVM)
+static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
+ (1 << KVM_FEATURE_NOP_IO_DELAY) |
+ (1 << KVM_FEATURE_MMU_OP) |
+ (1 << KVM_FEATURE_CLOCKSOURCE2) |
+ (1 << KVM_FEATURE_ASYNC_PF) |
+ (1 << KVM_FEATURE_STEAL_TIME) |
+ (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+static const uint32_t kvm_pv_eoi_features = (0x1 << KVM_FEATURE_PV_EOI);
+#else
+static uint32_t kvm_default_features = 0;
+static const uint32_t kvm_pv_eoi_features = 0;
+#endif
+
+void enable_kvm_pv_eoi(void)
+{
+ kvm_default_features |= kvm_pv_eoi_features;
+}
+
void host_cpuid(uint32_t function, uint32_t count,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
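Note: enable_kvm_pv_eoi() lets board code opt in to paravirtual EOI on top of
the kvm_default_features set. A hypothetical call site, for illustration only
(the function name is made up):

    static void pc_machine_init_1_3(void)
    {
        /* newer machine types expose kvm_pv_eoi to guests by default */
        enable_kvm_pv_eoi();
    }

Older machine types simply never call it, keeping guest-visible CPUID stable
across upgrades.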
@@ -215,14 +257,17 @@ static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
uint32_t *ext2_features,
uint32_t *ext3_features,
uint32_t *kvm_features,
- uint32_t *svm_features)
+ uint32_t *svm_features,
+ uint32_t *cpuid_7_0_ebx_features)
{
if (!lookup_feature(features, flagname, NULL, feature_name) &&
!lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
!lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
!lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
!lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
- !lookup_feature(svm_features, flagname, NULL, svm_feature_name))
+ !lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
+ !lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
+ cpuid_7_0_ebx_feature_name))
fprintf(stderr, "CPU feature %s not found\n", flagname);
}
@@ -240,7 +285,6 @@ typedef struct x86_def_t {
uint32_t xlevel;
char model_id[48];
int vendor_override;
- uint32_t flags;
/* Store the results of Centaur's CPUID instructions */
uint32_t ext4_features;
uint32_t xlevel2;
@@ -259,7 +303,6 @@ typedef struct x86_def_t {
CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
CPUID_PAE | CPUID_SEP | CPUID_APIC)
-#define EXT2_FEATURE_MASK 0x0183F3FF
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
@@ -272,12 +315,12 @@ typedef struct x86_def_t {
/* missing:
CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
- CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
+ CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
CPUID_EXT_HYPERVISOR)
/* missing:
CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
-#define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
+#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
/* missing:
@@ -285,6 +328,7 @@ typedef struct x86_def_t {
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_SVM_FEATURES 0
+#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP)
/* maintains list of cpu model definitions
*/
@@ -306,7 +350,7 @@ static x86_def_t builtin_x86_defs[] = {
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
.ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
@@ -326,7 +370,7 @@ static x86_def_t builtin_x86_defs[] = {
CPUID_PSE36 | CPUID_VME | CPUID_HT,
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
CPUID_EXT_POPCNT,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
@@ -374,7 +418,7 @@ static x86_def_t builtin_x86_defs[] = {
/* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
/* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
/* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
@@ -403,7 +447,7 @@ static x86_def_t builtin_x86_defs[] = {
.features = PPRO_FEATURES |
CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
.ext_features = CPUID_EXT_SSE3,
- .ext2_features = PPRO_FEATURES & EXT2_FEATURE_MASK,
+ .ext2_features = PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
.ext3_features = 0,
.xlevel = 0x80000008,
.model_id = "Common 32-bit KVM processor"
@@ -468,8 +512,10 @@ static x86_def_t builtin_x86_defs[] = {
.family = 6,
.model = 2,
.stepping = 3,
- .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
+ .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
+ CPUID_MCA,
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
+ CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
.xlevel = 0x80000008,
},
{
@@ -485,13 +531,296 @@ static x86_def_t builtin_x86_defs[] = {
/* Some CPUs got no CPUID_SEP */
.ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
- .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_NX,
+ .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
+ CPUID_EXT2_NX,
.ext3_features = CPUID_EXT3_LAHF_LM,
.xlevel = 0x8000000A,
.model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
},
+ {
+ .name = "Conroe",
+ .level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
+ },
+ {
+ .name = "Penryn",
+ .level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
+ },
+ {
+ .name = "Nehalem",
+ .level = 2,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
+ },
+ {
+ .name = "Westmere",
+ .level = 11,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 44,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
+ },
+ {
+ .name = "SandyBridge",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 42,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Xeon E312xx (Sandy Bridge)",
+ },
+ {
+ .name = "Haswell",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_INTEL_1,
+ .vendor2 = CPUID_VENDOR_INTEL_2,
+ .vendor3 = CPUID_VENDOR_INTEL_3,
+ .family = 6,
+ .model = 60,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .ext3_features = CPUID_EXT3_LAHF_LM,
+ .cpuid_7_0_ebx_features = CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM,
+ .xlevel = 0x8000000A,
+ .model_id = "Intel Core Processor (Haswell)",
+ },
+ {
+ .name = "Opteron_G1",
+ .level = 5,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G2",
+ .level = 5,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_CX16 | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
+ CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+ CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+ CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+ CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+ CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G3",
+ .level = 5,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
+ CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+ CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+ CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+ CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+ CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
+ CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G4",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 21,
+ .model = 1,
+ .stepping = 2,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 62xx class CPU",
+ },
+ {
+ .name = "Opteron_G5",
+ .level = 0xd,
+ .vendor1 = CPUID_VENDOR_AMD_1,
+ .vendor2 = CPUID_VENDOR_AMD_2,
+ .vendor3 = CPUID_VENDOR_AMD_3,
+ .family = 21,
+ .model = 2,
+ .stepping = 0,
+ .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .ext_features = CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+ CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .ext3_features = CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 63xx class CPU",
+ },
};
+#ifdef CONFIG_KVM
static int cpu_x86_fill_model_id(char *str)
{
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
@@ -506,14 +835,23 @@ static int cpu_x86_fill_model_id(char *str)
}
return 0;
}
+#endif
-static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
+/* Fill an x86_def_t struct with information about the host CPU, and
+ * the CPU features supported by the host hardware + host kernel.
+ *
+ * This function may be called only if KVM is enabled.
+ */
+static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
{
+#ifdef CONFIG_KVM
+ KVMState *s = kvm_state;
uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+ assert(kvm_enabled());
+
x86_cpu_def->name = "host";
host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->level = eax;
x86_cpu_def->vendor1 = ebx;
x86_cpu_def->vendor2 = edx;
x86_cpu_def->vendor3 = ecx;
@@ -522,21 +860,24 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
x86_cpu_def->stepping = eax & 0x0F;
- x86_cpu_def->ext_features = ecx;
- x86_cpu_def->features = edx;
- if (kvm_enabled() && x86_cpu_def->level >= 7) {
- x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
+ x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ x86_cpu_def->features = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_EDX);
+ x86_cpu_def->ext_features = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_ECX);
+
+ if (x86_cpu_def->level >= 7) {
+ x86_cpu_def->cpuid_7_0_ebx_features =
+ kvm_arch_get_supported_cpuid(s, 0x7, 0, R_EBX);
} else {
x86_cpu_def->cpuid_7_0_ebx_features = 0;
}
- host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->xlevel = eax;
+ x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ x86_cpu_def->ext2_features =
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
+ x86_cpu_def->ext3_features =
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
- host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->ext2_features = edx;
- x86_cpu_def->ext3_features = ecx;
cpu_x86_fill_model_id(x86_cpu_def->model_id);
x86_cpu_def->vendor_override = 0;
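Note: a worked example of the family/model/stepping decode above, using
CPUID[1].EAX = 0x000206a7 (a Sandy Bridge part):

    family   = ((0x206a7 >> 8) & 0x0f) + ((0x206a7 >> 20) & 0xff)   = 6 + 0 = 6
    model    = ((0x206a7 >> 4) & 0x0f) | ((0x206a7 & 0xf0000) >> 12) = 0x0a | 0x20 = 42
    stepping = 0x206a7 & 0x0f                                        = 7

i.e. the extended-family and extended-model fields are folded into the base
values, matching the .family/.model numbers used by the built-in definitions.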
@@ -545,11 +886,13 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
+ eax = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
if (eax >= 0xC0000001) {
/* Support VIA max extended level */
x86_cpu_def->xlevel2 = eax;
host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
- x86_cpu_def->ext4_features = edx;
+ x86_cpu_def->ext4_features =
+ kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
}
}
@@ -560,8 +903,7 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
* unsupported ones later.
*/
x86_cpu_def->svm_features = -1;
-
- return 0;
+#endif /* CONFIG_KVM */
}
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
@@ -582,8 +924,10 @@ static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
/* best-effort attempt to inform the user that requested cpu flags aren't making
* their way to the guest. Note: ft[].check_feat ideally should be
* specified via a guest_def field to suppress report of extraneous flags.
+ *
+ * This function may be called only if KVM is enabled.
*/
-static int check_features_against_host(x86_def_t *guest_def)
+static int kvm_check_features_against_host(x86_def_t *guest_def)
{
x86_def_t host_def;
uint32_t mask;
@@ -598,7 +942,9 @@ static int check_features_against_host(x86_def_t *guest_def)
{&guest_def->ext3_features, &host_def.ext3_features,
~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
- cpu_x86_fill_host(&host_def);
+ assert(kvm_enabled());
+
+ kvm_cpu_fill_host(&host_def);
for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
for (mask = 1; mask; mask <<= 1)
if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
@@ -846,7 +1192,7 @@ static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
{
X86CPU *cpu = X86_CPU(obj);
const int64_t min = 0;
- const int64_t max = INT_MAX;
+ const int64_t max = INT64_MAX;
int64_t value;
visit_type_int(v, &value, name, errp);
@@ -872,39 +1218,29 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
/* Features to be added*/
uint32_t plus_features = 0, plus_ext_features = 0;
uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
- uint32_t plus_kvm_features = 0, plus_svm_features = 0;
+ uint32_t plus_kvm_features = kvm_default_features, plus_svm_features = 0;
+ uint32_t plus_7_0_ebx_features = 0;
/* Features to be removed */
uint32_t minus_features = 0, minus_ext_features = 0;
uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
uint32_t minus_kvm_features = 0, minus_svm_features = 0;
+ uint32_t minus_7_0_ebx_features = 0;
uint32_t numvalue;
for (def = x86_defs; def; def = def->next)
if (name && !strcmp(name, def->name))
break;
if (kvm_enabled() && name && strcmp(name, "host") == 0) {
- cpu_x86_fill_host(x86_cpu_def);
+ kvm_cpu_fill_host(x86_cpu_def);
} else if (!def) {
goto error;
} else {
memcpy(x86_cpu_def, def, sizeof(*def));
}
-#if defined(CONFIG_KVM)
- plus_kvm_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
- (1 << KVM_FEATURE_NOP_IO_DELAY) |
- (1 << KVM_FEATURE_MMU_OP) |
- (1 << KVM_FEATURE_CLOCKSOURCE2) |
- (1 << KVM_FEATURE_ASYNC_PF) |
- (1 << KVM_FEATURE_STEAL_TIME) |
- (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
-#else
- plus_kvm_features = 0;
-#endif
-
add_flagname_to_bitmaps("hypervisor", &plus_features,
- &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
- &plus_kvm_features, &plus_svm_features);
+ &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
+ &plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);
featurestr = strtok(NULL, ",");
@@ -914,12 +1250,12 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
add_flagname_to_bitmaps(featurestr + 1, &plus_features,
&plus_ext_features, &plus_ext2_features,
&plus_ext3_features, &plus_kvm_features,
- &plus_svm_features);
+ &plus_svm_features, &plus_7_0_ebx_features);
} else if (featurestr[0] == '-') {
add_flagname_to_bitmaps(featurestr + 1, &minus_features,
&minus_ext_features, &minus_ext2_features,
&minus_ext3_features, &minus_kvm_features,
- &minus_svm_features);
+ &minus_svm_features, &minus_7_0_ebx_features);
} else if ((val = strchr(featurestr, '='))) {
*val = 0; val++;
if (!strcmp(featurestr, "family")) {
@@ -1025,16 +1361,21 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
x86_cpu_def->ext3_features |= plus_ext3_features;
x86_cpu_def->kvm_features |= plus_kvm_features;
x86_cpu_def->svm_features |= plus_svm_features;
+ x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
x86_cpu_def->features &= ~minus_features;
x86_cpu_def->ext_features &= ~minus_ext_features;
x86_cpu_def->ext2_features &= ~minus_ext2_features;
x86_cpu_def->ext3_features &= ~minus_ext3_features;
x86_cpu_def->kvm_features &= ~minus_kvm_features;
x86_cpu_def->svm_features &= ~minus_svm_features;
- if (check_cpuid) {
- if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
+ x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
+ if (check_cpuid && kvm_enabled()) {
+ if (kvm_check_features_against_host(x86_cpu_def) && enforce_cpuid)
goto error;
}
+ if (x86_cpu_def->cpuid_7_0_ebx_features && x86_cpu_def->level < 7) {
+ x86_cpu_def->level = 7;
+ }
g_free(s);
return 0;
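Note: the plus_*/minus_* bitmaps correspond to "+flag"/"-flag" suffixes on the
command line. Illustrative invocations:

    qemu-system-x86_64 -cpu qemu64,+smep
    qemu-system-x86_64 -cpu Haswell,-rtm,-hle

The new plus_7_0_ebx_features/minus_7_0_ebx_features pair extends this to the
CPUID[EAX=7,ECX=0].EBX flags, and the final hunk bumps "level" to 7 so that
guests can actually see the leaf those flags live in.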
@@ -1073,70 +1414,28 @@ static void listflags(char *buf, int bufsize, uint32_t fbits,
}
}
-/* generate CPU information:
- * -? list model names
- * -?model list model names/IDs
- * -?dump output all model (x86_def_t) data
- * -?cpuid list all recognized cpuid flag names
- */
-void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
+/* generate CPU information. */
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
- unsigned char model = !strcmp("?model", optarg);
- unsigned char dump = !strcmp("?dump", optarg);
- unsigned char cpuid = !strcmp("?cpuid", optarg);
x86_def_t *def;
char buf[256];
- if (cpuid) {
- (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
- listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
- (*cpu_fprintf)(f, " f_edx: %s\n", buf);
- listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
- (*cpu_fprintf)(f, " f_ecx: %s\n", buf);
- listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
- (*cpu_fprintf)(f, " extf_edx: %s\n", buf);
- listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
- (*cpu_fprintf)(f, " extf_ecx: %s\n", buf);
- return;
- }
for (def = x86_defs; def; def = def->next) {
- snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
- if (model || dump) {
- (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
- } else {
- (*cpu_fprintf)(f, "x86 %16s\n", buf);
- }
- if (dump) {
- memcpy(buf, &def->vendor1, sizeof (def->vendor1));
- memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
- memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
- buf[12] = '\0';
- (*cpu_fprintf)(f,
- " family %d model %d stepping %d level %d xlevel 0x%x"
- " vendor \"%s\"\n",
- def->family, def->model, def->stepping, def->level,
- def->xlevel, buf);
- listflags(buf, sizeof (buf), def->features, feature_name, 0);
- (*cpu_fprintf)(f, " feature_edx %08x (%s)\n", def->features,
- buf);
- listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
- 0);
- (*cpu_fprintf)(f, " feature_ecx %08x (%s)\n", def->ext_features,
- buf);
- listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
- 0);
- (*cpu_fprintf)(f, " extfeature_edx %08x (%s)\n",
- def->ext2_features, buf);
- listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
- 0);
- (*cpu_fprintf)(f, " extfeature_ecx %08x (%s)\n",
- def->ext3_features, buf);
- (*cpu_fprintf)(f, "\n");
- }
+ snprintf(buf, sizeof(buf), "%s", def->name);
+ (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
}
if (kvm_enabled()) {
(*cpu_fprintf)(f, "x86 %16s\n", "[host]");
}
+ (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
+ listflags(buf, sizeof(buf), (uint32_t)~0, feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
+ listflags(buf, sizeof(buf), (uint32_t)~0, ext_feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
+ listflags(buf, sizeof(buf), (uint32_t)~0, ext2_feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
+ listflags(buf, sizeof(buf), (uint32_t)~0, ext3_feature_name, 1);
+ (*cpu_fprintf)(f, " %s\n", buf);
}
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
@@ -1160,6 +1459,32 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
return cpu_list;
}
+#ifdef CONFIG_KVM
+static void filter_features_for_kvm(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ KVMState *s = kvm_state;
+
+ env->cpuid_features &=
+ kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+ env->cpuid_ext_features &=
+ kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
+ env->cpuid_ext2_features &=
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
+ env->cpuid_ext3_features &=
+ kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
+ env->cpuid_svm_features &=
+ kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX);
+ env->cpuid_7_0_ebx_features &=
+ kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX);
+ env->cpuid_kvm_features &=
+ kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
+ env->cpuid_ext4_features &=
+ kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
+
+}
+#endif
+
int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
{
CPUX86State *env = &cpu->env;
@@ -1192,10 +1517,21 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
env->cpuid_kvm_features = def->kvm_features;
env->cpuid_svm_features = def->svm_features;
env->cpuid_ext4_features = def->ext4_features;
- env->cpuid_7_0_ebx = def->cpuid_7_0_ebx_features;
+ env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
env->cpuid_xlevel2 = def->xlevel2;
object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
"tsc-frequency", &error);
+
+ /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits in
+ * CPUID[1].EDX.
+ */
+ if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
+ env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
+ env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
+ env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
+ env->cpuid_ext2_features |= (def->features & CPUID_EXT2_AMD_ALIASES);
+ }
+
if (!kvm_enabled()) {
env->cpuid_features &= TCG_FEATURES;
env->cpuid_ext_features &= TCG_EXT_FEATURES;
@@ -1206,9 +1542,14 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
);
env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
env->cpuid_svm_features &= TCG_SVM_FEATURES;
+ } else {
+#ifdef CONFIG_KVM
+ filter_features_for_kvm(cpu);
+#endif
}
object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
- if (error_is_set(&error)) {
+ if (error) {
+ fprintf(stderr, "%s\n", error_get_pretty(error));
error_free(error);
return -1;
}
@@ -1216,109 +1557,6 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
}
#if !defined(CONFIG_USER_ONLY)
-/* copy vendor id string to 32 bit register, nul pad as needed
- */
-static void cpyid(const char *s, uint32_t *id)
-{
- char *d = (char *)id;
- char i;
-
- for (i = sizeof (*id); i--; )
- *d++ = *s ? *s++ : '\0';
-}
-
-/* interpret radix and convert from string to arbitrary scalar,
- * otherwise flag failure
- */
-#define setscalar(pval, str, perr) \
-{ \
- char *pend; \
- unsigned long ul; \
- \
- ul = strtoul(str, &pend, 0); \
- *str && !*pend ? (*pval = ul) : (*perr = 1); \
-}
-
-/* map cpuid options to feature bits, otherwise return failure
- * (option tags in *str are delimited by whitespace)
- */
-static void setfeatures(uint32_t *pval, const char *str,
- const char **featureset, int *perr)
-{
- const char *p, *q;
-
- for (q = p = str; *p || *q; q = p) {
- while (iswhite(*p))
- q = ++p;
- while (*p && !iswhite(*p))
- ++p;
- if (!*q && !*p)
- return;
- if (!lookup_feature(pval, q, p, featureset)) {
- fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
- (int)(p - q), q);
- *perr = 1;
- return;
- }
- }
-}
-
-/* map config file options to x86_def_t form
- */
-static int cpudef_setfield(const char *name, const char *str, void *opaque)
-{
- x86_def_t *def = opaque;
- int err = 0;
-
- if (!strcmp(name, "name")) {
- g_free((void *)def->name);
- def->name = g_strdup(str);
- } else if (!strcmp(name, "model_id")) {
- strncpy(def->model_id, str, sizeof (def->model_id));
- } else if (!strcmp(name, "level")) {
- setscalar(&def->level, str, &err)
- } else if (!strcmp(name, "vendor")) {
- cpyid(&str[0], &def->vendor1);
- cpyid(&str[4], &def->vendor2);
- cpyid(&str[8], &def->vendor3);
- } else if (!strcmp(name, "family")) {
- setscalar(&def->family, str, &err)
- } else if (!strcmp(name, "model")) {
- setscalar(&def->model, str, &err)
- } else if (!strcmp(name, "stepping")) {
- setscalar(&def->stepping, str, &err)
- } else if (!strcmp(name, "feature_edx")) {
- setfeatures(&def->features, str, feature_name, &err);
- } else if (!strcmp(name, "feature_ecx")) {
- setfeatures(&def->ext_features, str, ext_feature_name, &err);
- } else if (!strcmp(name, "extfeature_edx")) {
- setfeatures(&def->ext2_features, str, ext2_feature_name, &err);
- } else if (!strcmp(name, "extfeature_ecx")) {
- setfeatures(&def->ext3_features, str, ext3_feature_name, &err);
- } else if (!strcmp(name, "xlevel")) {
- setscalar(&def->xlevel, str, &err)
- } else {
- fprintf(stderr, "error: unknown option [%s = %s]\n", name, str);
- return (1);
- }
- if (err) {
- fprintf(stderr, "error: bad option value [%s = %s]\n", name, str);
- return (1);
- }
- return (0);
-}
-
-/* register config file entry as x86_def_t
- */
-static int cpudef_register(QemuOpts *opts, void *opaque)
-{
- x86_def_t *def = g_malloc0(sizeof (x86_def_t));
-
- qemu_opt_foreach(opts, cpudef_setfield, def, 1);
- def->next = x86_defs;
- x86_defs = def;
- return (0);
-}
void cpu_clear_apic_feature(CPUX86State *env)
{
@@ -1327,8 +1565,7 @@ void cpu_clear_apic_feature(CPUX86State *env)
#endif /* !CONFIG_USER_ONLY */
-/* register "cpudef" models defined in configuration file. Here we first
- * preload any built-in definitions
+/* Initialize the list of CPU models, filling some non-static fields if necessary
*/
void x86_cpudef_setup(void)
{
@@ -1336,24 +1573,23 @@ void x86_cpudef_setup(void)
static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
- builtin_x86_defs[i].next = x86_defs;
- builtin_x86_defs[i].flags = 1;
+ x86_def_t *def = &builtin_x86_defs[i];
+ def->next = x86_defs;
/* Look for specific "cpudef" models that */
/* have the QEMU version in .model_id */
for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
- if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) {
- pstrcpy(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), "QEMU Virtual CPU version ");
- pstrcat(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), qemu_get_version());
+ if (strcmp(model_with_versions[j], def->name) == 0) {
+ pstrcpy(def->model_id, sizeof(def->model_id),
+ "QEMU Virtual CPU version ");
+ pstrcat(def->model_id, sizeof(def->model_id),
+ qemu_get_version());
break;
}
}
- x86_defs = &builtin_x86_defs[i];
+ x86_defs = def;
}
-#if !defined(CONFIG_USER_ONLY)
- qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register, NULL, 0);
-#endif
}
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
@@ -1474,7 +1710,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
/* Structured Extended Feature Flags Enumeration Leaf */
if (count == 0) {
*eax = 0; /* Maximum ECX value for sub-leaves */
- *ebx = env->cpuid_7_0_ebx; /* Feature flags */
+ *ebx = env->cpuid_7_0_ebx_features; /* Feature flags */
*ecx = 0; /* Reserved */
*edx = 0; /* Reserved */
} else {
@@ -1653,7 +1889,7 @@ static void x86_cpu_reset(CPUState *s)
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
- log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
+ log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
}
xcc->parent_reset(s);
@@ -1764,12 +2000,65 @@ static void mce_init(X86CPU *cpu)
}
}
+#define MSI_ADDR_BASE 0xfee00000
+
+#ifndef CONFIG_USER_ONLY
+static void x86_cpu_apic_init(X86CPU *cpu, Error **errp)
+{
+ static int apic_mapped;
+ CPUX86State *env = &cpu->env;
+ APICCommonState *apic;
+ const char *apic_type = "apic";
+
+ if (kvm_irqchip_in_kernel()) {
+ apic_type = "kvm-apic";
+ } else if (xen_enabled()) {
+ apic_type = "xen-apic";
+ }
+
+ env->apic_state = qdev_try_create(NULL, apic_type);
+ if (env->apic_state == NULL) {
+ error_setg(errp, "APIC device '%s' could not be created", apic_type);
+ return;
+ }
+
+ object_property_add_child(OBJECT(cpu), "apic",
+ OBJECT(env->apic_state), NULL);
+ qdev_prop_set_uint8(env->apic_state, "id", env->cpuid_apic_id);
+ /* TODO: convert to link<> */
+ apic = APIC_COMMON(env->apic_state);
+ apic->cpu = cpu;
+
+ if (qdev_init(env->apic_state)) {
+ error_setg(errp, "APIC device '%s' could not be initialized",
+ object_get_typename(OBJECT(env->apic_state)));
+ return;
+ }
+
+ /* XXX: mapping more APICs at the same memory location */
+ if (apic_mapped == 0) {
+ /* NOTE: the APIC is directly connected to the CPU - it is not
+ on the global memory bus. */
+ /* XXX: what if the base changes? */
+ sysbus_mmio_map(sysbus_from_qdev(env->apic_state), 0, MSI_ADDR_BASE);
+ apic_mapped = 1;
+ }
+}
+#endif
+
void x86_cpu_realize(Object *obj, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
#ifndef CONFIG_USER_ONLY
qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+
+ if (cpu->env.cpuid_features & CPUID_APIC || smp_cpus > 1) {
+ x86_cpu_apic_init(cpu, errp);
+ if (error_is_set(errp)) {
+ return;
+ }
+ }
#endif
mce_init(cpu);
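Note: MSI_ADDR_BASE (0xfee00000) is the architectural reset value of the
local-APIC base (IA32_APIC_BASE), which is why the first APIC is mapped there:

    /* IA32_APIC_BASE after reset: 0xfee00000 (plus enable bits), so the
     * guest finds its local APIC at the address it expects. */

As the XXX comments concede, APIC base relocation and mapping more than one
APIC at that address are left unsolved here.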
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 0677502..90ef1ff 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -123,8 +123,8 @@
/* hidden flags - used internally by qemu to represent additional cpu
states. Only the CPL, INHIBIT_IRQ, SMM and SVMI are not
- redundant. We avoid using the IOPL_MASK, TF_MASK and VM_MASK bit
- position to ease oring with eflags. */
+ redundant. We avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK
+ bit positions to ease oring with eflags. */
/* current cpl */
#define HF_CPL_SHIFT 0
/* true if soft mmu is being used */
@@ -147,10 +147,12 @@
#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT 16 /* must be same as eflags */
#define HF_VM_SHIFT 17 /* must be same as eflags */
+#define HF_AC_SHIFT 18 /* must be same as eflags */
#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -168,10 +170,12 @@
#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
#define HF_RF_MASK (1 << HF_RF_SHIFT)
#define HF_VM_MASK (1 << HF_VM_SHIFT)
+#define HF_AC_MASK (1 << HF_AC_SHIFT)
#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
/* hflags2 */
@@ -210,6 +214,13 @@
#define CR4_OSFXSR_SHIFT 9
#define CR4_OSFXSR_MASK (1 << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1 << 10)
+#define CR4_VMXE_MASK (1 << 13)
+#define CR4_SMXE_MASK (1 << 14)
+#define CR4_FSGSBASE_MASK (1 << 16)
+#define CR4_PCIDE_MASK (1 << 17)
+#define CR4_OSXSAVE_MASK (1 << 18)
+#define CR4_SMEP_MASK (1 << 20)
+#define CR4_SMAP_MASK (1 << 21)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
@@ -382,6 +393,7 @@
#define CPUID_PBE (1 << 31)
#define CPUID_EXT_SSE3 (1 << 0)
+#define CPUID_EXT_PCLMULQDQ (1 << 1)
#define CPUID_EXT_DTES64 (1 << 2)
#define CPUID_EXT_MONITOR (1 << 3)
#define CPUID_EXT_DSCPL (1 << 4)
@@ -391,9 +403,11 @@
#define CPUID_EXT_TM2 (1 << 8)
#define CPUID_EXT_SSSE3 (1 << 9)
#define CPUID_EXT_CID (1 << 10)
+#define CPUID_EXT_FMA (1 << 12)
#define CPUID_EXT_CX16 (1 << 13)
#define CPUID_EXT_XTPR (1 << 14)
#define CPUID_EXT_PDCM (1 << 15)
+#define CPUID_EXT_PCID (1 << 17)
#define CPUID_EXT_DCA (1 << 18)
#define CPUID_EXT_SSE41 (1 << 19)
#define CPUID_EXT_SSE42 (1 << 20)
@@ -401,14 +415,36 @@
#define CPUID_EXT_MOVBE (1 << 22)
#define CPUID_EXT_POPCNT (1 << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24)
+#define CPUID_EXT_AES (1 << 25)
#define CPUID_EXT_XSAVE (1 << 26)
#define CPUID_EXT_OSXSAVE (1 << 27)
+#define CPUID_EXT_AVX (1 << 28)
+#define CPUID_EXT_F16C (1 << 29)
+#define CPUID_EXT_RDRAND (1 << 30)
#define CPUID_EXT_HYPERVISOR (1 << 31)
+#define CPUID_EXT2_FPU (1 << 0)
+#define CPUID_EXT2_VME (1 << 1)
+#define CPUID_EXT2_DE (1 << 2)
+#define CPUID_EXT2_PSE (1 << 3)
+#define CPUID_EXT2_TSC (1 << 4)
+#define CPUID_EXT2_MSR (1 << 5)
+#define CPUID_EXT2_PAE (1 << 6)
+#define CPUID_EXT2_MCE (1 << 7)
+#define CPUID_EXT2_CX8 (1 << 8)
+#define CPUID_EXT2_APIC (1 << 9)
#define CPUID_EXT2_SYSCALL (1 << 11)
+#define CPUID_EXT2_MTRR (1 << 12)
+#define CPUID_EXT2_PGE (1 << 13)
+#define CPUID_EXT2_MCA (1 << 14)
+#define CPUID_EXT2_CMOV (1 << 15)
+#define CPUID_EXT2_PAT (1 << 16)
+#define CPUID_EXT2_PSE36 (1 << 17)
#define CPUID_EXT2_MP (1 << 19)
#define CPUID_EXT2_NX (1 << 20)
#define CPUID_EXT2_MMXEXT (1 << 22)
+#define CPUID_EXT2_MMX (1 << 23)
+#define CPUID_EXT2_FXSR (1 << 24)
#define CPUID_EXT2_FFXSR (1 << 25)
#define CPUID_EXT2_PDPE1GB (1 << 26)
#define CPUID_EXT2_RDTSCP (1 << 27)
@@ -416,6 +452,17 @@
#define CPUID_EXT2_3DNOWEXT (1 << 30)
#define CPUID_EXT2_3DNOW (1 << 31)
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+ CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+ CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
+
#define CPUID_EXT3_LAHF_LM (1 << 0)
#define CPUID_EXT3_CMP_LEG (1 << 1)
#define CPUID_EXT3_SVM (1 << 2)
@@ -427,7 +474,17 @@
#define CPUID_EXT3_3DNOWPREFETCH (1 << 8)
#define CPUID_EXT3_OSVW (1 << 9)
#define CPUID_EXT3_IBS (1 << 10)
+#define CPUID_EXT3_XOP (1 << 11)
#define CPUID_EXT3_SKINIT (1 << 12)
+#define CPUID_EXT3_WDT (1 << 13)
+#define CPUID_EXT3_LWP (1 << 15)
+#define CPUID_EXT3_FMA4 (1 << 16)
+#define CPUID_EXT3_TCE (1 << 17)
+#define CPUID_EXT3_NODEID (1 << 19)
+#define CPUID_EXT3_TBM (1 << 21)
+#define CPUID_EXT3_TOPOEXT (1 << 22)
+#define CPUID_EXT3_PERFCORE (1 << 23)
+#define CPUID_EXT3_PERFNB (1 << 24)
#define CPUID_SVM_NPT (1 << 0)
#define CPUID_SVM_LBRV (1 << 1)
@@ -440,6 +497,19 @@
#define CPUID_SVM_PAUSEFILTER (1 << 10)
#define CPUID_SVM_PFTHRESHOLD (1 << 12)
+#define CPUID_7_0_EBX_FSGSBASE (1 << 0)
+#define CPUID_7_0_EBX_BMI1 (1 << 3)
+#define CPUID_7_0_EBX_HLE (1 << 4)
+#define CPUID_7_0_EBX_AVX2 (1 << 5)
+#define CPUID_7_0_EBX_SMEP (1 << 7)
+#define CPUID_7_0_EBX_BMI2 (1 << 8)
+#define CPUID_7_0_EBX_ERMS (1 << 9)
+#define CPUID_7_0_EBX_INVPCID (1 << 10)
+#define CPUID_7_0_EBX_RTM (1 << 11)
+#define CPUID_7_0_EBX_RDSEED (1 << 18)
+#define CPUID_7_0_EBX_ADX (1 << 19)
+#define CPUID_7_0_EBX_SMAP (1 << 20)
+
#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
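Note: with these definitions, probing the host for one of the new leaf-7 flags
can be sketched using the host_cpuid() helper from cpu.c:

    uint32_t eax, ebx, ecx, edx;
    host_cpuid(7, 0, &eax, &ebx, &ecx, &edx);   /* leaf 7, subleaf 0 */
    if (ebx & CPUID_7_0_EBX_SMAP) {
        /* host CPU advertises SMAP */
    }

(Under KVM the patch instead asks the kernel, via
kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX), since the kernel may mask bits.)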
@@ -615,7 +685,7 @@ typedef struct {
#define CPU_NB_REGS CPU_NB_REGS32
#endif
-#define NB_MMU_MODES 2
+#define NB_MMU_MODES 3
typedef enum TPRAccess {
TPR_ACCESS_READ,
@@ -745,7 +815,7 @@ typedef struct CPUX86State {
uint32_t cpuid_xlevel2;
uint32_t cpuid_ext4_features;
/* Flags from CPUID[EAX=7,ECX=0].EBX */
- uint32_t cpuid_7_0_ebx;
+ uint32_t cpuid_7_0_ebx_features;
/* MTRRs */
uint64_t mtrr_fixed[11];
@@ -792,7 +862,7 @@ typedef struct CPUX86State {
X86CPU *cpu_x86_init(const char *cpu_model);
int cpu_x86_exec(CPUX86State *s);
-void x86_cpu_list (FILE *f, fprintf_function cpu_fprintf, const char *optarg);
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
void x86_cpudef_setup(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);
@@ -859,9 +929,11 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
}
}
-static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
int sipi_vector)
{
+ CPUX86State *env = &cpu->env;
+
env->eip = 0;
cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
sipi_vector << 12,
@@ -947,10 +1019,6 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_smm_update(CPUX86State *env);
uint64_t cpu_get_tsc(CPUX86State *env);
-/* used to debug */
-#define X86_DUMP_FPU 0x0001 /* dump FPU state too */
-#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
-
#define TARGET_PAGE_BITS 12
#ifdef TARGET_X86_64
@@ -976,7 +1044,7 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler
-#define cpu_list_id x86_cpu_list
+#define cpu_list x86_cpu_list
#define cpudef_setup x86_cpudef_setup
#define CPU_SAVE_VERSION 12
@@ -984,10 +1052,15 @@ static inline CPUX86State *cpu_init(const char *cpu_model)
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
#define MMU_MODE1_SUFFIX _user
-#define MMU_USER_IDX 1
+#define MMU_MODE2_SUFFIX _ksmap /* Kernel with SMAP override */
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_KSMAP_IDX 2
static inline int cpu_mmu_index (CPUX86State *env)
{
- return (env->hflags & HF_CPL_MASK) == 3 ? 1 : 0;
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ ((env->hflags & HF_SMAP_MASK) && (env->eflags & AC_MASK))
+ ? MMU_KSMAP_IDX : MMU_KERNEL_IDX;
}
#undef EAX
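Note: the new cpu_mmu_index() amounts to this decision table:

    CPL == 3                           -> MMU_USER_IDX
    CPL <  3, SMAP off                 -> MMU_KERNEL_IDX
    CPL <  3, SMAP on, EFLAGS.AC == 0  -> MMU_KERNEL_IDX (SMAP enforced)
    CPL <  3, SMAP on, EFLAGS.AC == 1  -> MMU_KSMAP_IDX  (SMAP overridden)

Giving the AC-override case its own TLB mode (hence NB_MMU_MODES 3) means
STAC/CLAC only switch indexes instead of forcing a TLB flush.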
@@ -1049,8 +1122,10 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
#include "hw/apic.h"
#endif
-static inline bool cpu_has_work(CPUX86State *env)
+static inline bool cpu_has_work(CPUState *cpu)
{
+ CPUX86State *env = &X86_CPU(cpu)->env;
+
return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
@@ -1073,7 +1148,7 @@ static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
*cs_base = env->segs[R_CS].base;
*pc = *cs_base + env->eip;
*flags = env->hflags |
- (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK));
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
void do_cpu_init(X86CPU *cpu);
@@ -1082,7 +1157,7 @@ void do_cpu_sipi(X86CPU *cpu);
#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
@@ -1139,4 +1214,6 @@ void do_smm_enter(CPUX86State *env1);
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
+void enable_kvm_pv_eoi(void);
+
#endif /* CPU_I386_H */
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 8a5da3d..bf206cf 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -284,7 +284,7 @@ void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
env->dr[6], env->dr[7]);
}
- if (flags & X86_DUMP_CCOP) {
+ if (flags & CPU_DUMP_CCOP) {
if ((unsigned)env->cc_op < CC_OP_NB)
snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
else
@@ -303,7 +303,7 @@ void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
}
}
cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
- if (flags & X86_DUMP_FPU) {
+ if (flags & CPU_DUMP_FPU) {
int fptag;
fptag = 0;
for(i = 0; i < 8; i++) {
@@ -443,17 +443,27 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
#if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
- if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
- (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
+ if ((new_cr4 ^ env->cr[4]) &
+ (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+ CR4_SMEP_MASK | CR4_SMAP_MASK)) {
tlb_flush(env, 1);
}
/* SSE handling */
- if (!(env->cpuid_features & CPUID_SSE))
+ if (!(env->cpuid_features & CPUID_SSE)) {
new_cr4 &= ~CR4_OSFXSR_MASK;
- if (new_cr4 & CR4_OSFXSR_MASK)
+ }
+ env->hflags &= ~HF_OSFXSR_MASK;
+ if (new_cr4 & CR4_OSFXSR_MASK) {
env->hflags |= HF_OSFXSR_MASK;
- else
- env->hflags &= ~HF_OSFXSR_MASK;
+ }
+
+ if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
+ new_cr4 &= ~CR4_SMAP_MASK;
+ }
+ env->hflags &= ~HF_SMAP_MASK;
+ if (new_cr4 & CR4_SMAP_MASK) {
+ env->hflags |= HF_SMAP_MASK;
+ }
env->cr[4] = new_cr4;
}
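Note: the XOR form catches bits changing in either direction. Worked example,
with illustrative CR4 values:

    old CR4   = 0x000006b0   /* PSE|PAE|PGE|OSFXSR|OSXMMEXCPT */
    new CR4   = 0x001006b0   /* same, plus SMEP (bit 20) */
    new ^ old = 0x00100000   /* intersects CR4_SMEP_MASK -> tlb_flush() */

SMEP/SMAP toggles must flush because the protection derived from PG_USER_MASK
is cached in the TLB.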
@@ -493,7 +503,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
int error_code, is_dirty, prot, page_size, is_write, is_user;
- target_phys_addr_t paddr;
+ hwaddr paddr;
uint32_t page_offset;
target_ulong vaddr, virt_addr;
@@ -591,17 +601,38 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* 2 MB page */
page_size = 2048 * 1024;
ptep ^= PG_NX_MASK;
- if ((ptep & PG_NX_MASK) && is_write1 == 2)
+ if ((ptep & PG_NX_MASK) && is_write1 == 2) {
goto do_fault_protect;
- if (is_user) {
- if (!(ptep & PG_USER_MASK))
+ }
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(ptep & PG_RW_MASK))
+ }
+ if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK))
+ is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -635,15 +666,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
ptep ^= PG_NX_MASK;
if ((ptep & PG_NX_MASK) && is_write1 == 2)
goto do_fault_protect;
- if (is_user) {
- if (!(ptep & PG_USER_MASK))
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(ptep & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(ptep & PG_RW_MASK))
+ }
+ if (is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK))
+ is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -670,15 +721,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
/* if PSE bit is set, then we use a 4MB page */
if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
page_size = 4096 * 1024;
- if (is_user) {
- if (!(pde & PG_USER_MASK))
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(pde & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(pde & PG_RW_MASK))
+ }
+ if (is_write && !(pde & PG_RW_MASK)) {
goto do_fault_protect;
- } else {
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (pde & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (pde & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(pde & PG_RW_MASK))
+ is_write && !(pde & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pde & PG_DIRTY_MASK);
if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -707,15 +778,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
}
/* combine pde and pte user and rw protections */
ptep = pte & pde;
- if (is_user) {
- if (!(ptep & PG_USER_MASK))
+ switch (mmu_idx) {
+ case MMU_USER_IDX:
+ if (!(ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+ if (is_write && !(ptep & PG_RW_MASK)) {
+ goto do_fault_protect;
+ }
+ break;
+
+ case MMU_KERNEL_IDX:
+ if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+ (ptep & PG_USER_MASK)) {
goto do_fault_protect;
- if (is_write && !(ptep & PG_RW_MASK))
+ }
+ /* fall through */
+ case MMU_KSMAP_IDX:
+ if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+ (ptep & PG_USER_MASK)) {
goto do_fault_protect;
- } else {
+ }
if ((env->cr[0] & CR0_WP_MASK) &&
- is_write && !(ptep & PG_RW_MASK))
+ is_write && !(ptep & PG_RW_MASK)) {
goto do_fault_protect;
+ }
+ break;
+
+ default: /* cannot happen */
+ break;
}
is_dirty = is_write && !(pte & PG_DIRTY_MASK);
if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -762,8 +853,9 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
if (is_user)
error_code |= PG_ERROR_U_MASK;
if (is_write1 == 2 &&
- (env->efer & MSR_EFER_NXE) &&
- (env->cr[4] & CR4_PAE_MASK))
+ (((env->efer & MSR_EFER_NXE) &&
+ (env->cr[4] & CR4_PAE_MASK)) ||
+ (env->cr[4] & CR4_SMEP_MASK)))
error_code |= PG_ERROR_I_D_MASK;
if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
/* cr2 is not modified in case of exceptions */
@@ -777,11 +869,11 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
return 1;
}
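
The four switch blocks introduced in this function all encode the same supervisor-mode policy; condensed into one predicate for reference (a sketch, not the QEMU code itself — ptep stands for the combined U/S and R/W bits of all levels, is_fetch corresponds to is_write1 == 2, and PG_USER_MASK/PG_RW_MASK are assumed from cpu.h):

    #include <stdbool.h>
    #include <stdint.h>

    static bool supervisor_access_faults(uint64_t ptep, bool is_write,
                                         bool is_fetch, bool smep,
                                         bool smap, bool ac, bool wp)
    {
        bool user_page = (ptep & PG_USER_MASK) != 0;

        if (is_fetch && smep && user_page) {
            return true;        /* SMEP: never fetch from user pages */
        }
        if (!is_fetch && smap && !ac && user_page) {
            return true;        /* SMAP: no data access unless EFLAGS.AC=1 */
        }
        if (is_write && wp && !(ptep & PG_RW_MASK)) {
            return true;        /* CR0.WP: read-only pages bind the kernel too */
        }
        return false;
    }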
-target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
+hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
target_ulong pde_addr, pte_addr;
uint64_t pte;
- target_phys_addr_t paddr;
+ hwaddr paddr;
uint32_t page_offset;
int page_size;
@@ -1049,10 +1141,11 @@ static void do_inject_x86_mce(void *data)
}
}
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags)
{
+ CPUX86State *cenv = &cpu->env;
MCEInjectionParams params = {
.mon = mon,
.env = cenv,
@@ -1084,7 +1177,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
return;
}
- run_on_cpu(cenv, do_inject_x86_mce, &params);
+ run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
if (flags & MCE_INJECT_BROADCAST) {
params.bank = 1;
params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
@@ -1096,7 +1189,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
continue;
}
params.env = env;
- run_on_cpu(cenv, do_inject_x86_mce, &params);
+ run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
}
}
}
@@ -1151,6 +1244,7 @@ X86CPU *cpu_x86_init(const char *cpu_model)
{
X86CPU *cpu;
CPUX86State *env;
+ Error *error = NULL;
cpu = X86_CPU(object_new(TYPE_X86_CPU));
env = &cpu->env;
@@ -1161,8 +1255,12 @@ X86CPU *cpu_x86_init(const char *cpu_model)
return NULL;
}
- x86_cpu_realize(OBJECT(cpu), NULL);
-
+ x86_cpu_realize(OBJECT(cpu), &error);
+ if (error) {
+ error_free(error);
+ object_delete(OBJECT(cpu));
+ return NULL;
+ }
return cpu;
}
diff --git a/target-i386/helper.h b/target-i386/helper.h
index ab6af63..970fcd9 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -1,7 +1,7 @@
#include "def-helper.h"
-DEF_HELPER_FLAGS_2(cc_compute_all, TCG_CALL_PURE, i32, env, int)
-DEF_HELPER_FLAGS_2(cc_compute_c, TCG_CALL_PURE, i32, env, int)
+DEF_HELPER_FLAGS_2(cc_compute_all, TCG_CALL_NO_SE, i32, env, int)
+DEF_HELPER_FLAGS_2(cc_compute_c, TCG_CALL_NO_SE, i32, env, int)
DEF_HELPER_0(lock, void)
DEF_HELPER_0(unlock, void)
@@ -67,6 +67,8 @@ DEF_HELPER_3(raise_interrupt, void, env, int, int)
DEF_HELPER_2(raise_exception, void, env, int)
DEF_HELPER_1(cli, void, env)
DEF_HELPER_1(sti, void, env)
+DEF_HELPER_1(clac, void, env)
+DEF_HELPER_1(stac, void, env)
DEF_HELPER_1(set_inhibit_irq, void, env)
DEF_HELPER_1(reset_inhibit_irq, void, env)
DEF_HELPER_3(boundw, void, env, tl, int)
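
The clac/stac helpers declared above back the new SMAP instruction pair; their bodies are outside the hunks shown here (the cc_helper.c change in the diffstat), but plausibly amount to toggling EFLAGS.AC:

    /* Sketch of the assumed helper bodies. */
    void helper_clac(CPUX86State *env)
    {
        env->eflags &= ~AC_MASK;   /* re-arm SMAP checking */
    }

    void helper_stac(CPUX86State *env)
    {
        env->eflags |= AC_MASK;    /* permit supervisor access to user pages */
    }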
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index ffc294e..f669281 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -31,6 +31,7 @@
#include "hw/apic.h"
#include "ioport.h"
#include "hyperv.h"
+#include "hw/pci.h"
//#define DEBUG_KVM
@@ -97,6 +98,19 @@ static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
return cpuid;
}
+/* Run the KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
+{
+ struct kvm_cpuid2 *cpuid;
+ int max = 1;
+ while ((cpuid = try_get_cpuid(s, max)) == NULL) {
+ max *= 2;
+ }
+ return cpuid;
+}
+
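
get_supported_cpuid() terminates because the kernel fails KVM_GET_SUPPORTED_CPUID with E2BIG when the caller's nent is too small; try_get_cpuid() (earlier in this file) is assumed to map that into a NULL return, which the loop answers by doubling the allocation. The probe underneath, sketched:

    /* Assumed shape of the probe inside try_get_cpuid(). */
    struct kvm_cpuid2 *cpuid = g_malloc0(sizeof(*cpuid) +
                                         max * sizeof(*cpuid->entries));
    cpuid->nent = max;
    if (kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid) == -E2BIG) {
        g_free(cpuid);
        cpuid = NULL;              /* caller grows 'max' and retries */
    }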
struct kvm_para_features {
int cap;
int feature;
@@ -122,60 +136,98 @@ static int get_para_features(KVMState *s)
}
+/* Returns the value of a specific register in the cpuid entry
+ */
+static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+{
+ uint32_t ret = 0;
+ switch (reg) {
+ case R_EAX:
+ ret = entry->eax;
+ break;
+ case R_EBX:
+ ret = entry->ebx;
+ break;
+ case R_ECX:
+ ret = entry->ecx;
+ break;
+ case R_EDX:
+ ret = entry->edx;
+ break;
+ }
+ return ret;
+}
+
+/* Find the matching entry for function/index in the kvm_cpuid2 struct
+ */
+static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
+{
+ int i;
+ for (i = 0; i < cpuid->nent; ++i) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ return &cpuid->entries[i];
+ }
+ }
+ /* not found: */
+ return NULL;
+}
+
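
The two helpers compose into the lookup that kvm_arch_get_supported_cpuid() performs next; for instance, fetching the host-supported CPUID.01H:ECX feature bits could look like this (sketch):

    struct kvm_cpuid2 *cpuid = get_supported_cpuid(s);
    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, 1, 0);
    uint32_t ecx_features = entry ? cpuid_entry_get_reg(entry, R_ECX) : 0;
    g_free(cpuid);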
uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
uint32_t index, int reg)
{
struct kvm_cpuid2 *cpuid;
- int i, max;
uint32_t ret = 0;
uint32_t cpuid_1_edx;
- int has_kvm_features = 0;
+ bool found = false;
- max = 1;
- while ((cpuid = try_get_cpuid(s, max)) == NULL) {
- max *= 2;
+ cpuid = get_supported_cpuid(s);
+
+ struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
+ if (entry) {
+ found = true;
+ ret = cpuid_entry_get_reg(entry, reg);
}
- for (i = 0; i < cpuid->nent; ++i) {
- if (cpuid->entries[i].function == function &&
- cpuid->entries[i].index == index) {
- if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
- has_kvm_features = 1;
- }
- switch (reg) {
- case R_EAX:
- ret = cpuid->entries[i].eax;
- break;
- case R_EBX:
- ret = cpuid->entries[i].ebx;
- break;
- case R_ECX:
- ret = cpuid->entries[i].ecx;
- break;
- case R_EDX:
- ret = cpuid->entries[i].edx;
- switch (function) {
- case 1:
- /* KVM before 2.6.30 misreports the following features */
- ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
- break;
- case 0x80000001:
- /* On Intel, kvm returns cpuid according to the Intel spec,
- * so add missing bits according to the AMD spec:
- */
- cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
- ret |= cpuid_1_edx & 0x183f7ff;
- break;
- }
- break;
- }
+ /* Fixups for the data returned by KVM, below */
+
+ if (function == 1 && reg == R_EDX) {
+ /* KVM before 2.6.30 misreports the following features */
+ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+ } else if (function == 1 && reg == R_ECX) {
+ /* We can set the hypervisor flag, even if KVM does not return it on
+ * GET_SUPPORTED_CPUID
+ */
+ ret |= CPUID_EXT_HYPERVISOR;
+ /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
+ * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
+ * and the irqchip is in the kernel.
+ */
+ if (kvm_irqchip_in_kernel() &&
+ kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+ ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
}
+
+ /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
+ * without the in-kernel irqchip
+ */
+ if (!kvm_irqchip_in_kernel()) {
+ ret &= ~CPUID_EXT_X2APIC;
+ }
+ } else if (function == 0x80000001 && reg == R_EDX) {
+ /* On Intel, kvm returns cpuid according to the Intel spec,
+ * so add missing bits according to the AMD spec:
+ */
+ cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+ ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
}
g_free(cpuid);
/* fallback for older kernels */
- if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
+ if ((function == KVM_CPUID_FEATURES) && !found) {
ret = get_para_features(s);
}
@@ -228,8 +280,9 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
return -ENOSYS;
}
-static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code)
+static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
{
+ CPUX86State *env = &cpu->env;
uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
uint64_t mcg_status = MCG_STATUS_MCIP;
@@ -241,7 +294,7 @@ static void kvm_mce_inject(CPUX86State *env, target_phys_addr_t paddr, int code)
status |= 0xc0;
mcg_status |= MCG_STATUS_RIPV;
}
- cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
+ cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
(MCM_ADDR_PHYS << 6) | 0xc,
cpu_x86_support_mca_broadcast(env) ?
MCE_INJECT_BROADCAST : 0);
@@ -255,8 +308,9 @@ static void hardware_memory_error(void)
int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
ram_addr_t ram_addr;
- target_phys_addr_t paddr;
+ hwaddr paddr;
if ((env->mcg_cap & MCG_SER_P) && addr
&& (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
@@ -272,7 +326,7 @@ int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
}
}
kvm_hwpoison_page_add(ram_addr);
- kvm_mce_inject(env, paddr, code);
+ kvm_mce_inject(cpu, paddr, code);
} else {
if (code == BUS_MCEERR_AO) {
return 0;
@@ -289,7 +343,7 @@ int kvm_arch_on_sigbus(int code, void *addr)
{
if ((first_cpu->mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
ram_addr_t ram_addr;
- target_phys_addr_t paddr;
+ hwaddr paddr;
/* Hope we are lucky for AO MCE */
if (qemu_ram_addr_from_host(addr, &ram_addr) ||
@@ -300,7 +354,7 @@ int kvm_arch_on_sigbus(int code, void *addr)
return 0;
}
kvm_hwpoison_page_add(ram_addr);
- kvm_mce_inject(first_cpu, paddr, code);
+ kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code);
} else {
if (code == BUS_MCEERR_AO) {
return 0;
@@ -358,31 +412,12 @@ int kvm_arch_init_vcpu(CPUX86State *env)
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
} QEMU_PACKED cpuid_data;
- KVMState *s = env->kvm_state;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
int r;
- env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
-
- i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
- j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER;
- env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
- env->cpuid_ext_features |= i;
- if (j && kvm_irqchip_in_kernel() &&
- kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
- env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER;
- }
-
- env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
- 0, R_EDX);
- env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
- 0, R_ECX);
- env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
- 0, R_EDX);
-
cpuid_i = 0;
/* Paravirtualization CPUIDs */
@@ -403,8 +438,7 @@ int kvm_arch_init_vcpu(CPUX86State *env)
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_FEATURES;
- c->eax = env->cpuid_kvm_features &
- kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
+ c->eax = env->cpuid_kvm_features;
if (hyperv_enabled()) {
memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
@@ -525,8 +559,6 @@ int kvm_arch_init_vcpu(CPUX86State *env)
/* Call Centaur's CPUID instructions if they are supported. */
if (env->cpuid_xlevel2 > 0) {
- env->cpuid_ext4_features &=
- kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
for (i = 0xC0000000; i <= limit; i++) {
@@ -1364,8 +1396,9 @@ static int kvm_put_mp_state(CPUX86State *env)
return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}
-static int kvm_get_mp_state(CPUX86State *env)
+static int kvm_get_mp_state(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
struct kvm_mp_state mp_state;
int ret;
@@ -1551,9 +1584,10 @@ static int kvm_get_debugregs(CPUX86State *env)
int kvm_arch_put_registers(CPUX86State *env, int level)
{
+ CPUState *cpu = ENV_GET_CPU(env);
int ret;
- assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
ret = kvm_getput_regs(env, 1);
if (ret < 0) {
@@ -1608,9 +1642,10 @@ int kvm_arch_put_registers(CPUX86State *env, int level)
int kvm_arch_get_registers(CPUX86State *env)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
int ret;
- assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
+ assert(cpu_is_stopped(CPU(cpu)) || qemu_cpu_is_self(CPU(cpu)));
ret = kvm_getput_regs(env, 0);
if (ret < 0) {
@@ -1632,7 +1667,7 @@ int kvm_arch_get_registers(CPUX86State *env)
if (ret < 0) {
return ret;
}
- ret = kvm_get_mp_state(env);
+ ret = kvm_get_mp_state(cpu);
if (ret < 0) {
return ret;
}
@@ -1780,8 +1815,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
return env->halted;
}
-static int kvm_handle_halt(CPUX86State *env)
+static int kvm_handle_halt(X86CPU *cpu)
{
+ CPUX86State *env = &cpu->env;
+
if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
!(env->interrupt_request & CPU_INTERRUPT_NMI)) {
@@ -1908,14 +1945,15 @@ void kvm_arch_remove_all_hw_breakpoints(void)
static CPUWatchpoint hw_watchpoint;
-static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
+static int kvm_handle_debug(CPUX86State *env,
+ struct kvm_debug_exit_arch *arch_info)
{
int ret = 0;
int n;
if (arch_info->exception == 1) {
if (arch_info->dr6 & (1 << 14)) {
- if (cpu_single_env->singlestep_enabled) {
+ if (env->singlestep_enabled) {
ret = EXCP_DEBUG;
}
} else {
@@ -1927,13 +1965,13 @@ static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
break;
case 0x1:
ret = EXCP_DEBUG;
- cpu_single_env->watchpoint_hit = &hw_watchpoint;
+ env->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_WRITE;
break;
case 0x3:
ret = EXCP_DEBUG;
- cpu_single_env->watchpoint_hit = &hw_watchpoint;
+ env->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_breakpoint[n].addr;
hw_watchpoint.flags = BP_MEM_ACCESS;
break;
@@ -1941,16 +1979,16 @@ static int kvm_handle_debug(struct kvm_debug_exit_arch *arch_info)
}
}
}
- } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc)) {
+ } else if (kvm_find_sw_breakpoint(env, arch_info->pc)) {
ret = EXCP_DEBUG;
}
if (ret == 0) {
- cpu_synchronize_state(cpu_single_env);
- assert(cpu_single_env->exception_injected == -1);
+ cpu_synchronize_state(env);
+ assert(env->exception_injected == -1);
/* pass to guest */
- cpu_single_env->exception_injected = arch_info->exception;
- cpu_single_env->has_error_code = 0;
+ env->exception_injected = arch_info->exception;
+ env->has_error_code = 0;
}
return ret;
@@ -1995,13 +2033,14 @@ static bool host_supports_vmx(void)
int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
uint64_t code;
int ret;
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
- ret = kvm_handle_halt(env);
+ ret = kvm_handle_halt(cpu);
break;
case KVM_EXIT_SET_TPR:
ret = 0;
@@ -2033,7 +2072,7 @@ int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
break;
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
- ret = kvm_handle_debug(&run->debug.arch);
+ ret = kvm_handle_debug(env, &run->debug.arch);
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
@@ -2068,3 +2107,143 @@ void kvm_arch_init_irq_routing(KVMState *s)
kvm_msi_via_irqfd_allowed = true;
kvm_gsi_routing_allowed = true;
}
+
+/* Classic KVM device assignment interface. Will remain x86 only. */
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+ uint32_t flags, uint32_t *dev_id)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .segnr = dev_addr->domain,
+ .busnr = dev_addr->bus,
+ .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
+ .flags = flags,
+ };
+ int ret;
+
+ dev_data.assigned_dev_id =
+ (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
+
+ ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ *dev_id = dev_data.assigned_dev_id;
+
+ return 0;
+}
+
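
The assigned_dev_id computed above packs a PCI address into one 32-bit handle: domain in bits 16 and up, bus in bits 15..8, devfn (slot << 3 | function) in bits 7..0. A worked example for the hypothetical address 0000:01:02.3, assuming PCI_DEVFN from hw/pci.h:

    uint32_t devfn  = PCI_DEVFN(2, 3);              /* (2 << 3) | 3 = 0x13 */
    uint32_t dev_id = (0 << 16) | (1 << 8) | devfn; /* 0x00000113 */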
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .assigned_dev_id = dev_id,
+ };
+
+ return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
+}
+
+static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
+ uint32_t irq_type, uint32_t guest_irq)
+{
+ struct kvm_assigned_irq assigned_irq = {
+ .assigned_dev_id = dev_id,
+ .guest_irq = guest_irq,
+ .flags = irq_type,
+ };
+
+ if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
+ return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
+ } else {
+ return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
+ }
+}
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
+ uint32_t guest_irq)
+{
+ uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
+ (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
+
+ return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
+}
+
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .assigned_dev_id = dev_id,
+ .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
+}
+
+static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
+ uint32_t type)
+{
+ struct kvm_assigned_irq assigned_irq = {
+ .assigned_dev_id = dev_id,
+ .flags = type,
+ };
+
+ return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
+}
+
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
+ (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
+}
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
+{
+ return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
+ KVM_DEV_IRQ_GUEST_MSI, virq);
+}
+
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
+ KVM_DEV_IRQ_HOST_MSI);
+}
+
+bool kvm_device_msix_supported(KVMState *s)
+{
+ /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
+ * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
+}
+
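
The support probe above exploits error-code ordering in the kernel: a kernel that implements KVM_ASSIGN_SET_MSIX_NR reaches copy_from_user() on the NULL argument and fails with EFAULT, while one that lacks the ioctl rejects it earlier with a different errno. Generalized (hypothetical helper name):

    /* NULL argp: a *supported* vm ioctl dies at the copy-in step. */
    static bool vm_ioctl_supported(KVMState *s, unsigned long request)
    {
        return kvm_vm_ioctl(s, request, NULL) == -EFAULT;
    }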
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+ uint32_t nr_vectors)
+{
+ struct kvm_assigned_msix_nr msix_nr = {
+ .assigned_dev_id = dev_id,
+ .entry_nr = nr_vectors,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
+}
+
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+ int virq)
+{
+ struct kvm_assigned_msix_entry msix_entry = {
+ .assigned_dev_id = dev_id,
+ .gsi = virq,
+ .entry = vector,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
+}
+
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
+ KVM_DEV_IRQ_GUEST_MSIX, 0);
+}
+
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
+ KVM_DEV_IRQ_HOST_MSIX);
+}
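
Taken together, the expected MSI-X call sequence for an assigned device is: size the vector table, program each vector's routing, then enable. A usage sketch (error handling elided; the virq values are assumed to come from the MSI routing setup):

    int i;
    kvm_device_msix_init_vectors(s, dev_id, nr_vectors);
    for (i = 0; i < nr_vectors; i++) {
        kvm_device_msix_set_vector(s, dev_id, i, virq[i]);
    }
    kvm_device_msix_assign(s, dev_id);
    /* ... and on teardown: kvm_device_msix_deassign(s, dev_id); */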
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index b82bbf4..f6ab82f 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -11,6 +11,28 @@
#ifndef QEMU_KVM_I386_H
#define QEMU_KVM_I386_H
+#include "kvm.h"
+
bool kvm_allows_irq0_override(void);
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+ uint32_t flags, uint32_t *dev_id);
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id);
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id,
+ bool use_host_msi, uint32_t guest_irq);
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked);
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi);
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq);
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id);
+
+bool kvm_device_msix_supported(KVMState *s);
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+ uint32_t nr_vectors);
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+ int virq);
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);
+
#endif
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
index 5fff8d5..ff93374 100644
--- a/target-i386/seg_helper.c
+++ b/target-i386/seg_helper.c
@@ -31,7 +31,7 @@
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
- log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
+ log_cpu_state_mask(CPU_LOG_PCALL, (env), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
@@ -1177,7 +1177,7 @@ static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
qemu_log(" EAX=" TARGET_FMT_lx, EAX);
}
qemu_log("\n");
- log_cpu_state(env, X86_DUMP_CCOP);
+ log_cpu_state(env, CPU_DUMP_CCOP);
#if 0
{
int i;
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 8b04eb2..eea2fe9 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -47,7 +47,7 @@ void do_smm_enter(CPUX86State *env)
int i, offset;
qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
- log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
+ log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
env->hflags |= HF_SMM_MASK;
cpu_smm_update(env);
@@ -295,7 +295,7 @@ void helper_rsm(CPUX86State *env)
cpu_smm_update(env);
qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
- log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
+ log_cpu_state_mask(CPU_LOG_INT, env, CPU_DUMP_CCOP);
}
#endif /* !CONFIG_USER_ONLY */
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
index 4943c37..a238d95 100644
--- a/target-i386/svm_helper.c
+++ b/target-i386/svm_helper.c
@@ -85,7 +85,7 @@ void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
}
#else
-static inline void svm_save_seg(CPUX86State *env, target_phys_addr_t addr,
+static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
const SegmentCache *sc)
{
stw_phys(addr + offsetof(struct vmcb_seg, selector),
@@ -98,7 +98,7 @@ static inline void svm_save_seg(CPUX86State *env, target_phys_addr_t addr,
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
-static inline void svm_load_seg(CPUX86State *env, target_phys_addr_t addr,
+static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc)
{
unsigned int flags;
@@ -110,7 +110,7 @@ static inline void svm_load_seg(CPUX86State *env, target_phys_addr_t addr,
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
-static inline void svm_load_seg_cache(CPUX86State *env, target_phys_addr_t addr,
+static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
int seg_reg)
{
SegmentCache sc1, *sc = &sc1;
diff --git a/target-i386/translate.c b/target-i386/translate.c
index eb0cabc..8e676ba 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -107,6 +107,7 @@ typedef struct DisasContext {
int cpuid_ext_features;
int cpuid_ext2_features;
int cpuid_ext3_features;
+ int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
@@ -2017,7 +2018,8 @@ static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
}
}
-static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
+ int *reg_ptr, int *offset_ptr)
{
target_long disp;
int havesib;
@@ -2043,7 +2045,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
if (base == 4) {
havesib = 1;
- code = cpu_ldub_code(cpu_single_env, s->pc++);
+ code = cpu_ldub_code(env, s->pc++);
scale = (code >> 6) & 3;
index = ((code >> 3) & 7) | REX_X(s);
base = (code & 7);
@@ -2054,7 +2056,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
case 0:
if ((base & 7) == 5) {
base = -1;
- disp = (int32_t)cpu_ldl_code(cpu_single_env, s->pc);
+ disp = (int32_t)cpu_ldl_code(env, s->pc);
s->pc += 4;
if (CODE64(s) && !havesib) {
disp += s->pc + s->rip_offset;
@@ -2064,11 +2066,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
}
break;
case 1:
- disp = (int8_t)cpu_ldub_code(cpu_single_env, s->pc++);
+ disp = (int8_t)cpu_ldub_code(env, s->pc++);
break;
default:
case 2:
- disp = (int32_t)cpu_ldl_code(cpu_single_env, s->pc);
+ disp = (int32_t)cpu_ldl_code(env, s->pc);
s->pc += 4;
break;
}
@@ -2131,7 +2133,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
switch (mod) {
case 0:
if (rm == 6) {
- disp = cpu_lduw_code(cpu_single_env, s->pc);
+ disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
gen_op_movl_A0_im(disp);
rm = 0; /* avoid SS override */
@@ -2141,11 +2143,11 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
}
break;
case 1:
- disp = (int8_t)cpu_ldub_code(cpu_single_env, s->pc++);
+ disp = (int8_t)cpu_ldub_code(env, s->pc++);
break;
default:
case 2:
- disp = cpu_lduw_code(cpu_single_env, s->pc);
+ disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
break;
}
@@ -2201,7 +2203,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
*offset_ptr = disp;
}
-static void gen_nop_modrm(DisasContext *s, int modrm)
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
int mod, rm, base, code;
@@ -2215,7 +2217,7 @@ static void gen_nop_modrm(DisasContext *s, int modrm)
base = rm;
if (base == 4) {
- code = cpu_ldub_code(cpu_single_env, s->pc++);
+ code = cpu_ldub_code(env, s->pc++);
base = (code & 7);
}
@@ -2275,7 +2277,8 @@ static void gen_add_A0_ds_seg(DisasContext *s)
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
OR_TMP0 */
-static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
+static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
+ int ot, int reg, int is_store)
{
int mod, rm, opreg, disp;
@@ -2292,7 +2295,7 @@ static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_s
gen_op_mov_reg_T0(ot, reg);
}
} else {
- gen_lea_modrm(s, modrm, &opreg, &disp);
+ gen_lea_modrm(env, s, modrm, &opreg, &disp);
if (is_store) {
if (reg != OR_TMP0)
gen_op_mov_TN_reg(ot, 0, reg);
@@ -2305,22 +2308,22 @@ static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_s
}
}
-static inline uint32_t insn_get(DisasContext *s, int ot)
+static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
{
uint32_t ret;
switch(ot) {
case OT_BYTE:
- ret = cpu_ldub_code(cpu_single_env, s->pc);
+ ret = cpu_ldub_code(env, s->pc);
s->pc++;
break;
case OT_WORD:
- ret = cpu_lduw_code(cpu_single_env, s->pc);
+ ret = cpu_lduw_code(env, s->pc);
s->pc += 2;
break;
default:
case OT_LONG:
- ret = cpu_ldl_code(cpu_single_env, s->pc);
+ ret = cpu_ldl_code(env, s->pc);
s->pc += 4;
break;
}
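
insn_get() taking the CPUX86State explicitly is the pattern this whole translate.c part of the patch applies: every decoder helper now receives env from disas_insn() instead of reading the cpu_single_env global, so all code fetch goes through the caller's CPU. The resulting call chain:

    /* Sketch of the env-threading introduced here:
     *   disas_insn(env, s, pc_start)
     *     -> gen_sse(env, s, b, pc_start, rex_r)
     *     -> gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr)
     *     -> gen_ldst_modrm(env, s, modrm, ot, reg, is_store)
     *     -> insn_get(env, s, ot)
     * each fetching via cpu_ld*_code(env, ...) rather than
     * cpu_ld*_code(cpu_single_env, ...). */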
@@ -3166,7 +3169,8 @@ static const struct SSEOpHelper_eppi sse_op_table7[256] = {
[0x63] = SSE42_OP(pcmpistri),
};
-static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
+static void gen_sse(CPUX86State *env, DisasContext *s, int b,
+ target_ulong pc_start, int rex_r)
{
int b1, op1_offset, op2_offset, is_xmm, val, ot;
int modrm, mod, rm, reg, reg_addr, offset_addr;
@@ -3229,7 +3233,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
gen_helper_enter_mmx(cpu_env);
}
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7);
if (is_xmm)
reg |= rex_r;
@@ -3240,7 +3244,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x0e7: /* movntq */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
break;
case 0x1e7: /* movntdq */
@@ -3248,20 +3252,20 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x12b: /* movntps */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
break;
case 0x3f0: /* lddqu */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
break;
case 0x22b: /* movntss */
case 0x32b: /* movntsd */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (b1 & 1) {
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
xmm_regs[reg]));
@@ -3274,12 +3278,12 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
} else
#endif
{
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
@@ -3289,14 +3293,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
} else
#endif
{
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
@@ -3305,7 +3309,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x6f: /* movq mm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
} else {
rm = (modrm & 7);
@@ -3322,7 +3326,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x16f: /* movdqa xmm, ea */
case 0x26f: /* movdqu xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3332,7 +3336,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x210: /* movss xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_op_movl_T0_0();
@@ -3347,7 +3351,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x310: /* movsd xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
gen_op_movl_T0_0();
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
@@ -3361,7 +3365,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x012: /* movlps */
case 0x112: /* movlpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
/* movhlps */
@@ -3372,7 +3376,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x212: /* movsldup */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3388,7 +3392,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x312: /* movddup */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3401,7 +3405,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x016: /* movhps */
case 0x116: /* movhpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
} else {
/* movlhps */
@@ -3412,7 +3416,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x216: /* movshdup */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3433,8 +3437,8 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (b1 == 1 && reg != 0)
goto illegal_op;
- field_length = cpu_ldub_code(cpu_single_env, s->pc++) & 0x3F;
- bit_index = cpu_ldub_code(cpu_single_env, s->pc++) & 0x3F;
+ field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
+ bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
offsetof(CPUX86State,xmm_regs[reg]));
if (b1 == 1)
@@ -3452,13 +3456,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (s->dflag == 2) {
tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx));
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
}
break;
case 0x17e: /* movd ea, xmm */
@@ -3466,18 +3470,18 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (s->dflag == 2) {
tcg_gen_ld_i64(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
- gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
} else
#endif
{
tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
- gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
}
break;
case 0x27e: /* movq xmm, ea */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3488,7 +3492,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x7f: /* movq ea, mm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
} else {
rm = (modrm & 7);
@@ -3503,7 +3507,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x17f: /* movdqa ea, xmm */
case 0x27f: /* movdqu ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3513,7 +3517,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x211: /* movss ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
gen_op_st_T0_A0(OT_LONG + s->mem_index);
} else {
@@ -3524,7 +3528,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x311: /* movsd ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3535,7 +3539,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x013: /* movlps */
case 0x113: /* movlpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
goto illegal_op;
@@ -3544,7 +3548,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x017: /* movhps */
case 0x117: /* movhpd */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
} else {
goto illegal_op;
@@ -3559,7 +3563,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (b1 >= 2) {
goto illegal_op;
}
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (is_xmm) {
gen_op_movl_T0_im(val);
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
@@ -3609,7 +3613,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x12a: /* cvtpi2pd */
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s->mem_index, op2_offset);
} else {
@@ -3632,7 +3636,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x22a: /* cvtsi2ss */
case 0x32a: /* cvtsi2sd */
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
if (ot == OT_LONG) {
@@ -3654,7 +3658,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x12d: /* cvtpd2pi */
gen_helper_enter_mmx(cpu_env);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,xmm_t0);
gen_ldo_env_A0(s->mem_index, op2_offset);
} else {
@@ -3685,7 +3689,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x32d: /* cvtsd2si */
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if ((b >> 8) & 1) {
gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
} else {
@@ -3717,8 +3721,8 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0xc4: /* pinsrw */
case 0x1c4:
s->rip_offset = 1;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
+ val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
tcg_gen_st16_tl(cpu_T[0], cpu_env,
@@ -3734,7 +3738,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (mod != 3)
goto illegal_op;
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (b1) {
val &= 7;
rm = (modrm & 7) | REX_B(s);
@@ -3751,7 +3755,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x1d6: /* movq ea, xmm */
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
} else {
rm = (modrm & 7) | REX_B(s);
@@ -3795,7 +3799,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
goto crc32;
case 0x038:
b = modrm;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
@@ -3816,7 +3820,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
switch (b) {
case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
@@ -3851,7 +3855,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, op2_offset);
}
}
@@ -3869,7 +3873,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x338: /* crc32 */
crc32:
b = modrm;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b != 0xf0 && b != 0xf1)
@@ -3889,7 +3893,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
gen_op_mov_TN_reg(OT_LONG, 0, reg);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
cpu_T[0], tcg_const_i32(8 << ot));
@@ -3899,7 +3903,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x03a:
case 0x13a:
b = modrm;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
rm = modrm & 7;
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
@@ -3918,9 +3922,9 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
rm = (modrm & 7) | REX_B(s);
if (mod != 3)
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
reg = ((modrm >> 3) & 7) | rex_r;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
switch (b) {
case 0x14: /* pextrb */
tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
@@ -4050,7 +4054,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
} else {
op2_offset = offsetof(CPUX86State,xmm_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldo_env_A0(s->mem_index, op2_offset);
}
} else {
@@ -4059,11 +4063,11 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
} else {
op2_offset = offsetof(CPUX86State,mmx_t0);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_ldq_env_A0(s->mem_index, op2_offset);
}
}
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
s->cc_op = CC_OP_EFLAGS;
@@ -4094,7 +4098,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (is_xmm) {
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,xmm_t0);
if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
b == 0xc2)) {
@@ -4117,7 +4121,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
} else {
op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
op2_offset = offsetof(CPUX86State,mmx_t0);
gen_ldq_env_A0(s->mem_index, op2_offset);
} else {
@@ -4129,7 +4133,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
case 0x0f: /* 3DNow! data insns */
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
goto illegal_op;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
sse_fn_epp = sse_op_table5[val];
if (!sse_fn_epp) {
goto illegal_op;
@@ -4140,7 +4144,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
/* XXX: introduce a new table? */
@@ -4149,7 +4153,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
break;
case 0xc2:
/* compare insns */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (val >= 8)
goto illegal_op;
sse_fn_epp = sse_op_table4[val][b1];
@@ -4194,7 +4198,8 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
/* convert one instruction. s->is_jmp is set if the translation must
be stopped. Return the next pc value */
-static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
+static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
+ target_ulong pc_start)
{
int b, prefixes, aflag, dflag;
int shift, ot;
@@ -4202,8 +4207,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
target_ulong next_eip, tval;
int rex_w, rex_r;
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(pc_start);
+ }
s->pc = pc_start;
prefixes = 0;
aflag = s->code32;
@@ -4218,7 +4224,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#endif
s->rip_offset = 0; /* for relative ip address */
next_byte:
- b = cpu_ldub_code(cpu_single_env, s->pc);
+ b = cpu_ldub_code(env, s->pc);
s->pc++;
/* check prefixes */
#ifdef TARGET_X86_64
@@ -4333,7 +4339,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x0f:
/**************************/
/* extended op code */
- b = cpu_ldub_code(cpu_single_env, s->pc++) | 0x100;
+ b = cpu_ldub_code(env, s->pc++) | 0x100;
goto reswitch;
/**************************/
@@ -4358,12 +4364,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
switch(f) {
case 0: /* OP Ev, Gv */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else if (op == OP_XORL && rm == reg) {
xor_zero:
@@ -4380,12 +4386,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op(s, op, ot, opreg);
break;
case 1: /* OP Gv, Ev */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
reg = ((modrm >> 3) & 7) | rex_r;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(ot + s->mem_index);
} else if (op == OP_XORL && rm == reg) {
goto xor_zero;
@@ -4395,7 +4401,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op(s, op, ot, reg);
break;
case 2: /* OP A, Iv */
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
gen_op(s, op, ot, OR_EAX);
break;
@@ -4417,7 +4423,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
@@ -4427,7 +4433,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
s->rip_offset = 1;
else
s->rip_offset = insn_const_size(ot);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
opreg = rm;
@@ -4438,10 +4444,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x80:
case 0x81:
case 0x82:
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
break;
case 0x83:
- val = (int8_t)insn_get(s, OT_BYTE);
+ val = (int8_t)insn_get(env, s, OT_BYTE);
break;
}
gen_op_movl_T1_im(val);
@@ -4466,14 +4472,14 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
if (mod != 3) {
if (op == 0)
s->rip_offset = insn_const_size(ot);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
gen_op_mov_TN_reg(ot, 0, rm);
@@ -4481,7 +4487,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
switch(op) {
case 0: /* test */
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
gen_op_testl_T0_T1_cc();
s->cc_op = CC_OP_LOGICB + ot;
@@ -4698,7 +4704,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
op = (modrm >> 3) & 7;
@@ -4717,7 +4723,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
}
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (op >= 2 && op != 3 && op != 5)
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
@@ -4810,10 +4816,10 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_TN_reg(ot, 1, reg);
gen_op_testl_T0_T1_cc();
s->cc_op = CC_OP_LOGICB + ot;
@@ -4825,7 +4831,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_mov_TN_reg(ot, 0, OR_EAX);
gen_op_movl_T1_im(val);
@@ -4875,18 +4881,18 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
if (b == 0x69)
s->rip_offset = insn_const_size(ot);
else if (b == 0x6b)
s->rip_offset = 1;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
if (b == 0x69) {
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T1_im(val);
} else if (b == 0x6b) {
- val = (int8_t)insn_get(s, OT_BYTE);
+ val = (int8_t)insn_get(env, s, OT_BYTE);
gen_op_movl_T1_im(val);
} else {
gen_op_mov_TN_reg(ot, 1, reg);
@@ -4939,7 +4945,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
@@ -4950,7 +4956,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_reg_T1(ot, reg);
gen_op_mov_reg_T0(ot, rm);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg(ot, 0, reg);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_op_addl_T0_T1();
@@ -4970,7 +4976,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
t0 = tcg_temp_local_new();
@@ -4982,7 +4988,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
rm = (modrm & 7) | REX_B(s);
gen_op_mov_v_reg(ot, t0, rm);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_mov_tl(a0, cpu_A0);
gen_op_ld_v(ot + s->mem_index, t0, a0);
rm = 0; /* avoid warning */
@@ -5018,7 +5024,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x1c7: /* cmpxchg8b */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if ((mod == 3) || ((modrm & 0x38) != 0x8))
goto illegal_op;
@@ -5029,7 +5035,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_cmpxchg16b(cpu_env, cpu_A0);
} else
#endif
@@ -5039,7 +5045,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_jmp_im(pc_start - s->cs_base);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_cmpxchg8b(cpu_env, cpu_A0);
}
s->cc_op = CC_OP_EFLAGS;
@@ -5080,9 +5086,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = dflag + OT_WORD;
}
if (b == 0x68)
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
else
- val = (int8_t)insn_get(s, OT_BYTE);
+ val = (int8_t)insn_get(env, s, OT_BYTE);
gen_op_movl_T0_im(val);
gen_push_T0(s);
break;
@@ -5092,7 +5098,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
ot = dflag + OT_WORD;
}
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
gen_pop_T0(s);
if (mod == 3) {
@@ -5103,7 +5109,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 1 << ot;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
s->popl_esp_hack = 0;
gen_pop_update(s);
}
@@ -5111,9 +5117,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xc8: /* enter */
{
int level;
- val = cpu_lduw_code(cpu_single_env, s->pc);
+ val = cpu_lduw_code(env, s->pc);
s->pc += 2;
- level = cpu_ldub_code(cpu_single_env, s->pc++);
+ level = cpu_ldub_code(env, s->pc++);
gen_enter(s, val, level);
}
break;
@@ -5193,11 +5199,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
- gen_ldst_modrm(s, modrm, ot, reg, 1);
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
@@ -5205,13 +5211,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod != 3) {
s->rip_offset = insn_const_size(ot);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
}
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
gen_op_movl_T0_im(val);
if (mod != 3)
gen_op_st_T0_A0(ot + s->mem_index);
@@ -5224,18 +5230,18 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = OT_WORD + dflag;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_op_mov_reg_T0(ot, reg);
break;
case 0x8e: /* mov seg, Gv */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
if (reg >= 6 || reg == R_CS)
goto illegal_op;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
@@ -5251,7 +5257,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x8c: /* mov Gv, seg */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (reg >= 6)
@@ -5261,7 +5267,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_WORD + dflag;
else
ot = OT_WORD;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 0x1b6: /* movzbS Gv, Eb */
@@ -5274,7 +5280,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
d_ot = dflag + OT_WORD;
/* ot is the size of source */
ot = (b & 1) + OT_BYTE;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
@@ -5298,7 +5304,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
gen_op_mov_reg_T0(d_ot, reg);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (b & 8) {
gen_op_lds_T0_A0(ot + s->mem_index);
} else {
@@ -5311,7 +5317,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x8d: /* lea */
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
@@ -5320,7 +5326,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
s->override = -1;
val = s->addseg;
s->addseg = 0;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
s->addseg = val;
gen_op_mov_reg_A0(ot - OT_WORD, reg);
break;
@@ -5338,16 +5344,16 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = dflag + OT_WORD;
#ifdef TARGET_X86_64
if (s->aflag == 2) {
- offset_addr = cpu_ldq_code(cpu_single_env, s->pc);
+ offset_addr = cpu_ldq_code(env, s->pc);
s->pc += 8;
gen_op_movq_A0_im(offset_addr);
} else
#endif
{
if (s->aflag) {
- offset_addr = insn_get(s, OT_LONG);
+ offset_addr = insn_get(env, s, OT_LONG);
} else {
- offset_addr = insn_get(s, OT_WORD);
+ offset_addr = insn_get(env, s, OT_WORD);
}
gen_op_movl_A0_im(offset_addr);
}
@@ -5385,7 +5391,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_reg_T0(OT_BYTE, R_EAX);
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
- val = insn_get(s, OT_BYTE);
+ val = insn_get(env, s, OT_BYTE);
gen_op_movl_T0_im(val);
gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
break;
@@ -5394,7 +5400,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (dflag == 2) {
uint64_t tmp;
/* 64 bit case */
- tmp = cpu_ldq_code(cpu_single_env, s->pc);
+ tmp = cpu_ldq_code(env, s->pc);
s->pc += 8;
reg = (b & 7) | REX_B(s);
gen_movtl_T0_im(tmp);
@@ -5403,7 +5409,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#endif
{
ot = dflag ? OT_LONG : OT_WORD;
- val = insn_get(s, ot);
+ val = insn_get(env, s, ot);
reg = (b & 7) | REX_B(s);
gen_op_movl_T0_im(val);
gen_op_mov_reg_T0(ot, reg);
@@ -5422,7 +5428,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3) {
@@ -5433,7 +5439,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_reg_T0(ot, rm);
gen_op_mov_reg_T1(ot, reg);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_mov_TN_reg(ot, 0, reg);
/* for xchg, lock is implicit */
if (!(prefixes & PREFIX_LOCK))
@@ -5465,12 +5471,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
op = R_GS;
do_lxx:
ot = dflag ? OT_LONG : OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(ot + s->mem_index);
gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
/* load the segment first to handle exceptions properly */
@@ -5497,7 +5503,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
@@ -5505,7 +5511,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (shift == 2) {
s->rip_offset = 1;
}
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
opreg = (modrm & 7) | REX_B(s);
@@ -5516,7 +5522,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_shift(s, op, ot, opreg, OR_ECX);
} else {
if (shift == 2) {
- shift = cpu_ldub_code(cpu_single_env, s->pc++);
+ shift = cpu_ldub_code(env, s->pc++);
}
gen_shifti(s, op, ot, opreg, shift);
}
@@ -5550,12 +5556,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
shift = 0;
do_shiftd:
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
reg = ((modrm >> 3) & 7) | rex_r;
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
opreg = OR_TMP0;
} else {
opreg = rm;
@@ -5563,7 +5569,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_op_mov_TN_reg(ot, 1, reg);
if (shift) {
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
tcg_gen_movi_tl(cpu_T3, val);
} else {
tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
@@ -5580,13 +5586,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
rm = modrm & 7;
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
if (mod != 3) {
/* memory op */
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
@@ -6211,7 +6217,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T0_im(val);
gen_check_io(s, ot, pc_start - s->cs_base,
SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
@@ -6231,7 +6237,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_BYTE;
else
ot = dflag ? OT_LONG : OT_WORD;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T0_im(val);
gen_check_io(s, ot, pc_start - s->cs_base,
svm_is_rep(prefixes));
@@ -6293,7 +6299,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/************************/
/* control */
case 0xc2: /* ret im */
- val = cpu_ldsw_code(cpu_single_env, s->pc);
+ val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
gen_pop_T0(s);
if (CODE64(s) && s->dflag)
@@ -6313,7 +6319,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_eob(s);
break;
case 0xca: /* lret im */
- val = cpu_ldsw_code(cpu_single_env, s->pc);
+ val = cpu_ldsw_code(env, s->pc);
s->pc += 2;
do_lret:
if (s->pe && !s->vm86) {
@@ -6369,9 +6375,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xe8: /* call im */
{
if (dflag)
- tval = (int32_t)insn_get(s, OT_LONG);
+ tval = (int32_t)insn_get(env, s, OT_LONG);
else
- tval = (int16_t)insn_get(s, OT_WORD);
+ tval = (int16_t)insn_get(env, s, OT_WORD);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
@@ -6390,8 +6396,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (CODE64(s))
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- offset = insn_get(s, ot);
- selector = insn_get(s, OT_WORD);
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, OT_WORD);
gen_op_movl_T0_im(selector);
gen_op_movl_T1_imu(offset);
@@ -6399,9 +6405,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
goto do_lcall;
case 0xe9: /* jmp im */
if (dflag)
- tval = (int32_t)insn_get(s, OT_LONG);
+ tval = (int32_t)insn_get(env, s, OT_LONG);
else
- tval = (int16_t)insn_get(s, OT_WORD);
+ tval = (int16_t)insn_get(env, s, OT_WORD);
tval += s->pc - s->cs_base;
if (s->dflag == 0)
tval &= 0xffff;
@@ -6416,28 +6422,28 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (CODE64(s))
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- offset = insn_get(s, ot);
- selector = insn_get(s, OT_WORD);
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, OT_WORD);
gen_op_movl_T0_im(selector);
gen_op_movl_T1_imu(offset);
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
- tval = (int8_t)insn_get(s, OT_BYTE);
+ tval = (int8_t)insn_get(env, s, OT_BYTE);
tval += s->pc - s->cs_base;
if (s->dflag == 0)
tval &= 0xffff;
gen_jmp(s, tval);
break;
case 0x70 ... 0x7f: /* jcc Jb */
- tval = (int8_t)insn_get(s, OT_BYTE);
+ tval = (int8_t)insn_get(env, s, OT_BYTE);
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag) {
- tval = (int32_t)insn_get(s, OT_LONG);
+ tval = (int32_t)insn_get(env, s, OT_LONG);
} else {
- tval = (int16_t)insn_get(s, OT_WORD);
+ tval = (int16_t)insn_get(env, s, OT_WORD);
}
do_jcc:
next_eip = s->pc - s->cs_base;
@@ -6448,9 +6454,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0x190 ... 0x19f: /* setcc Gv */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
gen_setcc(s, b);
- gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
{
@@ -6458,12 +6464,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
TCGv t0;
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
t0 = tcg_temp_local_new();
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
} else {
rm = (modrm & 7) | REX_B(s);
@@ -6555,7 +6561,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
gen_pop_update(s);
s->cc_op = CC_OP_EFLAGS;
- /* abort translation because TF flag may change */
+ /* abort translation because TF/AC flag may change */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
}
@@ -6616,19 +6622,19 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* bit operations */
case 0x1ba: /* bt/bts/btr/btc Gv, im */
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
op = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
if (mod != 3) {
s->rip_offset = 1;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T0_A0(ot + s->mem_index);
} else {
gen_op_mov_TN_reg(ot, 0, rm);
}
/* load shift */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_op_movl_T1_im(val);
if (op < 4)
goto illegal_op;
@@ -6647,13 +6653,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
op = 3;
do_btx:
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
gen_op_mov_TN_reg(OT_LONG, 1, reg);
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* specific case: we need to add a displacement */
gen_exts(ot, cpu_T[1]);
tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
@@ -6708,9 +6714,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
TCGv t0;
ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s,modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_extu(ot, cpu_T[0]);
t0 = tcg_temp_local_new();
tcg_gen_mov_tl(t0, cpu_T[0]);
@@ -6780,7 +6786,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xd4: /* aam */
if (CODE64(s))
goto illegal_op;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (val == 0) {
gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
} else {
@@ -6791,7 +6797,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0xd5: /* aad */
if (CODE64(s))
goto illegal_op;
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
gen_helper_aad(cpu_env, tcg_const_i32(val));
s->cc_op = CC_OP_LOGICB;
break;
@@ -6825,7 +6831,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
break;
case 0xcd: /* int N */
- val = cpu_ldub_code(cpu_single_env, s->pc++);
+ val = cpu_ldub_code(env, s->pc++);
if (s->vm86 && s->iopl != 3) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
@@ -6847,7 +6853,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_debug(s, pc_start - s->cs_base);
#else
/* start debug */
- tb_flush(cpu_single_env);
+ tb_flush(env);
cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
break;
@@ -6895,13 +6901,13 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (CODE64(s))
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
gen_op_mov_TN_reg(ot, 0, reg);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
if (ot == OT_WORD) {
@@ -6942,7 +6948,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
{
int l1, l2, l3;
- tval = (int8_t)insn_get(s, OT_BYTE);
+ tval = (int8_t)insn_get(env, s, OT_BYTE);
next_eip = s->pc - s->cs_base;
tval += next_eip;
if (s->dflag == 0)
@@ -7022,7 +7028,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0x134: /* sysenter */
/* For Intel SYSENTER is valid on 64-bit */
- if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -7035,7 +7041,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
break;
case 0x135: /* sysexit */
/* For Intel SYSEXIT is valid on 64-bit */
- if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
goto illegal_op;
if (!s->pe) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -7086,7 +7092,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x100:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -7098,7 +7104,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 2: /* lldt */
if (!s->pe || s->vm86)
@@ -7107,7 +7113,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_lldt(cpu_env, cpu_tmp2_i32);
@@ -7121,7 +7127,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
ot = OT_WORD;
if (mod == 3)
ot += s->dflag;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
break;
case 3: /* ltr */
if (!s->pe || s->vm86)
@@ -7130,7 +7136,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_jmp_im(pc_start - s->cs_base);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
gen_helper_ltr(cpu_env, cpu_tmp2_i32);
@@ -7140,7 +7146,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 5: /* verw */
if (!s->pe || s->vm86)
goto illegal_op;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
if (op == 4) {
@@ -7155,7 +7161,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x101:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
rm = modrm & 7;
@@ -7164,7 +7170,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (mod == 3)
goto illegal_op;
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
gen_op_st_T0_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
@@ -7205,12 +7211,30 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
gen_eob(s);
break;
+ case 2: /* clac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
+ s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_clac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+ case 3: /* stac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
+ s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_stac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
default:
goto illegal_op;
}
} else { /* sidt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
gen_op_st_T0_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
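The only functional addition in this hunk is the pair of clac/stac cases: both decode only when CPUID.(EAX=7,ECX=0):EBX advertises SMAP and the CPL is 0, and both end the translation block, since a change to EFLAGS.AC alters how subsequent memory accesses must be translated. The helpers themselves are not part of this hunk; assuming the usual QEMU pattern of mirroring an EFLAGS bit into hflags so translated code can test it cheaply, they would look roughly like:

/* Sketch only: assumes AC_MASK (EFLAGS.AC) and a matching HF_AC_MASK
 * hflags bit; SMAP reuses AC to let supervisor code temporarily
 * access user pages between stac and clac. */
void helper_clac(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
    env->hflags &= ~HF_AC_MASK;
}

void helper_stac(CPUX86State *env)
{
    env->eflags |= AC_MASK;
    env->hflags |= HF_AC_MASK;
}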
@@ -7312,7 +7336,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
} else {
gen_svm_check_intercept(s, pc_start,
op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_T1_A0(OT_WORD + s->mem_index);
gen_add_A0_im(s, 2);
gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
@@ -7334,14 +7358,14 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
#else
tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
break;
case 6: /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
gen_helper_lmsw(cpu_env, cpu_T[0]);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -7355,7 +7379,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_helper_invlpg(cpu_env, cpu_A0);
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -7422,7 +7446,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* d_ot is the size of destination */
d_ot = dflag + OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
mod = (modrm >> 6) & 3;
rm = (modrm & 7) | REX_B(s);
@@ -7434,7 +7458,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
gen_op_mov_reg_T0(d_ot, reg);
} else {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (d_ot == OT_QUAD) {
gen_op_lds_T0_A0(OT_LONG + s->mem_index);
} else {
@@ -7454,12 +7478,12 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
t1 = tcg_temp_local_new();
t2 = tcg_temp_local_new();
ot = OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = (modrm >> 3) & 7;
mod = (modrm >> 6) & 3;
rm = modrm & 7;
if (mod != 3) {
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
a0 = tcg_temp_local_new();
tcg_gen_mov_tl(a0, cpu_A0);
@@ -7502,9 +7526,9 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!s->pe || s->vm86)
goto illegal_op;
ot = dflag ? OT_LONG : OT_WORD;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
reg = ((modrm >> 3) & 7) | rex_r;
- gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
t0 = tcg_temp_local_new();
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
@@ -7523,7 +7547,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x118:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -7533,24 +7557,24 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 3: /* prefetchnt0 */
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* nothing more to do */
break;
default: /* nop (multi byte) */
- gen_nop_modrm(s, modrm);
+ gen_nop_modrm(env, s, modrm);
break;
}
break;
case 0x119 ... 0x11f: /* nop (multi byte) */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
- gen_nop_modrm(s, modrm);
+ modrm = cpu_ldub_code(env, s->pc++);
+ gen_nop_modrm(env, s, modrm);
break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
@@ -7596,7 +7620,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
* AMD documentation (24594.pdf) and testing of
* intel 386 and 486 processors all show that the mod bits
@@ -7640,16 +7664,16 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!(s->cpuid_features & CPUID_SSE2))
goto illegal_op;
ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
/* generate a generic store */
- gen_ldst_modrm(s, modrm, ot, reg, 1);
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
break;
case 0x1ae:
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
op = (modrm >> 3) & 7;
switch(op) {
@@ -7661,7 +7685,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
@@ -7675,7 +7699,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(pc_start - s->cs_base);
@@ -7691,7 +7715,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
if (op == 2) {
gen_op_ld_T0_A0(OT_LONG + s->mem_index);
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
@@ -7716,7 +7740,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
/* clflush */
if (!(s->cpuid_features & CPUID_CLFLUSH))
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
}
break;
default:
@@ -7724,11 +7748,11 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
}
break;
case 0x10d: /* 3DNow! prefetch(w) */
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
+ modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
- gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
+ gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
/* ignore for now */
break;
case 0x1aa: /* rsm */
@@ -7747,8 +7771,8 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
goto illegal_op;
- modrm = cpu_ldub_code(cpu_single_env, s->pc++);
- reg = ((modrm >> 3) & 7);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
if (s->prefix & PREFIX_DATA)
ot = OT_WORD;
@@ -7757,7 +7781,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
else
ot = OT_QUAD;
- gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
gen_op_mov_reg_T0(ot, reg);
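Beyond threading env through, the popcnt hunk fixes a real decode bug: without the | rex_r term the REX.R prefix was ignored, so a POPCNT with an r8..r15 destination would have written the corresponding low register instead.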
@@ -7774,7 +7798,7 @@ static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
- gen_sse(s, b, pc_start, rex_r);
+ gen_sse(env, s, b, pc_start, rex_r);
break;
default:
goto illegal_op;
@@ -7900,15 +7924,13 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
/* select memory access functions */
dc->mem_index = 0;
if (flags & HF_SOFTMMU_MASK) {
- if (dc->cpl == 3)
- dc->mem_index = 2 * 4;
- else
- dc->mem_index = 1 * 4;
+ dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
}
dc->cpuid_features = env->cpuid_features;
dc->cpuid_ext_features = env->cpuid_ext_features;
dc->cpuid_ext2_features = env->cpuid_ext2_features;
dc->cpuid_ext3_features = env->cpuid_ext3_features;
+ dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
#ifdef TARGET_X86_64
dc->lma = (flags >> HF_LMA_SHIFT) & 1;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
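Deriving mem_index from cpu_mmu_index() instead of open-coding the CPL test keeps the translator in step with any new MMU modes (SMAP adds a kernel-with-AC mode). Assuming cpu_mmu_index() returns 0 for kernel and 1 for user, the encoding is unchanged for the existing cases:

/* old: cpl == 3 ? 2 * 4 : 1 * 4
 * new: (cpu_mmu_index(env) + 1) << 2
 *      kernel: (0 + 1) << 2 == 4 == 1 * 4
 *      user:   (1 + 1) << 2 == 8 == 2 * 4 */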
@@ -7940,7 +7962,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
cpu_ptr0 = tcg_temp_new_ptr();
cpu_ptr1 = tcg_temp_new_ptr();
- gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
@@ -7962,7 +7984,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
}
}
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
if (lj < j) {
lj++;
while (lj < j)
@@ -7976,7 +7998,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- pc_ptr = disas_insn(dc, pc_ptr);
+ pc_ptr = disas_insn(env, dc, pc_ptr);
num_insns++;
/* stop translation if indicated */
if (dc->is_jmp)
@@ -7993,7 +8015,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
break;
}
/* if too long translation, stop generation too */
- if (gen_opc_ptr >= gen_opc_end ||
+ if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
(pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
num_insns >= max_insns) {
gen_jmp_im(pc_ptr - dc->cs_base);
@@ -8009,10 +8031,10 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
if (tb->cflags & CF_LAST_IO)
gen_io_end();
gen_icount_end(tb, num_insns);
- *gen_opc_ptr = INDEX_op_end;
+ *tcg_ctx.gen_opc_ptr = INDEX_op_end;
/* we don't forget to fill the last values */
if (search_pc) {
- j = gen_opc_ptr - gen_opc_buf;
+ j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
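The gen_opc_buf/gen_opc_ptr rewrites in these final hunks follow from the opcode buffers moving out of free-standing globals into the shared tcg_ctx (TCGContext); the pointer arithmetic is unchanged, only the spelling of the buffer and pointer.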
@@ -8029,7 +8051,7 @@ static inline void gen_intermediate_code_internal(CPUX86State *env,
else
#endif
disas_flags = !dc->code32;
- log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
+ log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
qemu_log("\n");
}
#endif