author     Andrew Cooper <andrew.cooper3@citrix.com>   2022-01-25 13:38:42 +0100
committer  Jan Beulich <jbeulich@suse.com>             2022-01-25 13:38:42 +0100
commit     20b00921f8a62b1b19d893dd468473161706e02d (patch)
tree       52ac18b6db34b6bf13bc62bed0d792fd6216e5b6
parent     8509519268ea6a467a97ca7891a0dca2bfe88cf7 (diff)
x86/guest: Introduce {get,set}_reg() infrastructure
Various registers have per-guest-type or per-vendor locations or access
requirements.  To support their use from common code, provide accessors
which allow for per-guest-type behaviour.

For now, just infrastructure handling default cases and expectations.
Subsequent patches will start handling registers using this
infrastructure.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
master commit: 88d3ff7ab15da277a85b39735797293fb541c718
master date: 2022-01-20 16:32:11 +0000
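As a purely illustrative sketch (not part of this patch), common code would
consume the new accessors roughly as follows.  REG_EXAMPLE stands in for
whichever register a subsequent patch wires up; only hvm_get_reg(),
hvm_set_reg() and their calling convention come from this change.

/*
 * Hypothetical caller -- illustration only.  REG_EXAMPLE is an assumed
 * placeholder for a register handled by a later patch; the accessors do
 * not fail, so no error handling is needed at the call site.
 */
static void example_sync_reg(struct vcpu *v)
{
    /* May be used on a non-current vcpu, provided it isn't runnable. */
    uint64_t val = hvm_get_reg(v, REG_EXAMPLE);

    /* The caller is responsible for auditing/adjusting the value. */
    hvm_set_reg(v, REG_EXAMPLE, val);
}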
-rw-r--r--   xen/arch/x86/hvm/hvm.c           |  22
-rw-r--r--   xen/arch/x86/hvm/svm/svm.c       |  30
-rw-r--r--   xen/arch/x86/hvm/vmx/vmx.c       |  31
-rw-r--r--   xen/arch/x86/pv/emulate.c        |  31
-rw-r--r--   xen/include/asm-x86/hvm/hvm.h    |  15
-rw-r--r--   xen/include/asm-x86/pv/domain.h  |   4
6 files changed, 133 insertions(+), 0 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b680f62892..6820527df0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3742,6 +3742,28 @@ gp_fault:
return X86EMUL_EXCEPTION;
}
+uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg)
+{
+ ASSERT(v == current || !vcpu_runnable(v));
+
+ switch ( reg )
+ {
+ default:
+ return alternative_call(hvm_funcs.get_reg, v, reg);
+ }
+}
+
+void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
+{
+ ASSERT(v == current || !vcpu_runnable(v));
+
+ switch ( reg )
+ {
+ default:
+ return alternative_vcall(hvm_funcs.set_reg, v, reg, val);
+ }
+}
+
static bool is_sysdesc_access(const struct x86_emulate_state *state,
const struct x86_emulate_ctxt *ctxt)
{
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index f0e10dec04..74b2b0e092 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2469,6 +2469,33 @@ static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
return true;
}
+static uint64_t svm_get_reg(struct vcpu *v, unsigned int reg)
+{
+ struct domain *d = v->domain;
+
+ switch ( reg )
+ {
+ default:
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
+ __func__, v, reg);
+ domain_crash(d);
+ return 0;
+ }
+}
+
+static void svm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
+{
+ struct domain *d = v->domain;
+
+ switch ( reg )
+ {
+ default:
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x, 0x%016"PRIx64") Bad register\n",
+ __func__, v, reg, val);
+ domain_crash(d);
+ }
+}
+
static struct hvm_function_table __initdata svm_function_table = {
.name = "SVM",
.cpu_up_prepare = svm_cpu_up_prepare,
@@ -2518,6 +2545,9 @@ static struct hvm_function_table __initdata svm_function_table = {
.nhvm_intr_blocked = nsvm_intr_blocked,
.nhvm_hap_walk_L1_p2m = nsvm_hap_walk_L1_p2m,
+ .get_reg = svm_get_reg,
+ .set_reg = svm_set_reg,
+
.tsc_scaling = {
.max_ratio = ~TSC_RATIO_RSVD_BITS,
},
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d403e2d806..6f81751921 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2404,6 +2404,33 @@ static int vmtrace_reset(struct vcpu *v)
return 0;
}
+static uint64_t vmx_get_reg(struct vcpu *v, unsigned int reg)
+{
+ struct domain *d = v->domain;
+
+ switch ( reg )
+ {
+ default:
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
+ __func__, v, reg);
+ domain_crash(d);
+ return 0;
+ }
+}
+
+static void vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
+{
+ struct domain *d = v->domain;
+
+ switch ( reg )
+ {
+ default:
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x, 0x%016"PRIx64") Bad register\n",
+ __func__, v, reg, val);
+ domain_crash(d);
+ }
+}
+
static struct hvm_function_table __initdata vmx_function_table = {
.name = "VMX",
.cpu_up_prepare = vmx_cpu_up_prepare,
@@ -2464,6 +2491,10 @@ static struct hvm_function_table __initdata vmx_function_table = {
.vmtrace_set_option = vmtrace_set_option,
.vmtrace_get_option = vmtrace_get_option,
.vmtrace_reset = vmtrace_reset,
+
+ .get_reg = vmx_get_reg,
+ .set_reg = vmx_set_reg,
+
.tsc_scaling = {
.max_ratio = VMX_TSC_MULTIPLIER_MAX,
},
diff --git a/xen/arch/x86/pv/emulate.c b/xen/arch/x86/pv/emulate.c
index e8bb326efd..ae049b60f2 100644
--- a/xen/arch/x86/pv/emulate.c
+++ b/xen/arch/x86/pv/emulate.c
@@ -90,6 +90,37 @@ void pv_emul_instruction_done(struct cpu_user_regs *regs, unsigned long rip)
}
}
+uint64_t pv_get_reg(struct vcpu *v, unsigned int reg)
+{
+ struct domain *d = v->domain;
+
+ ASSERT(v == current || !vcpu_runnable(v));
+
+ switch ( reg )
+ {
+ default:
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
+ __func__, v, reg);
+ domain_crash(d);
+ return 0;
+ }
+}
+
+void pv_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
+{
+ struct domain *d = v->domain;
+
+ ASSERT(v == current || !vcpu_runnable(v));
+
+ switch ( reg )
+ {
+ default:
+ printk(XENLOG_G_ERR "%s(%pv, 0x%08x, 0x%016"PRIx64") Bad register\n",
+ __func__, v, reg, val);
+ domain_crash(d);
+ }
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index bd2cbb0e7b..acc660c001 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -222,6 +222,9 @@ struct hvm_function_table {
int (*vmtrace_get_option)(struct vcpu *v, uint64_t key, uint64_t *value);
int (*vmtrace_reset)(struct vcpu *v);
+ uint64_t (*get_reg)(struct vcpu *v, unsigned int reg);
+ void (*set_reg)(struct vcpu *v, unsigned int reg, uint64_t val);
+
/*
* Parameters and callbacks for hardware-assisted TSC scaling,
* which are valid only when the hardware feature is available.
@@ -728,6 +731,18 @@ static inline int hvm_vmtrace_reset(struct vcpu *v)
}
/*
+ * Accessors for registers which have per-guest-type or per-vendor locations
+ * (e.g. VMCS, msr load/save lists, VMCB, VMLOAD lazy, etc).
+ *
+ * The caller is responsible for all auditing - these accessors do not fail,
+ * but do use domain_crash() for usage errors.
+ *
+ * Must cope with being called in non-current context.
+ */
+uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg);
+void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val);
+
+/*
* This must be defined as a macro instead of an inline function,
* because it uses 'struct vcpu' and 'struct domain' which have
* not been defined yet.
diff --git a/xen/include/asm-x86/pv/domain.h b/xen/include/asm-x86/pv/domain.h
index df9716ff26..3a67816764 100644
--- a/xen/include/asm-x86/pv/domain.h
+++ b/xen/include/asm-x86/pv/domain.h
@@ -65,6 +65,10 @@ static inline unsigned long get_pcid_bits(const struct vcpu *v, bool is_xpti)
#endif
}
+/* See hvm_{get,set}_reg() for description. */
+uint64_t pv_get_reg(struct vcpu *v, unsigned int reg);
+void pv_set_reg(struct vcpu *v, unsigned int reg, uint64_t val);
+
#ifdef CONFIG_PV
void pv_vcpu_destroy(struct vcpu *v);
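
For now, every switch statement added above contains only a default case.  A
follow-up patch would extend a vendor hook along the lines below; the
MSR_SPEC_CTRL case and the read_spec_ctrl_from_vmcb() helper are assumed
placeholders for illustration, not taken from this patch.

/*
 * Hypothetical follow-up -- illustration only.  Shows the intended shape
 * of svm_get_reg() once a real register is handled; MSR_SPEC_CTRL and
 * read_spec_ctrl_from_vmcb() are assumptions, not part of this change.
 */
static uint64_t svm_get_reg(struct vcpu *v, unsigned int reg)
{
    struct domain *d = v->domain;

    switch ( reg )
    {
    case MSR_SPEC_CTRL:
        /* Fetch from the vendor-specific location (e.g. the VMCB). */
        return read_spec_ctrl_from_vmcb(v);

    default:
        printk(XENLOG_G_ERR "%s(%pv, 0x%08x) Bad register\n",
               __func__, v, reg);
        domain_crash(d);
        return 0;
    }
}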