author     Avi Kivity <avi@redhat.com>             2012-05-13 19:53:23 +0300
committer  Marcelo Tosatti <mtosatti@redhat.com>   2012-05-16 16:03:19 -0300
commit     512d5649e8dc3ed36f2ebf0818da64a4d4c2544a (patch)
tree       75fa25fcf4c7e7fc8f9fc8ab4c2aa01227ab9983 /arch/x86/kvm/vmx.c
parent     d54e4237bcbb400fda11c902fd538aa0b4805720 (diff)
KVM: VMX: Fix %ds/%es clobber
The vmx exit code unconditionally restores %ds and %es to __USER_DS. This can
override the user's values, since %ds and %es are not saved and restored in
x86_64 syscalls. In practice, this isn't dangerous since nobody uses segment
registers in long mode, least of all programs that use KVM.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
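For readers unfamiliar with the helpers the fix switches to, below is a minimal
sketch of the save/restore pattern it adopts. It is not part of the patch: the
function name and the host_ds/host_es variables are hypothetical, and the
include locations are assumed; savesegment()/loadsegment() themselves are the
kernel's existing x86 helper macros.

/*
 * Illustrative sketch only -- not part of the patch.  The function and
 * variable names are hypothetical.
 */
#include <linux/types.h>	/* u16 */
#include <asm/segment.h>	/* savesegment(), loadsegment() (location assumed) */

static void preserve_host_data_segments_example(void)
{
	u16 host_ds, host_es;

	savesegment(ds, host_ds);	/* capture the current %ds selector */
	savesegment(es, host_es);	/* capture the current %es selector */

	/* ... work that may clobber %ds/%es, e.g. a VM entry/exit ... */

	loadsegment(ds, host_ds);	/* restore what was actually loaded, */
	loadsegment(es, host_es);	/* rather than forcing __USER_DS      */
}

A further nicety of loadsegment() over the removed raw mov is that it carries
an exception fixup, so a saved selector that can no longer be loaded falls
back to the NULL selector instead of faulting.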
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--   arch/x86/kvm/vmx.c   6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3062ea95266..f2ee016e100 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6102,7 +6102,10 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u16 _ds, _es;
 
+	savesegment(ds, _ds);
+	savesegment(es, _es);
 	if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 		if (vmcs12->idt_vectoring_info_field &
@@ -6263,7 +6266,8 @@
 		}
 	}
 
-	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+	loadsegment(ds, _ds);
+	loadsegment(es, _es);
 	vmx->loaded_vmcs->launched = 1;
 
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);