author	Rishabh Bhatnagar <risbhat@amazon.com>	2023-05-10 18:15:43 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-05-17 11:48:19 +0200
commit	e10a73f5380958629b4ae8d2ecad29f3e82a1b7f (patch)
tree	1efbc44fc01ecf374bfe13f62777c3178cdd19a6
parent	029662004359364428d6cca688acb0441189af1b (diff)
KVM: x86: do not set st->preempted when going back to user space
From: Paolo Bonzini <pbonzini@redhat.com>

commit 54aa83c90198e68eee8b0850c749bc70efb548da upstream.

Similar to the Xen path, only change the vCPU's reported state if the vCPU
was actually preempted.  The reason for KVM's behavior is that for example
optimistic spinning might not be a good idea if the guest is doing repeated
exits to userspace; however, it is confusing and unlikely to make a
difference, because well-tuned guests will hardly ever exit KVM_RUN in the
first place.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[risbhat@amazon.com: Don't check for xen msr as support is not available
 and skip the SEV-ES condition]
Signed-off-by: Rishabh Bhatnagar <risbhat@amazon.com>
Tested-by: Allen Pais <apais@linux.microsoft.com>
Acked-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
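For background, a minimal sketch (abridged and approximate, based on the generic
preempt-notifier path in virt/kvm/kvm_main.c for kernels of this era; not part of
this patch) of why checking vcpu->preempted distinguishes real preemption from a
plain return to userspace: the flag is only set when the vCPU task is scheduled
out while still runnable, never when the KVM_RUN ioctl completes and puts the
vCPU via vcpu_put().

/*
 * Abridged/approximate sketch of the preempt-notifier sched-out hook
 * (virt/kvm/kvm_main.c); for illustration only.
 */
static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	/*
	 * Involuntary preemption: the task is switched out while still
	 * runnable, so mark the vCPU as preempted (and ready to be
	 * yielded to).
	 */
	if (current->state == TASK_RUNNING) {
		WRITE_ONCE(vcpu->preempted, true);
		WRITE_ONCE(vcpu->ready, true);
	}

	/*
	 * kvm_arch_vcpu_put() is also reached via vcpu_put() when the
	 * vCPU ioctl returns to userspace; in that case vcpu->preempted
	 * stays false, which is exactly what the hunk below checks.
	 */
	kvm_arch_vcpu_put(vcpu);
}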
 arch/x86/kvm/x86.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3881bf7d1ac4..116a225fb26e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4139,16 +4139,18 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	int idx;
 
-	if (vcpu->preempted)
+	if (vcpu->preempted) {
 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
 
-	/*
-	 * kvm_memslots() will be called by
-	 * kvm_write_guest_offset_cached() so take the srcu lock.
-	 */
-	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	kvm_steal_time_set_preempted(vcpu);
-	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+		/*
+		 * Take the srcu lock as memslots will be accessed to check the gfn
+		 * cache generation against the memslots generation.
+		 */
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		kvm_steal_time_set_preempted(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	}
+
 	kvm_x86_ops.vcpu_put(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 	/*