author    Marc Zyngier <marc.zyngier@arm.com>    2016-04-28 16:16:31 +0100
committer Sasha Levin <sasha.levin@oracle.com>   2016-06-03 11:30:30 -0400
commit    ddad90800973c7eab57a42c5994812294581969a (patch)
tree      f7b7424b65df993cba9a906e9f67a361e6c28734 /arch/arm
parent    73e15d61cd95cfc7b5d2ed42b75897df0f9f90a0 (diff)
arm/arm64: KVM: Enforce Break-Before-Make on Stage-2 page tables
[ Upstream commit d4b9e0790aa764c0b01e18d4e8d33e93ba36d51f ]

The ARM architecture mandates that when changing a page table entry from one
valid entry to another valid entry, an invalid entry must first be written,
the TLB invalidated, and only then the new entry written. The current code
doesn't respect this: it writes the new entry directly and only then
invalidates the TLBs. Let's fix it up.

Cc: <stable@vger.kernel.org>
Reported-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
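For illustration only (not part of the commit): a minimal stand-alone C sketch
of the Break-Before-Make ordering the patch enforces. The names below
(fake_pte_t, PTE_VALID, tlb_flush_ipa, set_stage2_pte) are hypothetical
stand-ins for the kernel's pte_t, kvm_tlb_flush_vmid_ipa() and
stage2_set_pte(); they only model the required order of operations, not the
real KVM code paths.

/*
 * Break-Before-Make sketch: when replacing a valid entry, first write an
 * invalid entry, then invalidate the TLB, and only then write the new entry.
 * All types/helpers here are hypothetical stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t fake_pte_t;               /* stand-in for pte_t */
#define PTE_VALID ((fake_pte_t)1 << 0)     /* stand-in "present/valid" bit */

static bool pte_is_present(fake_pte_t pte)
{
	return pte & PTE_VALID;
}

static void tlb_flush_ipa(uint64_t ipa)
{
	/* Stand-in for kvm_tlb_flush_vmid_ipa(): drop stale translations. */
	printf("TLB invalidate for IPA 0x%llx\n", (unsigned long long)ipa);
}

static void set_stage2_pte(fake_pte_t *slot, fake_pte_t new_pte, uint64_t ipa)
{
	fake_pte_t old_pte = *slot;

	if (pte_is_present(old_pte)) {
		*slot = 0;             /* 1. break: write an invalid entry    */
		tlb_flush_ipa(ipa);    /* 2. invalidate the TLB for that IPA  */
	}
	*slot = new_pte;               /* 3. make: only now write the new PTE */
}

int main(void)
{
	fake_pte_t slot = 0xdead0000 | PTE_VALID;

	/* Replacing a valid entry goes through break -> flush -> make. */
	set_stage2_pte(&slot, 0xbeef0000 | PTE_VALID, 0x40000000);
	printf("final pte: 0x%llx\n", (unsigned long long)slot);
	return 0;
}

The point modelled here is the same one the diff below makes for both
stage2_set_pmd_huge() and stage2_set_pte(): the new entry is written only
after the old valid entry has been cleared and its TLB entries invalidated.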
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/kvm/mmu.c | 17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3535480e0e6b..0310b03697e0 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -841,11 +841,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
 	old_pmd = *pmd;
-	kvm_set_pmd(pmd, *new_pmd);
-	if (pmd_present(old_pmd))
+	if (pmd_present(old_pmd)) {
+		pmd_clear(pmd);
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	else
+	} else {
 		get_page(virt_to_page(pmd));
+	}
+
+	kvm_set_pmd(pmd, *new_pmd);
 	return 0;
 }
@@ -882,12 +885,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	/* Create 2nd stage page table mapping - Level 3 */
 	old_pte = *pte;
-	kvm_set_pte(pte, *new_pte);
-	if (pte_present(old_pte))
+	if (pte_present(old_pte)) {
+		kvm_set_pte(pte, __pte(0));
 		kvm_tlb_flush_vmid_ipa(kvm, addr);
-	else
+	} else {
 		get_page(virt_to_page(pte));
+	}
+	kvm_set_pte(pte, *new_pte);
 	return 0;
 }