Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--  accel/tcg/cputlb.c   199
 1 file changed, 62 insertions, 137 deletions
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 93b1ca810b..117b516739 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -21,12 +21,16 @@
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
+#include "exec/page-protection.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
+#include "exec/mmu-access-type.h"
+#include "exec/tlb-common.h"
+#include "exec/vaddr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
@@ -95,6 +99,54 @@ static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
+static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
+ MMUAccessType access_type)
+{
+ /* Do not rearrange the CPUTLBEntry structure members. */
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
+ MMU_DATA_LOAD * sizeof(uint64_t));
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
+ MMU_DATA_STORE * sizeof(uint64_t));
+ QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
+ MMU_INST_FETCH * sizeof(uint64_t));
+
+#if TARGET_LONG_BITS == 32
+ /* Use qatomic_read, in case of addr_write; only care about low bits. */
+ const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
+ ptr += HOST_BIG_ENDIAN;
+ return qatomic_read(ptr);
+#else
+ const uint64_t *ptr = &entry->addr_idx[access_type];
+# if TCG_OVERSIZED_GUEST
+ return *ptr;
+# else
+ /* addr_idx might correspond to .addr_write, so use qatomic_read */
+ return qatomic_read(ptr);
+# endif
+#endif
+}
+
+static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
+{
+ return tlb_read_idx(entry, MMU_DATA_STORE);
+}
+
+/* Find the TLB index corresponding to the mmu_idx + address pair. */
+static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx,
+ vaddr addr)
+{
+ uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS;
+
+ return (addr >> TARGET_PAGE_BITS) & size_mask;
+}
+
+/* Find the TLB entry corresponding to the mmu_idx + address pair. */
+static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx,
+ vaddr addr)
+{
+ return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)];
+}
+
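tlb_index() and tlb_entry() are meant to be used as a pair: the index is the virtual page number masked by the per-mmu_idx table size, and the entry it selects is then compared against the page-aligned address. A minimal sketch of a lookup follows; probe_entry_for_write() is a hypothetical helper, not part of this patch, and it omits the low TLB_* flag bits that the real fast path also folds into the comparison.

    /* Hypothetical sketch, not part of this patch. */
    static bool probe_entry_for_write(CPUState *cpu, int mmu_idx, vaddr addr)
    {
        CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
        uint64_t tag = tlb_addr_write(entry);

        /*
         * Simplified hit test: the stored tag must match the page-aligned
         * address.  The real code also accounts for TLB_INVALID_MASK and
         * the other low flag bits.
         */
        return (tag & TARGET_PAGE_MASK) == (addr & TARGET_PAGE_MASK);
    }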
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
size_t max_entries)
{
@@ -366,12 +418,9 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
- if (cpu->created && !qemu_cpu_is_self(cpu)) {
- async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
- RUN_ON_CPU_HOST_INT(idxmap));
- } else {
- tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
- }
+ assert_cpu_is_self(cpu);
+
+ tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
}
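With assert_cpu_is_self() in place, tlb_flush_by_mmuidx() may only be called from the vCPU's own thread; the remote-CPU branch that queued the flush via async_run_on_cpu() is gone. A caller that still needs to flush another vCPU now queues the work itself, along these lines (flush_other_cpu_mmuidx() is a hypothetical helper mirroring the removed branch):

    /* Hypothetical sketch, not part of this patch. */
    static void flush_mmuidx_work(CPUState *cpu, run_on_cpu_data data)
    {
        tlb_flush_by_mmuidx(cpu, data.host_int);
    }

    static void flush_other_cpu_mmuidx(CPUState *other_cpu, uint16_t idxmap)
    {
        /* Runs flush_mmuidx_work() on other_cpu's own thread. */
        async_run_on_cpu(other_cpu, flush_mmuidx_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    }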
void tlb_flush(CPUState *cpu)
@@ -379,21 +428,6 @@ void tlb_flush(CPUState *cpu)
tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}
-void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
-{
- const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
-
- tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);
-
- flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
- fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
-}
-
-void tlb_flush_all_cpus(CPUState *src_cpu)
-{
- tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
-}
-
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;
@@ -575,28 +609,12 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
{
tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
+ assert_cpu_is_self(cpu);
+
/* This should already be page aligned */
addr &= TARGET_PAGE_MASK;
- if (qemu_cpu_is_self(cpu)) {
- tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
- } else if (idxmap < TARGET_PAGE_SIZE) {
- /*
- * Most targets have only a few mmu_idx. In the case where
- * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
- * allocating memory for this operation.
- */
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
- RUN_ON_CPU_TARGET_PTR(addr | idxmap));
- } else {
- TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
-
- /* Otherwise allocate a structure, freed by the worker. */
- d->addr = addr;
- d->idxmap = idxmap;
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
- RUN_ON_CPU_HOST_PTR(d));
- }
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}
void tlb_flush_page(CPUState *cpu, vaddr addr)
@@ -604,46 +622,6 @@ void tlb_flush_page(CPUState *cpu, vaddr addr)
tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}
-void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr,
- uint16_t idxmap)
-{
- tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap);
-
- /* This should already be page aligned */
- addr &= TARGET_PAGE_MASK;
-
- /*
- * Allocate memory to hold addr+idxmap only when needed.
- * See tlb_flush_page_by_mmuidx for details.
- */
- if (idxmap < TARGET_PAGE_SIZE) {
- flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
- RUN_ON_CPU_TARGET_PTR(addr | idxmap));
- } else {
- CPUState *dst_cpu;
-
- /* Allocate a separate data block for each destination cpu. */
- CPU_FOREACH(dst_cpu) {
- if (dst_cpu != src_cpu) {
- TLBFlushPageByMMUIdxData *d
- = g_new(TLBFlushPageByMMUIdxData, 1);
-
- d->addr = addr;
- d->idxmap = idxmap;
- async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
- RUN_ON_CPU_HOST_PTR(d));
- }
- }
- }
-
- tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
-}
-
-void tlb_flush_page_all_cpus(CPUState *src, vaddr addr)
-{
- tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
-}
-
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
vaddr addr,
uint16_t idxmap)
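With the non-synced *_all_cpus entry points removed, the *_all_cpus_synced form above is the remaining broadcast primitive. A hedged usage sketch (target_tlbi_page_all() is illustrative, not part of this patch):

    /* Hypothetical sketch, not part of this patch. */
    static void target_tlbi_page_all(CPUState *cs, vaddr va, uint16_t idxmap)
    {
        /* Broadcast a single-page flush to every vCPU, including cs itself. */
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, va & TARGET_PAGE_MASK,
                                                 idxmap);
    }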
@@ -799,6 +777,8 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
{
TLBFlushRangeData d;
+ assert_cpu_is_self(cpu);
+
/*
* If all bits are significant, and len is small,
* this devolves to tlb_flush_page.
@@ -819,14 +799,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
d.idxmap = idxmap;
d.bits = bits;
- if (qemu_cpu_is_self(cpu)) {
- tlb_flush_range_by_mmuidx_async_0(cpu, d);
- } else {
- /* Otherwise allocate a structure, freed by the worker. */
- TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
- async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
- RUN_ON_CPU_HOST_PTR(p));
- }
+ tlb_flush_range_by_mmuidx_async_0(cpu, d);
}
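The devolution rules in tlb_flush_range_by_mmuidx() (also visible in the removed *_all_cpus copy below) mean that the two calls in this hypothetical example reduce to a single-page flush and a full flush respectively:

    /* Hypothetical examples, not part of this patch. */
    void example_range_flushes(CPUState *cpu, vaddr va, uint16_t idxmap)
    {
        /* All bits significant, one page: behaves like tlb_flush_page_by_mmuidx(). */
        tlb_flush_range_by_mmuidx(cpu, va, TARGET_PAGE_SIZE, idxmap,
                                  TARGET_LONG_BITS);

        /* Fewer significant bits than a page offset: behaves like tlb_flush_by_mmuidx(). */
        tlb_flush_range_by_mmuidx(cpu, va, TARGET_PAGE_SIZE, idxmap,
                                  TARGET_PAGE_BITS - 1);
    }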
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
@@ -835,54 +808,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}
-void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
- vaddr addr, vaddr len,
- uint16_t idxmap, unsigned bits)
-{
- TLBFlushRangeData d;
- CPUState *dst_cpu;
-
- /*
- * If all bits are significant, and len is small,
- * this devolves to tlb_flush_page.
- */
- if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
- tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
- return;
- }
- /* If no page bits are significant, this devolves to tlb_flush. */
- if (bits < TARGET_PAGE_BITS) {
- tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
- return;
- }
-
- /* This should already be page aligned */
- d.addr = addr & TARGET_PAGE_MASK;
- d.len = len;
- d.idxmap = idxmap;
- d.bits = bits;
-
- /* Allocate a separate data block for each destination cpu. */
- CPU_FOREACH(dst_cpu) {
- if (dst_cpu != src_cpu) {
- TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
- async_run_on_cpu(dst_cpu,
- tlb_flush_range_by_mmuidx_async_1,
- RUN_ON_CPU_HOST_PTR(p));
- }
- }
-
- tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
-}
-
-void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
- vaddr addr, uint16_t idxmap,
- unsigned bits)
-{
- tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
- idxmap, bits);
-}
-
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
vaddr addr,
vaddr len,
@@ -1039,7 +964,7 @@ static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
/* update the TLB corresponding to virtual page vaddr
so that it is no longer dirty */
-void tlb_set_dirty(CPUState *cpu, vaddr addr)
+static void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
int mmu_idx;