Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/boot/dts/imx23.dtsi              8
-rw-r--r--  arch/arm/boot/dts/imx28.dtsi              8
-rw-r--r--  arch/arm/boot/dts/imx6dl.dtsi             2
-rw-r--r--  arch/arm/boot/dts/imx6q.dtsi              4
-rw-r--r--  arch/arm/include/asm/mmu_context.h       10
-rw-r--r--  arch/arm/kernel/perf_event.c              1
-rw-r--r--  arch/arm/kernel/smp_tlb.c                18
-rw-r--r--  arch/arm/kernel/smp_twd.c                 2
-rw-r--r--  arch/arm/mach-shmobile/setup-emev2.c      8
-rw-r--r--  arch/arm/mach-shmobile/setup-r8a73a4.c    2
-rw-r--r--  arch/arm/mm/context.c                    55
-rw-r--r--  arch/arm/mm/init.c                        2
-rw-r--r--  arch/c6x/mm/init.c                        1
-rw-r--r--  arch/parisc/include/asm/special_insns.h   9
-rw-r--r--  arch/parisc/include/asm/tlbflush.h        5
-rw-r--r--  arch/parisc/kernel/cache.c                2
-rw-r--r--  arch/parisc/lib/memcpy.c                 79
-rw-r--r--  arch/x86/boot/compressed/eboot.c         20
-rw-r--r--  arch/x86/xen/time.c                      17
19 files changed, 162 insertions, 91 deletions
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 73fd7d0887b..587ceef81e4 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -23,8 +23,12 @@
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 600f7cb51f3..4c10a1968c0 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -32,8 +32,12 @@
};
cpus {
- cpu@0 {
- compatible = "arm,arm926ejs";
+ #address-cells = <0>;
+ #size-cells = <0>;
+
+ cpu {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
};
};
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 5bcdf3a90bb..62dc7812679 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -18,12 +18,14 @@
cpu@0 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
};
cpu@1 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <1>;
next-level-cache = <&L2>;
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 21e675848bd..dc54a72a3bc 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -18,6 +18,7 @@
cpu@0 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <0>;
next-level-cache = <&L2>;
operating-points = <
@@ -39,18 +40,21 @@
cpu@1 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <1>;
next-level-cache = <&L2>;
};
cpu@2 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <2>;
next-level-cache = <&L2>;
};
cpu@3 {
compatible = "arm,cortex-a9";
+ device_type = "cpu";
reg = <3>;
next-level-cache = <&L2>;
};
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc..dc90203c6dd 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -27,7 +27,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
#else /* !CONFIG_CPU_HAS_ASID */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d847c622a7b..9e7076df89e 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -588,6 +588,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40..a98b62dca2f 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
- int cpu, this_cpu;
+ int this_cpu;
cpumask_t mask = { CPU_BITS_NONE };
if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
- for_each_online_cpu(cpu) {
- if (cpu == this_cpu)
- continue;
- /*
- * We only need to send an IPI if the other CPUs are running
- * the same ASID as the one being invalidated. There is no
- * need for locking around the active_asids check since the
- * switch_mm() function has at least one dmb() (as required by
- * this workaround) in case a context switch happens on
- * another CPU after the condition below.
- */
- if (atomic64_read(&mm->context.id) ==
- atomic64_read(&per_cpu(active_asids, cpu)))
- cpumask_set_cpu(cpu, &mask);
- }
+ a15_erratum_get_cpumask(this_cpu, mm, &mask);
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
put_cpu();
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 90525d9d290..f6fd1d4398c 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb,
* changing cpu.
*/
if (flags == POST_RATE_CHANGE)
- smp_call_function(twd_update_frequency,
+ on_each_cpu(twd_update_frequency,
(void *)&cnd->new_rate, 1);
return NOTIFY_OK;
diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c
index 899a86c31ec..1ccddd22811 100644
--- a/arch/arm/mach-shmobile/setup-emev2.c
+++ b/arch/arm/mach-shmobile/setup-emev2.c
@@ -287,14 +287,14 @@ static struct gpio_em_config gio3_config = {
static struct resource gio3_resources[] = {
[0] = {
.name = "GIO_096",
- .start = 0xe0050100,
- .end = 0xe005012b,
+ .start = 0xe0050180,
+ .end = 0xe00501ab,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_096",
- .start = 0xe0050140,
- .end = 0xe005015f,
+ .start = 0xe00501c0,
+ .end = 0xe00501df,
.flags = IORESOURCE_MEM,
},
[2] = {
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index c5a75a7a508..7f45c2edbca 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -62,7 +62,7 @@ enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFB3 };
static const struct plat_sci_port scif[] = {
SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */
SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */
- SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */
+ SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */
SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */
SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */
SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 2ac37372ef5..eeab06ebd06 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -39,19 +39,43 @@
* non 64-bit operations.
*/
#define ASID_FIRST_VERSION (1ULL << ASID_BITS)
-#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS ASID_FIRST_VERSION
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+ cpumask_t *mask)
+{
+ int cpu;
+ unsigned long flags;
+ u64 context_id, asid;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ context_id = mm->context.id.counter;
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
+ continue;
+ /*
+ * We only need to send an IPI if the other CPUs are
+ * running the same ASID as the one being invalidated.
+ */
+ asid = per_cpu(active_asids, cpu).counter;
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, cpu);
+ if (context_id == asid)
+ cpumask_set_cpu(cpu, mask);
+ }
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
@@ -128,7 +152,16 @@ static void flush_context(unsigned int cpu)
asid = 0;
} else {
asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
- __set_bit(ASID_TO_IDX(asid), asid_map);
+ /*
+ * If this CPU has already been through a
+ * rollover, but hasn't run another task in
+ * the meantime, we must preserve its reserved
+ * ASID, as this is the only trace we have of
+ * the process it is still running.
+ */
+ if (asid == 0)
+ asid = per_cpu(reserved_asids, i);
+ __set_bit(asid & ~ASID_MASK, asid_map);
}
per_cpu(reserved_asids, i) = asid;
}
@@ -167,17 +200,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
/*
* Allocate a free ASID. If we can't find one, take a
* note of the currently active ASIDs and mark the TLBs
- * as requiring flushes.
+ * as requiring flushes. We always count from ASID #1,
+ * as we reserve ASID #0 to switch via TTBR0 and indicate
+ * rollover events.
*/
- asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
if (asid == NUM_USER_ASIDS) {
generation = atomic64_add_return(ASID_FIRST_VERSION,
&asid_generation);
flush_context(cpu);
- asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
}
__set_bit(asid, asid_map);
- asid = generation | IDX_TO_ASID(asid);
+ asid |= generation;
cpumask_clear(mm_cpumask(mm));
}
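
For orientation, a minimal sketch (hypothetical helpers, not part of the patch) of the encoding the hunk above switches to: the 64-bit context ID is simply generation | asid, where the hardware ASID occupies the low ASID_BITS bits (8 on these CPUs), slot 0 is reserved for the TTBR0 switch and rollover marking, and the bits above count rollover generations.

#include <linux/types.h>

#define ASID_BITS		8
#define ASID_MASK		(~0ULL << ASID_BITS)
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

/* Low bits: the value programmed into CONTEXTIDR (or TTBR0 with LPAE);
 * allocation starts at 1, so ASID 0 never names a live task. */
static inline u64 ctx_hw_asid(u64 ctx)
{
	return ctx & ~ASID_MASK;
}

/* High bits: rollover generation, bumped by ASID_FIRST_VERSION in
 * new_context(), so context IDs from an older generation never compare
 * equal to a freshly allocated one. */
static inline u64 ctx_generation(u64 ctx)
{
	return ctx & ASID_MASK;
}
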
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc01fcd..0ecc43fd622 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -600,7 +600,7 @@ void __init mem_init(void)
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+ free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, 0, NULL);
#endif
free_highpages();
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index a9fcd89b251..b74ccb5a769 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/initrd.h>
#include <asm/sections.h>
+#include <asm/uaccess.h>
/*
* ZERO_PAGE is a special page that is used for zero-initialized
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index d306b75bc77..e1509308899 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -32,9 +32,12 @@ static inline void set_eiem(unsigned long val)
cr; \
})
-#define mtsp(gr, cr) \
- __asm__ __volatile__("mtsp %0,%1" \
+#define mtsp(val, cr) \
+ { if (__builtin_constant_p(val) && ((val) == 0)) \
+ __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \
+ else \
+ __asm__ __volatile__("mtsp %0,%1" \
: /* no outputs */ \
- : "r" (gr), "i" (cr) : "memory")
+ : "r" (val), "i" (cr) : "memory"); }
#endif /* __PARISC_SPECIAL_INSNS_H */
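
A short usage illustration (not taken from the patch) of how the reworked mtsp() macro behaves for its two kinds of operands: a compile-time zero now uses %r0 directly instead of first loading a general register, while a runtime value such as the sid used in the hunks below keeps the original form.

	mtsp(0, 1);	/* constant 0: emits "mtsp %r0,1", no GR load needed      */
	mtsp(sid, 1);	/* runtime value: loads sid into a GR, emits "mtsp <gr>,1" */
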
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 5273da991e0..9d086a599fa 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -63,13 +63,14 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- unsigned long flags;
+ unsigned long flags, sid;
/* For one page, it's not worth testing the split_tlb variable */
mb();
- mtsp(vma->vm_mm->context,1);
+ sid = vma->vm_mm->context;
purge_tlb_start(flags);
+ mtsp(sid, 1);
pdtlb(addr);
pitlb(addr);
purge_tlb_end(flags);
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 65fb4cbc3a0..2e65aa54bd1 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -440,8 +440,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
else {
unsigned long flags;
- mtsp(sid, 1);
purge_tlb_start(flags);
+ mtsp(sid, 1);
if (split_tlb) {
while (npages--) {
pdtlb(start);
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index a49cc812df8..ac4370b1ca4 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr)
#define prefetch_dst(addr) do { } while(0)
#endif
+#define PA_MEMCPY_OK 0
+#define PA_MEMCPY_LOAD_ERROR 1
+#define PA_MEMCPY_STORE_ERROR 2
+
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
-static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+static inline unsigned long copy_dstaligned(unsigned long dst,
+ unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
* they cannot be. Initialize a2/a3 to shut gcc up.
*/
register unsigned int a0, a1, a2 = 0, a3 = 0;
int sh_1, sh_2;
- struct exception_data *d;
/* prefetch_src((const void *)src); */
@@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
goto do2;
case 0:
if (len == 0)
- return 0;
+ return PA_MEMCPY_OK;
/* a3 = ((unsigned int *) src)[0];
a0 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -256,42 +261,35 @@ do0:
preserve_branch(handle_load_error);
preserve_branch(handle_store_error);
- return 0;
+ return PA_MEMCPY_OK;
handle_load_error:
__asm__ __volatile__ ("cda_ldw_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
- o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
- return o_len * 4 - d->fault_addr + o_src;
+ return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("cda_stw_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
- o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
- return o_len * 4 - d->fault_addr + o_dst;
+ return PA_MEMCPY_STORE_ERROR;
}
-/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
+ * In case of an access fault the faulty address can be read from the per_cpu
+ * exception data struct. */
+static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+ unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
register unsigned int *pws, *pwd;
register double *pds, *pdd;
- unsigned long ret = 0;
- unsigned long o_dst, o_src, o_len;
- struct exception_data *d;
+ unsigned long ret;
src = (unsigned long)srcp;
dst = (unsigned long)dstp;
pcs = (unsigned char *)srcp;
pcd = (unsigned char *)dstp;
- o_dst = dst; o_src = src; o_len = len;
-
/* prefetch_src((const void *)srcp); */
if (len < THRESHOLD)
@@ -401,7 +399,7 @@ byte_copy:
len--;
}
- return 0;
+ return PA_MEMCPY_OK;
unaligned_copy:
/* possibly we are aligned on a word, but not on a double... */
@@ -438,8 +436,7 @@ unaligned_copy:
src = (unsigned long)pcs;
}
- ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
- o_dst, o_src, o_len);
+ ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
if (ret)
return ret;
@@ -454,17 +451,41 @@ unaligned_copy:
handle_load_error:
__asm__ __volatile__ ("pmc_load_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
- o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
- return o_len - d->fault_addr + o_src;
+ return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("pmc_store_exc:\n");
+ return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+ unsigned long ret, fault_addr, reference;
+ struct exception_data *d;
+
+ ret = pa_memcpy_internal(dstp, srcp, len);
+ if (likely(ret == PA_MEMCPY_OK))
+ return 0;
+
+ /* if a load or store fault occured we can get the faulty addr */
d = &__get_cpu_var(exception_data);
- DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
- o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
- return o_len - d->fault_addr + o_dst;
+ fault_addr = d->fault_addr;
+
+ /* error in load or store? */
+ if (ret == PA_MEMCPY_LOAD_ERROR)
+ reference = (unsigned long) srcp;
+ else
+ reference = (unsigned long) dstp;
+
+ DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+ ret, len, fault_addr, reference);
+
+ if (fault_addr >= reference)
+ return len - (fault_addr - reference);
+ else
+ return len;
}
#ifdef __KERNEL__
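
A quick worked example of the fault accounting introduced above (the numbers are made up): for a copy of len = 256 bytes that faults on the load side, ret is PA_MEMCPY_LOAD_ERROR, so reference is srcp; if the recorded fault_addr is srcp + 100, pa_memcpy() reports 256 - 100 = 156 bytes not transferred. A fault address below the reference buffer conservatively reports the full length as untransferred.
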
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index c205035a6b9..d606463aa6d 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -992,18 +992,20 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
efi_memory_desc_t *mem_map;
efi_status_t status;
__u32 desc_version;
+ bool called_exit = false;
u8 nr_entries;
int i;
size = sizeof(*mem_map) * 32;
again:
- size += sizeof(*mem_map);
+ size += sizeof(*mem_map) * 2;
_size = size;
status = low_alloc(size, 1, (unsigned long *)&mem_map);
if (status != EFI_SUCCESS)
return status;
+get_map:
status = efi_call_phys5(sys_table->boottime->get_memory_map, &size,
mem_map, &key, &desc_size, &desc_version);
if (status == EFI_BUFFER_TOO_SMALL) {
@@ -1029,8 +1031,20 @@ again:
/* Might as well exit boot services now */
status = efi_call_phys2(sys_table->boottime->exit_boot_services,
handle, key);
- if (status != EFI_SUCCESS)
- goto free_mem_map;
+ if (status != EFI_SUCCESS) {
+ /*
+ * ExitBootServices() will fail if any of the event
+ * handlers change the memory map. In which case, we
+ * must be prepared to retry, but only once so that
+ * we're guaranteed to exit on repeated failures instead
+ * of spinning forever.
+ */
+ if (called_exit)
+ goto free_mem_map;
+
+ called_exit = true;
+ goto get_map;
+ }
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 3d88bfdf9e1..13e8935e2ea 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
-/* unused ns of stolen and blocked time */
+/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
-static DEFINE_PER_CPU(u64, xen_residual_blocked);
/* return an consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
@@ -115,7 +114,7 @@ static void do_stolen_accounting(void)
{
struct vcpu_runstate_info state;
struct vcpu_runstate_info *snap;
- s64 blocked, runnable, offline, stolen;
+ s64 runnable, offline, stolen;
cputime_t ticks;
get_runstate_snapshot(&state);
@@ -125,7 +124,6 @@ static void do_stolen_accounting(void)
snap = &__get_cpu_var(xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
- blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];
@@ -141,17 +139,6 @@ static void do_stolen_accounting(void)
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
__this_cpu_write(xen_residual_stolen, stolen);
account_steal_ticks(ticks);
-
- /* Add the appropriate number of ticks of blocked time,
- including any left-overs from last time. */
- blocked += __this_cpu_read(xen_residual_blocked);
-
- if (blocked < 0)
- blocked = 0;
-
- ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
- __this_cpu_write(xen_residual_blocked, blocked);
- account_idle_ticks(ticks);
}
/* Get the TSC speed from Xen */