From 97a5b81fa4d3a11dcdf224befc577f2e0abadc0b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 2 May 2013 16:21:15 +0200
Subject: x86: Fix idle consolidation fallout

The core code expects the arch idle code to return with interrupts
enabled. The conversion missed two x86 cases which fail to do that.

Reported-and-tested-by: Markus Trippelsdorf
Tested-by: Borislav Petkov
Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1305021557030.3972@ionos
Signed-off-by: Thomas Gleixner
---
 arch/x86/kernel/process.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 607af0d4d5ef..4e7a37ff03ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
         if (cpuidle_idle_call())
                 x86_idle();
+        else
+                local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-        if (need_resched())
-                return;
-
         if (!amd_e400_c1e_detected) {
                 u32 lo, hi;
 
--
cgit v1.2.3
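The fix above hinges on a simple contract: the generic idle loop disables
interrupts before calling arch_cpu_idle() and relies on the architecture code
re-enabling them on every return path. The snippet below is a minimal
userspace model of that contract, not kernel code; every symbol in it is a
simplified stand-in, and it exists only to show why the added else branch
(and the removed early return in amd_e400_idle()) matter.

  /*
   * Userspace model of the arch_cpu_idle() interrupt contract.
   * All functions are stand-ins; "interrupts" are just a bool.
   */
  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  static bool irqs_enabled;

  static void local_irq_disable(void) { irqs_enabled = false; }
  static void local_irq_enable(void)  { irqs_enabled = true;  }

  /* 0 means cpuidle handled the idle period, nonzero means fall back. */
  static int cpuidle_idle_call(void) { return 0; }

  /* Fallback idle routine: re-enables interrupts itself before returning. */
  static void x86_idle(void) { local_irq_enable(); }

  static void arch_cpu_idle(void)
  {
          if (cpuidle_idle_call())
                  x86_idle();
          else
                  local_irq_enable();     /* the branch the patch adds */
  }

  int main(void)
  {
          /* Simplified generic idle loop: disable, idle, expect enabled. */
          local_irq_disable();
          arch_cpu_idle();
          assert(irqs_enabled);   /* trips if a path forgets to re-enable */
          printf("arch_cpu_idle() returned with interrupts enabled\n");
          return 0;
  }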
From 074d72ff57f65de779e2f70d5906964c0ba1c123 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Wed, 8 May 2013 12:13:03 -0400
Subject: x86/microcode: Add local mutex to fix physical CPU hot-add deadlock

This can easily be triggered if a new CPU is added (via
ACPI hotplug mechanism) and from user-space you do:

 echo 1 > /sys/devices/system/cpu/cpu3/online

(or wait for UDEV to do it) on a newly appeared physical CPU.

The deadlock is that the "store_online" in drivers/base/cpu.c
takes the cpu_hotplug_driver_lock() lock, then calls "cpu_up".
"cpu_up" eventually ends up calling "save_mc_for_early"
which also takes the cpu_hotplug_driver_lock() lock.

And here is what lockdep thinks of it:

 smpboot: Stack at about ffff880075c39f44
 smpboot: CPU3: has booted.
 microcode: CPU3 sig=0x206a7, pf=0x2, revision=0x25

 =============================================
 [ INFO: possible recursive locking detected ]
 3.9.0upstream-10129-g167af0e #1 Not tainted
 ---------------------------------------------
 sh/2487 is trying to acquire lock:
  (x86_cpu_hotplug_driver_mutex){+.+.+.}, at: [] cpu_hotplug_driver_lock+0x12/0x20

 but task is already holding lock:
  (x86_cpu_hotplug_driver_mutex){+.+.+.}, at: [] cpu_hotplug_driver_lock+0x12/0x20

 other info that might help us debug this:
  Possible unsafe locking scenario:

        CPU0
        ----
   lock(x86_cpu_hotplug_driver_mutex);
   lock(x86_cpu_hotplug_driver_mutex);

  *** DEADLOCK ***

  May be due to missing lock nesting notation

 6 locks held by sh/2487:
  #0:  (sb_writers#5){.+.+.+}, at: [] vfs_write+0x17d/0x190
  #1:  (&buffer->mutex){+.+.+.}, at: [] sysfs_write_file+0x3f/0x160
  #2:  (s_active#20){.+.+.+}, at: [] sysfs_write_file+0xc8/0x160
  #3:  (x86_cpu_hotplug_driver_mutex){+.+.+.}, at: [] cpu_hotplug_driver_lock+0x12/0x20
  #4:  (cpu_add_remove_lock){+.+.+.}, at: [] cpu_maps_update_begin+0x12/0x20
  #5:  (cpu_hotplug.lock){+.+.+.}, at: [] cpu_hotplug_begin+0x27/0x60

Suggested-and-Acked-by: Borislav Petkov
Signed-off-by: Konrad Rzeszutek Wilk
Cc: fenghua.yu@intel.com
Cc: xen-devel@lists.xensource.com
Cc: stable@vger.kernel.org # for v3.9
Link: http://lkml.kernel.org/r/1368029583-23337-1-git-send-email-konrad.wilk@oracle.com
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/microcode_intel_early.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/x86')

diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac9..2e9e12871c2b 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
          * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
          * hotplug.
          */
-        cpu_hotplug_driver_lock();
+        mutex_lock(&x86_cpu_microcode_mutex);
 
         mc_saved_count_init = mc_saved_data.mc_saved_count;
         mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
         }
 
 out:
-        cpu_hotplug_driver_unlock();
+        mutex_unlock(&x86_cpu_microcode_mutex);
 
         return ret;
 }
--
cgit v1.2.3
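The splat above is the classic self-deadlock on a non-recursive mutex: the
call chain already holds x86_cpu_hotplug_driver_mutex when save_mc_for_early()
tries to take it again. The sketch below (userspace pthreads, not the kernel
implementation; the names mirror the kernel ones only for readability) shows
the pattern the patch adopts: the saved-microcode cache gets its own private
mutex, so the helper no longer re-enters a lock its callers already hold.

  /*
   * Userspace model of the fixed locking pattern. All names are
   * illustrative stand-ins for the kernel symbols, not the real API.
   */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t cpu_hotplug_driver_mutex = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t x86_cpu_microcode_mutex  = PTHREAD_MUTEX_INITIALIZER;

  static int mc_saved_count;    /* stands in for mc_saved_data */

  /* Called with cpu_hotplug_driver_mutex already held by the caller. */
  static void save_mc_for_early(void)
  {
          /*
           * Re-taking cpu_hotplug_driver_mutex here (as the old code did
           * via cpu_hotplug_driver_lock()) would self-deadlock; a private
           * mutex serializes access to the saved-microcode cache instead.
           */
          pthread_mutex_lock(&x86_cpu_microcode_mutex);
          mc_saved_count++;
          pthread_mutex_unlock(&x86_cpu_microcode_mutex);
  }

  /* Models store_online() -> cpu_up() -> ... -> save_mc_for_early(). */
  static void *cpu_online_store(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&cpu_hotplug_driver_mutex);
          save_mc_for_early();
          pthread_mutex_unlock(&cpu_hotplug_driver_mutex);
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, cpu_online_store, NULL);
          pthread_join(t, NULL);
          printf("saved %d microcode blob(s) without self-deadlock\n",
                 mc_saved_count);
          return 0;
  }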
From cf8b166d5c1c89aad6c436a954fa40fd18a75bfb Mon Sep 17 00:00:00 2001
From: Zhang Yanfei
Date: Thu, 9 May 2013 23:57:42 +0800
Subject: x86/mm: Add missing comments for initial kernel direct mapping

Two sets of comments were lost during patch-series shuffling:

 - comments for init_range_memory_mapping()

 - comments in init_mem_mapping() that are helpful for reminding people
   that the page table is set up top-down

The comments were written by Yinghai in his patch in:

  https://lkml.org/lkml/2012/11/28/620

This patch reintroduces them.

Originally-From: Yinghai Lu
Signed-off-by: Zhang Yanfei
Cc: Yasuaki Ishimatsu
Cc: Konrad Rzeszutek Wilk
Cc: Andrew Morton
Link: http://lkml.kernel.org/r/518BC776.7010506@gmail.com
[ Tidied it all up a bit. ]
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/init.c | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

(limited to 'arch/x86')

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fdc5dca14fb3..eaac1743def7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
                                            unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
         max_pfn_mapped = 0; /* will get exact value next */
         min_pfn_mapped = real_end >> PAGE_SHIFT;
         last_start = start = real_end;
+
+        /*
+         * We start from the top (end of memory) and go to the bottom.
+         * The memblock_find_in_range() gets us a block of RAM from the
+         * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+         * for page table.
+         */
         while (last_start > ISA_END_ADDRESS) {
                 if (last_start > step_size) {
                         start = round_down(last_start - 1, step_size);
--
cgit v1.2.3
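The restored comments describe a top-down walk: mapping starts at the end of
RAM and proceeds toward ISA_END_ADDRESS, so that page-table pages can always
be allocated from memory that is already mapped. The small userspace model
below prints the ranges in the order they would be mapped; the constant step
size and memory size are simplifications for illustration, not the kernel's
actual step-growing policy.

  /*
   * Userspace model of the top-down direct-mapping walk. The constants
   * below are illustrative; only ISA_END_ADDRESS matches the kernel.
   */
  #include <stdio.h>

  #define ISA_END_ADDRESS 0x100000UL        /* 1 MB, as in the kernel */
  #define STEP_SIZE       (64UL << 20)      /* simplified: constant 64 MB steps */

  static void map_range(unsigned long start, unsigned long end)
  {
          printf("map [%#010lx - %#010lx)\n", start, end);
  }

  int main(void)
  {
          unsigned long real_end = 512UL << 20;  /* pretend RAM ends at 512 MB */
          unsigned long last_start = real_end;
          unsigned long start;

          /* Walk from the top of RAM down to the end of the ISA hole. */
          while (last_start > ISA_END_ADDRESS) {
                  if (last_start > STEP_SIZE)
                          start = last_start - STEP_SIZE; /* next chunk below */
                  else
                          start = ISA_END_ADDRESS;        /* final low chunk */
                  map_range(start, last_start);
                  last_start = start;
          }
          return 0;
  }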