author		Steven Rostedt (Red Hat) <rostedt@goodmis.org>	2013-03-20 06:20:27 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-03-20 06:20:27 -0400
commit		1cd446d3e9954ac0cdd0e9680b391b906443eeb3 (patch)
tree		fbbcd9849dcfa57b371fe33058f4b61d8e0db375 /mm/page_alloc.c
parent		71d52d4e70160c43dffe1926719c9f54ff715f95 (diff)
parent		98bbf3565e3147a40c583ff97e2b5a98370c21a5 (diff)
Merge tag 'v3.0.67' into v3.0-rt
This is the 3.0.67 stable release

Conflicts:
	kernel/hrtimer.c

Solved with:

diff --cc kernel/hrtimer.c
index 87b4917,e079c3e..0000000
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@@ -1054,19 -982,20 +1032,39 @@@ int __hrtimer_start_range_ns(struct hrt
 	 *
 	 * XXX send_remote_softirq() ?
 	 */
-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
-		ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
-		if (ret) {
+	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
+		&& hrtimer_enqueue_reprogram(timer, new_base)) {
-		if (wakeup) {
++
++		if (wakeup
++#ifdef CONFIG_PREEMPT_RT_BASE
++			/*
++			 * Move softirq based timers away from the rbtree in
++			 * case it expired already. Otherwise we would have a
++			 * stale base->first entry until the softirq runs.
++			 */
++			&& hrtimer_rt_defer(timer)
++#endif
++		) {
 			/*
-			 * In case we failed to reprogram the timer (mostly
-			 * because out current timer is already elapsed),
-			 * remove it again and report a failure. This avoids
-			 * stale base->first entries.
+			 * We need to drop cpu_base->lock to avoid a
+			 * lock ordering issue vs. rq->lock.
 			 */
-			debug_deactivate(timer);
-			__remove_hrtimer(timer, new_base,
-					timer->state & HRTIMER_STATE_CALLBACK, 0);
+			raw_spin_unlock(&new_base->cpu_base->lock);
+			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+			local_irq_restore(flags);
+			return ret;
-		} else {
-			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
 		}
++
++		/*
++		 * In case we failed to reprogram the timer (mostly
++		 * because out current timer is already elapsed),
++		 * remove it again and report a failure. This avoids
++		 * stale base->first entries.
++		 */
++		debug_deactivate(timer);
++		__remove_hrtimer(timer, new_base,
++				timer->state & HRTIMER_STATE_CALLBACK, 0);
++		ret = -ETIME;
 	}

 	unlock_hrtimer_base(timer, &flags);

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9534b6ccc0fc..d0b3046864fb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4325,10 +4325,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
-static unsigned long __init usemap_size(unsigned long zonesize)
+static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
 {
 	unsigned long usemapsize;
 
+	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
 	usemapsize = roundup(zonesize, pageblock_nr_pages);
 	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
@@ -4338,17 +4339,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
 }
 
 static void __init setup_usemap(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long zonesize)
+				struct zone *zone,
+				unsigned long zone_start_pfn,
+				unsigned long zonesize)
 {
-	unsigned long usemapsize = usemap_size(zonesize);
+	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
 	zone->pageblock_flags = NULL;
 	if (usemapsize)
 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
 								   usemapsize);
 }
 #else
-static inline void setup_usemap(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long zonesize) {}
+static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
+				unsigned long zone_start_pfn, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -4476,7 +4479,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			continue;
 
 		set_pageblock_order(pageblock_default_order());
-		setup_usemap(pgdat, zone, size);
+		setup_usemap(pgdat, zone, zone_start_pfn, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
 		BUG_ON(ret);
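
For context (not part of the commit): the change above pads zonesize with the zone's starting offset inside a pageblock, so a zone that does not begin on a pageblock boundary still gets enough pageblock_flags bits. Below is a minimal userspace sketch of that arithmetic only, not kernel code. The values of pageblock_order and NR_PAGEBLOCK_BITS are assumptions for illustration, and the byte-rounding tail of usemap_size() (which falls outside the hunk shown) is filled in from the surrounding kernel function.

/*
 * Standalone sketch of the usemap_size() change, NOT kernel code.
 * Assumed constants: pageblock_order = 9 (2MB pageblocks with 4K pages),
 * NR_PAGEBLOCK_BITS = 4.
 */
#include <stdio.h>

#define PAGEBLOCK_ORDER		9UL
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define NR_PAGEBLOCK_BITS	4UL

#define ROUNDUP(x, y)		((((x) + (y) - 1) / (y)) * (y))

/* Old calculation: only zonesize is considered. */
static unsigned long usemap_size_old(unsigned long zonesize)
{
	unsigned long usemapsize;

	usemapsize = ROUNDUP(zonesize, PAGEBLOCK_NR_PAGES);
	usemapsize >>= PAGEBLOCK_ORDER;		/* number of pageblocks */
	usemapsize *= NR_PAGEBLOCK_BITS;	/* bits needed */
	usemapsize = ROUNDUP(usemapsize, 8 * sizeof(unsigned long));
	return usemapsize / 8;			/* bytes */
}

/* New calculation: pad by the zone's offset within its first pageblock. */
static unsigned long usemap_size_new(unsigned long zone_start_pfn,
				     unsigned long zonesize)
{
	zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
	return usemap_size_old(zonesize);
}

int main(void)
{
	/* Hypothetical zone: spans 16 pageblocks but starts 256 pages into one. */
	unsigned long zone_start_pfn = 256;
	unsigned long zonesize = 16 * PAGEBLOCK_NR_PAGES;

	printf("old: %lu bytes\n", usemap_size_old(zonesize));
	printf("new: %lu bytes\n", usemap_size_new(zone_start_pfn, zonesize));
	return 0;
}

With these assumed numbers the old formula allocates 8 bytes (16 pageblocks, 64 bits) while the new one allocates 16 bytes (17 pageblocks, 68 bits rounded up to a whole unsigned long), covering the partial pageblock at the start of the zone.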