-rw-r--r-- | arch/x86/include/asm/preempt.h |  8
-rw-r--r-- | arch/x86/kernel/asm-offsets.c  |  1
-rw-r--r-- | arch/x86/kernel/entry_32.S     |  9
-rw-r--r-- | arch/x86/kernel/entry_64.S     | 29
-rw-r--r-- | block/blk-mq.c                 |  8
-rw-r--r-- | include/linux/preempt.h        |  3
-rw-r--r-- | include/linux/rwsem_rt.h       |  1
-rw-r--r-- | kernel/cpu.c                   |  2
-rw-r--r-- | kernel/locking/rt.c            | 60
-rw-r--r-- | kernel/stop_machine.c          |  2
10 files changed, 69 insertions, 54 deletions
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 752fe5647288..1e649c49d28c 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -94,7 +94,11 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	if (____preempt_count_dec_and_test())
 		return true;
+#ifdef CONFIG_PREEMPT_LAZY
 	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
 }
 
 /*
@@ -102,8 +106,12 @@ static __always_inline bool __preempt_count_dec_and_test(void)
  */
 static __always_inline bool should_resched(void)
 {
+#ifdef CONFIG_PREEMPT_LAZY
 	return unlikely(!__this_cpu_read_4(__preempt_count) || \
 			test_thread_flag(TIF_NEED_RESCHED_LAZY));
+#else
+	return unlikely(!__this_cpu_read_4(__preempt_count));
+#endif
 }
 
 #ifdef CONFIG_PREEMPT
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 7c8b356d3a49..5701b507510b 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -72,4 +72,5 @@
 void common(void) {
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+	DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
 }
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index fd2d9769114d..6157ed6eab95 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -365,19 +365,22 @@ ENTRY(resume_kernel)
 need_resched:
 	# preempt count == 0 + NEED_RS set?
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
+	jnz restore_all
+#else
 	jz test_int_off
 
 	# atleast preempt count == 0 ?
-	cmpl $_TIF_NEED_RESCHED,PER_CPU_VAR(__preempt_count)
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
 	jne restore_all
 
 	cmpl $0,TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
 	jnz restore_all
 
-	testl $_TIF_NEED_RESCHED_LAZY, %ecx
+	testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
 	jz restore_all
-
 test_int_off:
+#endif
 	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b650b437c118..d893814a0c18 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -658,8 +658,8 @@ sysret_check:
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc sysret_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz sysret_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -771,8 +771,8 @@ GLOBAL(int_with_check)
 	/* First do a reschedule test. */
 	/* edx:	work, edi: workmask */
 int_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc int_very_careful
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1071,8 +1071,8 @@ bad_iret:
 	/* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
-	bt $TIF_NEED_RESCHED,%edx
-	jnc retint_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@@ -1104,7 +1104,22 @@ retint_signal:
 	/* rcx: threadinfo. interrupts off. */
 ENTRY(retint_kernel)
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz retint_restore_args
+#else
+	jz check_int_off
+
+	# atleast preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+	jnz retint_restore_args
+
+	cmpl $0, TI_preempt_lazy_count(%rcx)
+	jnz retint_restore_args
+
+	bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+	jnc retint_restore_args
+check_int_off:
+#endif
 	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc retint_restore_args
 	call preempt_schedule_irq
@@ -1540,7 +1555,7 @@ paranoid_userspace:
 	movq %rsp,%rdi			/* &pt_regs */
 	call sync_regs
 	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
+	testl $_TIF_NEED_RESCHED_MASK,%ebx
 	jnz paranoid_schedule
 	movl %ebx,%edx			/* arg3: thread flags */
 	TRACE_IRQS_ON
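Reduced to C, the test the patched resume_kernel/retint_kernel paths now make looks roughly like the sketch below. This is for readability only, not kernel code: want_kernel_preempt(), this_preempt_count() and this_preempt_lazy_count() are hypothetical stand-ins for the PER_CPU_VAR(__preempt_count) read and the TI_preempt_lazy_count/TI_flags thread_info fields the assembly accesses directly.

/*
 * Readability sketch. On x86 the NEED_RESCHED state is folded into
 * __preempt_count with inverted polarity, so a raw value of 0 means
 * "count == 0 and an immediate reschedule is pending", while
 * _PREEMPT_ENABLED means "count == 0, nothing urgent pending".
 */
static bool want_kernel_preempt(void)
{
	unsigned int pc = this_preempt_count();	/* hypothetical: PER_CPU_VAR(__preempt_count) */

#ifndef CONFIG_PREEMPT_LAZY
	return pc == 0;				/* classic check: count 0 + NEED_RESCHED */
#else
	if (pc == 0)				/* immediate reschedule requested */
		return true;
	if (pc != _PREEMPT_ENABLED)		/* preemption still disabled */
		return false;
	if (this_preempt_lazy_count())		/* hypothetical: TI_preempt_lazy_count */
		return false;
	return test_thread_flag(TIF_NEED_RESCHED_LAZY);	/* lazy request pending? */
#endif
}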
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 25afbcc3a742..5fb26f785320 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -48,9 +48,14 @@ static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
 	return __blk_mq_get_ctx(q, get_cpu_light());
 }
 
-static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+static void __blk_mq_put_ctx(struct blk_mq_ctx *ctx)
 {
 	spin_unlock(&ctx->cpu_lock);
+}
+
+static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
+{
+	__blk_mq_put_ctx(ctx);
 	put_cpu_light();
 }
 
@@ -980,6 +985,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 		clear_bit(ctx->index_hw, hctx->ctx_map);
 	}
 	spin_unlock(&ctx->lock);
+	__blk_mq_put_ctx(ctx);
 
 	if (list_empty(&tmp))
 		return;
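The blk-mq hunk is a plain split-teardown refactor: __blk_mq_put_ctx() now drops only the per-ctx lock, while blk_mq_put_ctx() keeps the old behaviour of also dropping the CPU reference. That lets blk_mq_hctx_notify(), which never called get_cpu_light(), release the lock it holds without unbalancing the CPU reference. Schematically (a sketch of the pairing, not the in-tree code):

/* Acquire side: CPU reference first, then the ctx lock. */
ctx = __blk_mq_get_ctx(q, get_cpu_light());

/* Normal release: inverse order, via the outer helper. */
blk_mq_put_ctx(ctx);	/* == __blk_mq_put_ctx(ctx); put_cpu_light(); */

/*
 * Hotplug-notifier release: the notifier took only the lock, so it
 * must use the inner helper and skip put_cpu_light().
 */
__blk_mq_put_ctx(ctx);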
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 116af6a9c380..5b2cdf4a6e76 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -126,8 +126,7 @@ do { \
 #define preempt_enable_notrace() \
 do { \
 	barrier(); \
-	if (unlikely(__preempt_count_dec_and_test() || \
-			test_thread_flag(TIF_NEED_RESCHED_LAZY))) \
+	if (unlikely(__preempt_count_dec_and_test())) \
 		__preempt_schedule_context(); \
 } while (0)
 #else
diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h
index 924c2d274ab5..0065b08fbb7a 100644
--- a/include/linux/rwsem_rt.h
+++ b/include/linux/rwsem_rt.h
@@ -20,7 +20,6 @@
 struct rw_semaphore {
 	struct rt_mutex		lock;
-	int			read_depth;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
 };
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 041fada58f4c..ce0032983fc9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -649,7 +649,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		smpboot_unpark_threads(cpu);
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
-		goto out_cancel;
+		goto out_release;
 	}
 
 	BUG_ON(cpu_online(cpu));
diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c
index 5d177272ffb4..055a3df13c2a 100644
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
@@ -180,12 +180,14 @@ EXPORT_SYMBOL(_mutex_unlock);
  */
 int __lockfunc rt_write_trylock(rwlock_t *rwlock)
 {
-	int ret = rt_mutex_trylock(&rwlock->lock);
+	int ret;
 
-	if (ret) {
+	migrate_disable();
+	ret = rt_mutex_trylock(&rwlock->lock);
+	if (ret)
 		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-		migrate_disable();
-	}
+	else
+		migrate_enable();
 
 	return ret;
 }
@@ -212,11 +214,13 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
 	 * write locked.
 	 */
 	if (rt_mutex_owner(lock) != current) {
+		migrate_disable();
 		ret = rt_mutex_trylock(lock);
-		if (ret) {
+		if (ret)
 			rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-			migrate_disable();
-		}
+		else
+			migrate_enable();
+
 	} else if (!rwlock->read_depth) {
 		ret = 0;
 	}
@@ -240,13 +244,14 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
 {
 	struct rt_mutex *lock = &rwlock->lock;
 
+
 	/*
 	 * recursive read locks succeed when current owns the lock
 	 */
 	if (rt_mutex_owner(lock) != current) {
-		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
-		__rt_spin_lock(lock);
 		migrate_disable();
+		rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+		__rt_spin_lock(lock);
 	}
 	rwlock->read_depth++;
 }
@@ -316,10 +321,8 @@ EXPORT_SYMBOL(rt_up_write);
 
 void rt_up_read(struct rw_semaphore *rwsem)
 {
-	if (--rwsem->read_depth == 0) {
-		rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
-		rt_mutex_unlock(&rwsem->lock);
-	}
+	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+	rt_mutex_unlock(&rwsem->lock);
 }
 EXPORT_SYMBOL(rt_up_read);
 
@@ -330,7 +333,6 @@ EXPORT_SYMBOL(rt_up_read);
 
 void rt_downgrade_write(struct rw_semaphore *rwsem)
 {
 	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
-	rwsem->read_depth = 1;
 }
 EXPORT_SYMBOL(rt_downgrade_write);
@@ -367,37 +369,20 @@ void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
 
 int rt_down_read_trylock(struct rw_semaphore *rwsem)
 {
-	struct rt_mutex *lock = &rwsem->lock;
-	int ret = 1;
-
-	/*
-	 * recursive read locks succeed when current owns the rwsem,
-	 * but not when read_depth == 0 which means that the rwsem is
-	 * write locked.
-	 */
-	if (rt_mutex_owner(lock) != current) {
-		ret = rt_mutex_trylock(&rwsem->lock);
-		if (ret)
-			rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
-	} else if (!rwsem->read_depth) {
-		ret = 0;
-	}
+	int ret;
 
+	ret = rt_mutex_trylock(&rwsem->lock);
 	if (ret)
-		rwsem->read_depth++;
+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+
 	return ret;
 }
 EXPORT_SYMBOL(rt_down_read_trylock);
 
 static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
 {
-	struct rt_mutex *lock = &rwsem->lock;
-
-	if (rt_mutex_owner(lock) != current) {
-		rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
-		rt_mutex_lock(&rwsem->lock);
-	}
-	rwsem->read_depth++;
+	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
 }
 
 void rt_down_read(struct rw_semaphore *rwsem)
@@ -422,7 +407,6 @@ void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
 	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
 	lockdep_init_map(&rwsem->dep_map, name, key, 0);
 #endif
-	rwsem->read_depth = 0;
 	rwsem->lock.save_state = 0;
 }
 EXPORT_SYMBOL(__rt_rwsem_init);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index a933173675bf..bcbae9c962a9 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -675,7 +675,7 @@ int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
 	ret = multi_cpu_stop(&msdata);
 
 	/* Busy wait for completion. */
-	while (!atomic_read(&done.nr_todo))
+	while (atomic_read(&done.nr_todo))
 		cpu_relax();
 
 	mutex_unlock(&stop_cpus_mutex);
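A recurring pattern in the rt.c hunks is worth spelling out: migrate_disable() is now taken before the trylock attempt and rolled back with migrate_enable() on the failure path, so the task is already pinned at the moment it becomes a lock owner and the two calls always balance. The skeleton, as a sketch only (pinned_trylock() is an illustrative name, not an in-tree function):

static int pinned_trylock(struct rt_mutex *lock)
{
	int ret;

	migrate_disable();		/* pin to this CPU before we can become owner */
	ret = rt_mutex_trylock(lock);
	if (!ret)
		migrate_enable();	/* failed: roll the pin back immediately */
	return ret;			/* success: the unlock path does migrate_enable() */
}

The stop_machine.c hunk is an inverted-polarity fix to a busy-wait of the same shape: completion is reached when done.nr_todo drops to zero, so the loop must spin while the counter is still non-zero, not while it is already zero.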