From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:31:14 +0200
Subject: rtmutex: wire up RT's locking
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.3-rt1.tar.xz

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/Kbuild                   |  7 +++++++
 include/linux/mutex.h            | 20 +++++++++++++-------
 include/linux/percpu-rwsem.h     |  4 ++--
 include/linux/rwsem.h            | 12 ++++++++++++
 include/linux/spinlock.h         | 12 +++++++++++-
 include/linux/spinlock_api_smp.h |  4 +++-
 include/linux/spinlock_types.h   | 11 ++++++++---
 kernel/locking/Makefile          | 10 +++++++---
 kernel/locking/rwsem.c           |  8 ++++++++
 kernel/locking/rwsem.h           |  2 ++
 kernel/locking/spinlock.c        |  7 +++++++
 kernel/locking/spinlock_debug.c  |  5 +++++
 12 files changed, 85 insertions(+), 17 deletions(-)

--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1158,8 +1158,15 @@ header-test- += xen/xenbus.h
 # Do not include directly
 header-test- += linux/compiler-clang.h
 header-test- += linux/compiler-gcc.h
+header-test- += linux/mutex_rt.h
 header-test- += linux/patchkey.h
 header-test- += linux/rwlock_api_smp.h
+header-test- += linux/rwlock_rt.h
+header-test- += linux/rwlock_types_rt.h
+header-test- += linux/rwsem-rt.h
+header-test- += linux/spinlock_rt.h
+header-test- += linux/spinlock_types_nort.h
+header-test- += linux/spinlock_types_rt.h
 header-test- += linux/spinlock_types_up.h
 header-test- += linux/spinlock_up.h
 header-test- += linux/wimax/debug.h
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -22,6 +22,17 @@
 
 struct ww_acquire_ctx;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+		, .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT
+# include <linux/mutex_rt.h>
+#else
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@@ -108,13 +119,6 @@ do {							\
 	__mutex_init((mutex), #mutex, &__key);		\
 } while (0)
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-		, .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .owner = ATOMIC_LONG_INIT(0) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@@ -210,4 +214,6 @@ enum mutex_trylock_recursive_enum {
 extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
 mutex_trylock_recursive(struct mutex *lock);
 
+#endif /* !PREEMPT_RT */
+
 #endif /* __LINUX_MUTEX_H */
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -119,7 +119,7 @@ static inline void percpu_rwsem_release(
 					bool read, unsigned long ip)
 {
 	lock_release(&sem->rw_sem.dep_map, 1, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#if defined(CONFIG_RWSEM_SPIN_ON_OWNER) && !defined(CONFIG_PREEMPT_RT)
 	if (!read)
 		atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
 #endif
@@ -129,7 +129,7 @@ static inline void percpu_rwsem_acquire(
 					bool read, unsigned long ip)
 {
 	lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#if defined(CONFIG_RWSEM_SPIN_ON_OWNER) && !defined(CONFIG_PREEMPT_RT)
 	if (!read)
 		atomic_long_set(&sem->rw_sem.owner, (long)current);
 #endif
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -16,6 +16,11 @@
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/err.h>
+
+#ifdef CONFIG_PREEMPT_RT
+#include <linux/rwsem-rt.h>
+#else /* PREEMPT_RT */
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
@@ -121,6 +126,13 @@ static inline int rwsem_is_contended(str
 	return !list_empty(&sem->wait_list);
 }
 
+#endif /* !PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
 /*
  * lock for reading
  */
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -307,7 +307,11 @@ static inline void do_raw_spin_unlock(ra
 })
 
 /* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -318,6 +322,10 @@ static inline void do_raw_spin_unlock(ra
 # include <linux/spinlock_api_up.h>
 #endif
 
+#ifdef CONFIG_PREEMPT_RT
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT */
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
@@ -438,6 +446,8 @@ static __always_inline int spin_is_conte
 
 #define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
 
+#endif /* !PREEMPT_RT */
+
 /*
  * Pull the atomic_t declaration:
  * (asm-mips/atomic.h needs above definitions)
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(
 	return 0;
 }
 
-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT
+# include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -11,8 +11,13 @@
 
 #include <linux/spinlock_types_raw.h>
 
-#include <linux/spinlock_types_nort.h>
-
-#include <linux/rwlock_types.h>
+#ifndef CONFIG_PREEMPT_RT
+# include <linux/spinlock_types_nort.h>
+# include <linux/rwlock_types.h>
+#else
+# include <linux/rtmutex.h>
+# include <linux/spinlock_types_rt.h>
+# include <linux/rwlock_types_rt.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -3,7 +3,7 @@
 # and is generally not a function of system call inputs.
 KCOV_INSTRUMENT		:= n
 
-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
+obj-y += semaphore.o rwsem.o percpu-rwsem.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -12,19 +12,23 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS
 CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
 endif
 
-obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
 endif
 obj-$(CONFIG_SMP) += spinlock.o
-obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ifneq ($(CONFIG_PREEMPT_RT),y)
+obj-y += mutex.o
+obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
+obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+endif
+obj-$(CONFIG_PREEMPT_RT) += mutex-rt.o rwsem-rt.o rwlock-rt.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -29,6 +29,8 @@
 #include <linux/atomic.h>
 
 #include "rwsem.h"
+
+#ifndef CONFIG_PREEMPT_RT
 #include "lock_events.h"
 
 /*
@@ -1335,6 +1337,7 @@ static struct rw_semaphore *rwsem_downgr
 	return sem;
 }
 
+
 /*
  * lock for reading
  */
@@ -1485,6 +1488,7 @@ static inline void __downgrade_write(str
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
 }
+#endif
 
 /*
  * lock for reading
@@ -1616,6 +1620,7 @@ void _down_write_nest_lock(struct rw_sem
 }
 EXPORT_SYMBOL(_down_write_nest_lock);
 
+#ifndef CONFIG_PREEMPT_RT
 void down_read_non_owner(struct rw_semaphore *sem)
 {
 	might_sleep();
@@ -1623,6 +1628,7 @@ void down_read_non_owner(struct rw_semap
 	__rwsem_set_reader_owned(sem, NULL);
 }
 EXPORT_SYMBOL(down_read_non_owner);
+#endif
 
 void down_write_nested(struct rw_semaphore *sem, int subclass)
 {
@@ -1647,11 +1653,13 @@ int __sched down_write_killable_nested(s
 }
 EXPORT_SYMBOL(down_write_killable_nested);
 
+#ifndef CONFIG_PREEMPT_RT
 void up_read_non_owner(struct rw_semaphore *sem)
 {
 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	__up_read(sem);
 }
 EXPORT_SYMBOL(up_read_non_owner);
+#endif
 
 #endif
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -4,7 +4,9 @@
 #define __INTERNAL_RWSEM_H
 #include <linux/rwsem.h>
 
+#ifndef CONFIG_PREEMPT_RT
 extern void __down_read(struct rw_semaphore *sem);
 extern void __up_read(struct rw_semaphore *sem);
+#endif
 
 #endif /* __INTERNAL_RWSEM_H */
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
  *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_PREEMPT_RT
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !PREEMPT_RT */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
 {
@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
 	arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
 	if (!debug_locks_off())
@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+#endif
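
The wiring above is purely a compile-time switch, so lock call sites build
unchanged on both configurations. As a usage illustration only (not part of
the patch; demo_lock and demo_critical_section are made-up names, and a tree
with this patch and the RT headers it references is assumed):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/spinlock.h>	/* resolves to spinlock_rt.h when PREEMPT_RT=y */

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical example lock */

static void demo_critical_section(void)
{
	/*
	 * PREEMPT_RT=n: spin_lock() maps to raw_spin_lock() and disables
	 * preemption for the critical section.
	 * PREEMPT_RT=y: spin_lock() takes an rtmutex-based sleeping lock,
	 * so the critical section stays preemptible; code that must remain
	 * truly atomic has to use raw_spinlock_t instead.
	 */
	spin_lock(&demo_lock);
	/* ... access data protected by demo_lock ... */
	spin_unlock(&demo_lock);
}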