locking/kconfig: Simplify INLINE_SPIN_UNLOCK usage
author Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Thu, 22 Mar 2012 09:55:08 +0000 (15:25 +0530)
committer Ingo Molnar <mingo@kernel.org>
Fri, 23 Mar 2012 12:18:57 +0000 (13:18 +0100)
Get rid of INLINE_SPIN_UNLOCK entirely, replacing it with
UNINLINE_SPIN_UNLOCK, which has the inverse meaning.

Whoever wants to change the default spinlock inlining
behavior and uninline the spinlocks for some reason,
such as spinlock debugging, paravirt, etc., can now simply
select UNINLINE_SPIN_UNLOCK.

Original discussion at: https://lkml.org/lkml/2012/3/21/357
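
The net effect on the unlock fast path can be sketched as follows. This
is a simplified composite of the post-patch logic, merging the pieces
that actually live in two files (include/linux/spinlock_api_smp.h and
kernel/spinlock.c) into one fragment, with the surrounding declarations
elided:

	/*
	 * When UNINLINE_SPIN_UNLOCK is not selected (the common case),
	 * _raw_spin_unlock() expands to the inline fast path.  When it
	 * is selected -- by PREEMPT without ARCH_INLINE_SPIN_UNLOCK, by
	 * DEBUG_SPINLOCK, or by anything else that wants an out-of-line
	 * unlock -- the macro is left undefined and the exported
	 * function below is used instead.
	 */
	#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
	#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
	#else
	void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
	{
		__raw_spin_unlock(lock);
	}
	EXPORT_SYMBOL(_raw_spin_unlock);
	#endif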

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: linux-mips@linux-mips.org
Link: http://lkml.kernel.org/r/20120322095502.30866.75756.sendpatchset@codeblue
[ tidied up the changelog a bit ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

arch/mips/configs/db1300_defconfig
arch/xtensa/configs/iss_defconfig
include/linux/spinlock_api_smp.h
kernel/Kconfig.locks
kernel/Kconfig.preempt
kernel/spinlock.c
lib/Kconfig.debug

diff --git a/arch/mips/configs/db1300_defconfig b/arch/mips/configs/db1300_defconfig
index c38b190..3590ab5 100644
@@ -133,7 +133,7 @@ CONFIG_BLK_DEV_BSG=y
 CONFIG_IOSCHED_NOOP=y
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
 CONFIG_INLINE_READ_UNLOCK=y
 CONFIG_INLINE_READ_UNLOCK_IRQ=y
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index f932b30..ddab37b 100644
@@ -113,7 +113,7 @@ CONFIG_DEFAULT_IOSCHED="noop"
 # CONFIG_INLINE_SPIN_LOCK_BH is not set
 # CONFIG_INLINE_SPIN_LOCK_IRQ is not set
 # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
-CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_UNINLINE_SPIN_UNLOCK is not set
 # CONFIG_INLINE_SPIN_UNLOCK_BH is not set
 CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
 # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index e253ccd..51df117 100644
@@ -67,7 +67,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
 #endif
 
-#ifdef CONFIG_INLINE_SPIN_UNLOCK
+#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
 #define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
 #endif
 
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 5068e2a..2251882 100644
@@ -124,8 +124,8 @@ config INLINE_SPIN_LOCK_IRQSAVE
        def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
                 ARCH_INLINE_SPIN_LOCK_IRQSAVE
 
-config INLINE_SPIN_UNLOCK
-       def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
+config UNINLINE_SPIN_UNLOCK
+       bool
 
 config INLINE_SPIN_UNLOCK_BH
        def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 24e7cb0..3f9c974 100644
@@ -36,6 +36,7 @@ config PREEMPT_VOLUNTARY
 config PREEMPT
        bool "Preemptible Kernel (Low-Latency Desktop)"
        select PREEMPT_COUNT
+       select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
        help
          This option reduces the latency of the kernel by making
          all kernel code (that is not executing in a critical section)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 84c7d96..5cdd806 100644
@@ -163,7 +163,7 @@ void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_lock_bh);
 #endif
 
-#ifndef CONFIG_INLINE_SPIN_UNLOCK
+#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
 {
        __raw_spin_unlock(lock);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 05037dc..f32a41f 100644
@@ -499,6 +499,7 @@ config RT_MUTEX_TESTER
 config DEBUG_SPINLOCK
        bool "Spinlock and rw-lock debugging: basic checks"
        depends on DEBUG_KERNEL
+       select UNINLINE_SPIN_UNLOCK
        help
          Say Y here and build SMP to catch missing spinlock initialization
          and certain other kinds of spinlock errors commonly made.  This is