author     Linus Torvalds  2010-11-17 14:58:36 -0800
committer  Linus Torvalds  2010-11-17 14:58:36 -0800
commit     7957f0a857754c555e07f58a3fb83ac29501478c (patch)
tree       120976183d3f871b2023a745e888d71f96fbcfb3 /include
parent     460781b54253e3ed10a0a2a433bdc548ec952269 (diff)
Fix build failure due to hwirq.h needing smp_lock.h
Arnd Bergmann did an automated scripting run to find left-over instances of <linux/smp_lock.h>, and had made it trigger on the normal BKL uses of lock_kernel and unlock_kernel (and apparently release_kernel_lock and reacquire_kernel_lock too, used by the scheduler). That resulted in commit 451a3c24b013 ("BKL: remove extraneous #include <smp_lock.h>").

However, hardirq.h was the only remaining user of the old 'kernel_locked()' interface, and Arnd's script hadn't checked for that. So depending on your configuration and which header files had been included, you would get errors like "implicit declaration of function 'kernel_locked'" during the build.

The right fix is not to just reinstate the smp_lock.h include - it is to remove 'kernel_locked()' entirely, since the only use was this one special low-level detail. Just make hardirq.h do it directly.

In fact this simplifies and clarifies the code, because some trivial analysis makes it clear that hardirq.h only ever used _one_ of the two definitions of kernel_locked(), so we can remove the other one entirely.

Reported-by: Zimny Lech <napohybelskurwysynom2010@gmail.com>
Reported-and-acked-by: Randy Dunlap <randy.dunlap@oracle.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
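For reference, the convention the new define relies on: the BKL records its nesting depth in task_struct::lock_depth, which stays at -1 while the lock is not held, so "current->lock_depth >= 0" is exactly the CONFIG_LOCK_KERNEL definition of kernel_locked() open-coded. A stand-alone sketch of that convention (the struct and helper names below are illustrative, not kernel code; only the -1 / >= 0 convention mirrors the kernel):

#include <assert.h>

/* Stand-alone model of the BKL depth convention. */
struct task { int lock_depth; };
static struct task cur = { .lock_depth = -1 };    /* -1: BKL not held */

static void lock_kernel_model(void)   { cur.lock_depth++; }
static void unlock_kernel_model(void) { cur.lock_depth--; }

/* The test hardirq.h now open-codes instead of calling kernel_locked(). */
static int bkl_held(void) { return cur.lock_depth >= 0; }

int main(void)
{
	assert(!bkl_held());       /* depth -1: not held */
	lock_kernel_model();       /* depth  0: held */
	lock_kernel_model();       /* depth  1: held recursively */
	unlock_kernel_model();
	assert(bkl_held());        /* still held until depth drops below 0 */
	unlock_kernel_model();
	assert(!bkl_held());
	return 0;
}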
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hardirq.h    2
-rw-r--r--  include/linux/smp_lock.h   3
2 files changed, 1 insertion, 4 deletions
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8f3f467c57c6..bea1612d8f5c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -96,7 +96,7 @@
#define in_nmi() (preempt_count() & NMI_MASK)
#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
-# define PREEMPT_INATOMIC_BASE kernel_locked()
+# define PREEMPT_INATOMIC_BASE (current->lock_depth >= 0)
#else
# define PREEMPT_INATOMIC_BASE 0
#endif
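For context, a sketch of the baseline idea behind PREEMPT_INATOMIC_BASE (this reading is inferred from the macro's name and use rather than quoted from the header, and every name in the sketch is hypothetical): on a preemptible kernel, holding the BKL keeps the preempt count raised by one, so the "not atomic" baseline has to follow whether the BKL is held; with the BKL configured out, the baseline is simply 0.

#include <assert.h>

/* Hypothetical model of the baseline check; none of these names are the
 * kernel's, and the real comparison lives in hardirq.h. */
static int preempt_count_model;     /* raised while preemption is disabled */
static int bkl_depth = -1;          /* -1: BKL not held */

static int inatomic_base(void) { return bkl_depth >= 0 ? 1 : 0; }

/* Atomic only if the count exceeds what holding the BKL accounts for. */
static int in_atomic_model(void) { return preempt_count_model != inatomic_base(); }

int main(void)
{
	assert(!in_atomic_model());              /* nothing held */
	bkl_depth = 0; preempt_count_model = 1;  /* take the BKL: count +1 */
	assert(!in_atomic_model());              /* BKL alone is not "atomic" */
	preempt_count_model++;                   /* e.g. a spinlock on top */
	assert(in_atomic_model());
	return 0;
}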
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h
index 291f721144c2..3a1988202731 100644
--- a/include/linux/smp_lock.h
+++ b/include/linux/smp_lock.h
@@ -4,8 +4,6 @@
#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>
-#define kernel_locked() (current->lock_depth >= 0)
-
extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);
@@ -58,7 +56,6 @@ static inline void cycle_kernel_lock(void)
#define lock_kernel()
#define unlock_kernel()
#define cycle_kernel_lock() do { } while(0)
-#define kernel_locked() 1
#endif /* CONFIG_BKL */
#define release_kernel_lock(task) do { } while(0)