about summary refs log tree commit diff
path: root/kernel/lockdep_internals.h
diff options
context:
space:
mode:
authorPeter Zijlstra2013-10-31 18:14:17 +0100
committerIngo Molnar2013-11-06 07:55:08 +0100
commit8eddac3f103736163f49255bcb109edadea167f6 (patch)
treecd3161b76bb7a7e2614817d0ba66446676e6b677 /kernel/lockdep_internals.h
parent01768b42dc97a67b4fb33a2535c49fc1969880df (diff)
locking: Move the lockdep code to kernel/locking/
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-wl7s3tta5isufzfguc23et06@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/lockdep_internals.h')
-rw-r--r--kernel/lockdep_internals.h170
1 files changed, 0 insertions, 170 deletions
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
deleted file mode 100644
index 4f560cfedc8f..000000000000
--- a/kernel/lockdep_internals.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * kernel/lockdep_internals.h
- *
- * Runtime locking correctness validator
- *
- * lockdep subsystem internal functions and variables.
- */
-
-/*
- * Lock-class usage-state bits:
- */
-enum lock_usage_bit {
-#define LOCKDEP_STATE(__STATE) \
- LOCK_USED_IN_##__STATE, \
- LOCK_USED_IN_##__STATE##_READ, \
- LOCK_ENABLED_##__STATE, \
- LOCK_ENABLED_##__STATE##_READ,
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
- LOCK_USED,
- LOCK_USAGE_STATES
-};
-
-/*
- * Usage-state bitmasks:
- */
-#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),
-
-enum {
-#define LOCKDEP_STATE(__STATE) \
- __LOCKF(USED_IN_##__STATE) \
- __LOCKF(USED_IN_##__STATE##_READ) \
- __LOCKF(ENABLED_##__STATE) \
- __LOCKF(ENABLED_##__STATE##_READ)
-#include "lockdep_states.h"
-#undef LOCKDEP_STATE
- __LOCKF(USED)
-};
-
-#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
-#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
-
-#define LOCKF_ENABLED_IRQ_READ \
- (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
-#define LOCKF_USED_IN_IRQ_READ \
- (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
-
-/*
- * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
- * we track.
- *
- * We use the per-lock dependency maps in two ways: we grow it by adding
- * every to-be-taken lock to all currently held lock's own dependency
- * table (if it's not there yet), and we check it for lock order
- * conflicts and deadlocks.
- */
-#define MAX_LOCKDEP_ENTRIES 16384UL
-
-#define MAX_LOCKDEP_CHAINS_BITS 15
-#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
-
-/*
- * Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the hash_lock.
- */
-#define MAX_STACK_TRACE_ENTRIES 262144UL
-
-extern struct list_head all_lock_classes;
-extern struct lock_chain lock_chains[];
-
-#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
-
-extern void get_usage_chars(struct lock_class *class,
- char usage[LOCK_USAGE_CHARS]);
-
-extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
-
-struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
-
-extern unsigned long nr_lock_classes;
-extern unsigned long nr_list_entries;
-extern unsigned long nr_lock_chains;
-extern int nr_chain_hlocks;
-extern unsigned long nr_stack_trace_entries;
-
-extern unsigned int nr_hardirq_chains;
-extern unsigned int nr_softirq_chains;
-extern unsigned int nr_process_chains;
-extern unsigned int max_lockdep_depth;
-extern unsigned int max_recursion_depth;
-
-extern unsigned int max_bfs_queue_depth;
-
-#ifdef CONFIG_PROVE_LOCKING
-extern unsigned long lockdep_count_forward_deps(struct lock_class *);
-extern unsigned long lockdep_count_backward_deps(struct lock_class *);
-#else
-static inline unsigned long
-lockdep_count_forward_deps(struct lock_class *class)
-{
- return 0;
-}
-static inline unsigned long
-lockdep_count_backward_deps(struct lock_class *class)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-
-#include <asm/local.h>
-/*
- * Various lockdep statistics.
- * We want them per cpu as they are often accessed in fast path
- * and we want to avoid too much cache bouncing.
- */
-struct lockdep_stats {
- int chain_lookup_hits;
- int chain_lookup_misses;
- int hardirqs_on_events;
- int hardirqs_off_events;
- int redundant_hardirqs_on;
- int redundant_hardirqs_off;
- int softirqs_on_events;
- int softirqs_off_events;
- int redundant_softirqs_on;
- int redundant_softirqs_off;
- int nr_unused_locks;
- int nr_cyclic_checks;
- int nr_cyclic_check_recursions;
- int nr_find_usage_forwards_checks;
- int nr_find_usage_forwards_recursions;
- int nr_find_usage_backwards_checks;
- int nr_find_usage_backwards_recursions;
-};
-
-DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
-
-#define __debug_atomic_inc(ptr) \
- this_cpu_inc(lockdep_stats.ptr);
-
-#define debug_atomic_inc(ptr) { \
- WARN_ON_ONCE(!irqs_disabled()); \
- __this_cpu_inc(lockdep_stats.ptr); \
-}
-
-#define debug_atomic_dec(ptr) { \
- WARN_ON_ONCE(!irqs_disabled()); \
- __this_cpu_dec(lockdep_stats.ptr); \
-}
-
-#define debug_atomic_read(ptr) ({ \
- struct lockdep_stats *__cpu_lockdep_stats; \
- unsigned long long __total = 0; \
- int __cpu; \
- for_each_possible_cpu(__cpu) { \
- __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \
- __total += __cpu_lockdep_stats->ptr; \
- } \
- __total; \
-})
-#else
-# define __debug_atomic_inc(ptr) do { } while (0)
-# define debug_atomic_inc(ptr) do { } while (0)
-# define debug_atomic_dec(ptr) do { } while (0)
-# define debug_atomic_read(ptr) 0
-#endif