author    David S. Miller  2016-04-09 17:41:41 -0400
committer David S. Miller  2016-04-09 17:41:41 -0400
commit    ae95d7126104591348d37aaf78c8325967e02386
tree      3270712f030549d77d4c55246d056e02b9def29d
parent    03c5b534185f9844c1b5fcfdbae2adc32821ec42
parent    183c948a3cb3efbf45eabed41fa7ee04c19378fc
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'kernel')
 kernel/events/core.c     | 15
 kernel/events/uprobes.c  |  8
 kernel/locking/lockdep.c | 79
 kernel/sched/core.c      | 18
 kernel/time/tick-sched.c | 61
 kernel/time/tick-sched.h |  2
 6 files changed, 143 insertions(+), 40 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9a01019ff7c8..5056abffef27 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2417,14 +2417,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
cpuctx->task_ctx = NULL;
}
- is_active ^= ctx->is_active; /* changed bits */
-
+ /*
+ * Always update time if it was set; not only when it changes.
+ * Otherwise we can 'forget' to update time for any but the last
+ * context we sched out. For example:
+ *
+ * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
+ * ctx_sched_out(.event_type = EVENT_PINNED)
+ *
+ * would only update time for the pinned events.
+ */
if (is_active & EVENT_TIME) {
/* update (and stop) ctx time */
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
}
+ is_active ^= ctx->is_active; /* changed bits */
+
if (!ctx->nr_active || !(is_active & EVENT_ALL))
return;
@@ -8547,6 +8557,7 @@ SYSCALL_DEFINE5(perf_event_open,
f_flags);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
+ event_file = NULL;
goto err_context;
}
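The one-liner works because the kernel encodes errors in the pointer value
itself; once PTR_ERR() has extracted the errno, NULLing the pointer keeps the
shared unwind path from mistaking the ERR_PTR-encoded value for a valid
struct file. A userspace sketch of that convention; the MAX_ERRNO encoding
mirrors the kernel's err.h, and unwind() is a hypothetical stand-in for the
error-path cleanup:

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

struct file;

static void unwind(struct file *f)
{
	if (f)				/* NULL means: nothing to release */
		printf("releasing file %p\n", (void *)f);
}

int main(void)
{
	struct file *event_file = (struct file *)-22L;	/* ERR_PTR(-EINVAL) */

	if (IS_ERR(event_file)) {
		long err = PTR_ERR(event_file);

		event_file = NULL;	/* the fix: unwind now sees NULL */
		printf("err = %ld\n", err);
	}
	unwind(event_file);
	return 0;
}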
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 220fc17b9718..7edc95edfaee 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -321,7 +321,7 @@ retry:
copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
ret = __replace_page(vma, vaddr, old_page, new_page);
- page_cache_release(new_page);
+ put_page(new_page);
put_old:
put_page(old_page);
@@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp,
* see uprobe_register().
*/
if (mapping->a_ops->readpage)
- page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp);
+ page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
- page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
copy_from_page(page, offset, insn, nbytes);
- page_cache_release(page);
+ put_page(page);
return 0;
}
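Both substitutions are mechanical, from the 4.6-era removal of the page-cache
aliases: PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE were defined equal to
PAGE_SHIFT/PAGE_SIZE, and page_cache_release() was an alias for put_page().
A sketch of the index math the shift performs, in userspace with an assumed
4K page size:

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed: 4K pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long offset = 0x3456;	/* byte offset into the mapping */

	/* offset >> PAGE_SHIFT selects the page, as in __copy_insn() */
	printf("page index:     %lu\n", offset >> PAGE_SHIFT);
	printf("offset in page: %#lx\n", offset & (PAGE_SIZE - 1));
	return 0;
}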
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 53ab2f85d77e..2324ba5310db 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2000,6 +2000,77 @@ static inline int get_first_held_lock(struct task_struct *curr,
}
/*
+ * Returns the next chain_key iteration
+ */
+static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
+{
+ u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
+
+ printk(" class_idx:%d -> chain_key:%016Lx",
+ class_idx,
+ (unsigned long long)new_chain_key);
+ return new_chain_key;
+}
+
+static void
+print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
+{
+ struct held_lock *hlock;
+ u64 chain_key = 0;
+ int depth = curr->lockdep_depth;
+ int i;
+
+ printk("depth: %u\n", depth + 1);
+ for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+ hlock = curr->held_locks + i;
+ chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
+
+ print_lock(hlock);
+ }
+
+ print_chain_key_iteration(hlock_next->class_idx, chain_key);
+ print_lock(hlock_next);
+}
+
+static void print_chain_keys_chain(struct lock_chain *chain)
+{
+ int i;
+ u64 chain_key = 0;
+ int class_id;
+
+ printk("depth: %u\n", chain->depth);
+ for (i = 0; i < chain->depth; i++) {
+ class_id = chain_hlocks[chain->base + i];
+ chain_key = print_chain_key_iteration(class_id + 1, chain_key);
+
+ print_lock_name(lock_classes + class_id);
+ printk("\n");
+ }
+}
+
+static void print_collision(struct task_struct *curr,
+ struct held_lock *hlock_next,
+ struct lock_chain *chain)
+{
+ printk("\n");
+ printk("======================\n");
+ printk("[chain_key collision ]\n");
+ print_kernel_ident();
+ printk("----------------------\n");
+ printk("%s/%d: ", current->comm, task_pid_nr(current));
+ printk("Hash chain already cached but the contents don't match!\n");
+
+ printk("Held locks:");
+ print_chain_keys_held_locks(curr, hlock_next);
+
+ printk("Locks in cached chain:");
+ print_chain_keys_chain(chain);
+
+ printk("\nstack backtrace:\n");
+ dump_stack();
+}
+
+/*
* Checks whether the chain and the current held locks are consistent
* in depth and also in content. If they are not it most likely means
* that there was a collision during the calculation of the chain_key.
@@ -2014,14 +2085,18 @@ static int check_no_collision(struct task_struct *curr,
i = get_first_held_lock(curr, hlock);
- if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+ if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
+ print_collision(curr, hlock, chain);
return 0;
+ }
for (j = 0; j < chain->depth - 1; j++, i++) {
id = curr->held_locks[i].class_idx - 1;
- if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+ if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
+ print_collision(curr, hlock, chain);
return 0;
+ }
}
#endif
return 1;
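For context on what the new printouts replay: a lock chain's key is a hash
folded left-to-right over the class indices of the held locks, so two
different lock sequences can, rarely, collide on the same key, which is the
condition check_no_collision() now reports in detail. A userspace sketch of
such a fold; the mixing function below is a made-up stand-in, not lockdep's
actual iterate_chain_key():

#include <stdio.h>

typedef unsigned long long u64;

static u64 iterate_chain_key(u64 key, u64 idx)
{
	/* illustrative mixer only; order-sensitive like the real one */
	return (key + idx) * 0x9e3779b97f4a7c15ULL ^ (key >> 31);
}

int main(void)
{
	int class_idx[] = { 7, 42, 3 };	/* hypothetical held-lock classes */
	u64 chain_key = 0;
	int i;

	for (i = 0; i < 3; i++) {
		chain_key = iterate_chain_key(chain_key, class_idx[i]);
		printf(" class_idx:%d -> chain_key:%016llx\n",
		       class_idx[i], chain_key);
	}
	return 0;
}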
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465eeab8b3..8b489fcac37b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
}
#endif /* CONFIG_SCHED_HRTICK */
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask) \
+ ({ \
+ typeof(ptr) _ptr = (ptr); \
+ typeof(mask) _mask = (mask); \
+ typeof(*_ptr) _old, _val = *_ptr; \
+ \
+ for (;;) { \
+ _old = cmpxchg(_ptr, _val, _val | _mask); \
+ if (_old == _val) \
+ break; \
+ _val = _old; \
+ } \
+ _old; \
+})
+
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
* Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
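The loop in the fetch_or() macro above is the classic compare-and-swap retry:
read the current value, attempt to install value|mask, and retry on whatever
cmpxchg() observed if another CPU raced in; the value handed back is the one
seen before the OR took effect. A userspace rendering of the same loop, under
the assumption that GCC's __sync_val_compare_and_swap is an acceptable
stand-in for the kernel's cmpxchg():

#include <stdio.h>

#define fetch_or(ptr, mask)						\
({									\
	typeof(ptr) _ptr = (ptr);					\
	typeof(mask) _mask = (mask);					\
	typeof(*_ptr) _old, _val = *_ptr;				\
									\
	for (;;) {							\
		_old = __sync_val_compare_and_swap(_ptr, _val,		\
						   _val | _mask);	\
		if (_old == _val)	/* nobody raced: OR applied */	\
			break;						\
		_val = _old;	/* raced: retry on the fresh value */	\
	}								\
	_old;								\
})

int main(void)
{
	unsigned long flags = 0x1;
	unsigned long prev = fetch_or(&flags, 0x4UL);

	printf("prev=%#lx now=%#lx\n", prev, flags);	/* 0x1 / 0x5 */
	return 0;
}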
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 084b79f5917e..58e3310c9b21 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -157,52 +157,50 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
cpumask_var_t tick_nohz_full_mask;
cpumask_var_t housekeeping_mask;
bool tick_nohz_full_running;
-static unsigned long tick_dep_mask;
+static atomic_t tick_dep_mask;
-static void trace_tick_dependency(unsigned long dep)
+static bool check_tick_dependency(atomic_t *dep)
{
- if (dep & TICK_DEP_MASK_POSIX_TIMER) {
+ int val = atomic_read(dep);
+
+ if (val & TICK_DEP_MASK_POSIX_TIMER) {
trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
- return;
+ return true;
}
- if (dep & TICK_DEP_MASK_PERF_EVENTS) {
+ if (val & TICK_DEP_MASK_PERF_EVENTS) {
trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
- return;
+ return true;
}
- if (dep & TICK_DEP_MASK_SCHED) {
+ if (val & TICK_DEP_MASK_SCHED) {
trace_tick_stop(0, TICK_DEP_MASK_SCHED);
- return;
+ return true;
}
- if (dep & TICK_DEP_MASK_CLOCK_UNSTABLE)
+ if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+ return true;
+ }
+
+ return false;
}
static bool can_stop_full_tick(struct tick_sched *ts)
{
WARN_ON_ONCE(!irqs_disabled());
- if (tick_dep_mask) {
- trace_tick_dependency(tick_dep_mask);
+ if (check_tick_dependency(&tick_dep_mask))
return false;
- }
- if (ts->tick_dep_mask) {
- trace_tick_dependency(ts->tick_dep_mask);
+ if (check_tick_dependency(&ts->tick_dep_mask))
return false;
- }
- if (current->tick_dep_mask) {
- trace_tick_dependency(current->tick_dep_mask);
+ if (check_tick_dependency(&current->tick_dep_mask))
return false;
- }
- if (current->signal->tick_dep_mask) {
- trace_tick_dependency(current->signal->tick_dep_mask);
+ if (check_tick_dependency(&current->signal->tick_dep_mask))
return false;
- }
return true;
}
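The refactor folds the trace-and-test pairs into one helper: a single
atomic_read() snapshot is tested bit by bit, the matching dependency is
traced, and the boolean result lets can_stop_full_tick() fall through the
four masks with early returns. A userspace sketch of the same shape using
C11 atomics; only the two mask bits shown are assumed:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TICK_DEP_MASK_POSIX_TIMER 0x1
#define TICK_DEP_MASK_PERF_EVENTS 0x2

static bool check_tick_dependency(atomic_uint *dep)
{
	unsigned int val = atomic_load(dep);	/* one snapshot, then test */

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		printf("tick kept: posix timers\n");
		return true;
	}
	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		printf("tick kept: perf events\n");
		return true;
	}
	return false;
}

int main(void)
{
	atomic_uint mask = TICK_DEP_MASK_PERF_EVENTS;

	if (!check_tick_dependency(&mask))
		printf("tick can be stopped\n");
	return 0;
}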
@@ -259,12 +257,12 @@ static void tick_nohz_full_kick_all(void)
preempt_enable();
}
-static void tick_nohz_dep_set_all(unsigned long *dep,
+static void tick_nohz_dep_set_all(atomic_t *dep,
enum tick_dep_bits bit)
{
- unsigned long prev;
+ int prev;
- prev = fetch_or(dep, BIT_MASK(bit));
+ prev = atomic_fetch_or(dep, BIT(bit));
if (!prev)
tick_nohz_full_kick_all();
}
@@ -280,7 +278,7 @@ void tick_nohz_dep_set(enum tick_dep_bits bit)
void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
- clear_bit(bit, &tick_dep_mask);
+ atomic_andnot(BIT(bit), &tick_dep_mask);
}
/*
@@ -289,12 +287,12 @@ void tick_nohz_dep_clear(enum tick_dep_bits bit)
*/
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
- unsigned long prev;
+ int prev;
struct tick_sched *ts;
ts = per_cpu_ptr(&tick_cpu_sched, cpu);
- prev = fetch_or(&ts->tick_dep_mask, BIT_MASK(bit));
+ prev = atomic_fetch_or(&ts->tick_dep_mask, BIT(bit));
if (!prev) {
preempt_disable();
/* Perf needs local kick that is NMI safe */
@@ -313,7 +311,7 @@ void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
- clear_bit(bit, &ts->tick_dep_mask);
+ atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
/*
@@ -331,7 +329,7 @@ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
- clear_bit(bit, &tsk->tick_dep_mask);
+ atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
/*
@@ -345,7 +343,7 @@ void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
- clear_bit(bit, &sig->tick_dep_mask);
+ atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}
/*
@@ -366,7 +364,8 @@ void __tick_nohz_task_switch(void)
ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->tick_stopped) {
- if (current->tick_dep_mask || current->signal->tick_dep_mask)
+ if (atomic_read(&current->tick_dep_mask) ||
+ atomic_read(&current->signal->tick_dep_mask))
tick_nohz_full_kick();
}
out:
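Taken together, the tick-sched conversion replaces fetch_or()/clear_bit() on
an unsigned long with atomic_fetch_or()/atomic_andnot() on an atomic_t, so
setters can still detect the 0 -> nonzero transition (and kick CPUs only
then) without racing the clearers. A userspace sketch of that set/clear pair
with C11 atomics; BIT() and the kick message are illustrative:

#include <stdatomic.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

static atomic_uint tick_dep_mask;

static void tick_dep_set(unsigned int bit)
{
	/* prev == 0 means this is the first dependency: kick once */
	unsigned int prev = atomic_fetch_or(&tick_dep_mask, BIT(bit));

	if (!prev)
		printf("kick CPUs out of nohz mode\n");
}

static void tick_dep_clear(unsigned int bit)
{
	/* andnot equivalent: atomically clear just this bit */
	atomic_fetch_and(&tick_dep_mask, ~BIT(bit));
}

int main(void)
{
	tick_dep_set(0);	/* kicks */
	tick_dep_set(1);	/* mask already nonzero: no kick */
	tick_dep_clear(0);
	tick_dep_clear(1);
	printf("mask=%u\n", atomic_load(&tick_dep_mask));
	return 0;
}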
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index eb4e32566a83..bf38226e5c17 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -60,7 +60,7 @@ struct tick_sched {
u64 next_timer;
ktime_t idle_expires;
int do_timer_last;
- unsigned long tick_dep_mask;
+ atomic_t tick_dep_mask;
};
extern struct tick_sched *tick_get_tick_sched(int cpu);