path: root/kernel
author      Ingo Molnar  2015-02-04 07:53:17 +0100
committer   Ingo Molnar  2015-02-04 07:53:17 +0100
commit      2622e849a1d3d3ef531d1cc33308bb7fd00eee40 (patch)
tree        f2be9d9f08444cb93b3a8f87099fe4ff8ac3cd57 /kernel
parent      996636ddae5cab8883bd76b996cd4f2ea9a152be (diff)
parent      e36f014edff70fc02b3d3d79cead1d58f289332e (diff)
Merge tag 'v3.19-rc7' into locking/core, to refresh the branch before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c              2
-rw-r--r--  kernel/bpf/syscall.c          25
-rw-r--r--  kernel/cgroup.c                2
-rw-r--r--  kernel/debug/kdb/kdb_main.c    2
-rw-r--r--  kernel/events/core.c          15
-rw-r--r--  kernel/kprobes.c               2
-rw-r--r--  kernel/module.c               91
-rw-r--r--  kernel/params.c                3
-rw-r--r--  kernel/range.c                10
-rw-r--r--  kernel/sched/core.c            5
-rw-r--r--  kernel/sys.c                   4
-rw-r--r--  kernel/time/ntp.c              7
-rw-r--r--  kernel/time/time.c             4
-rw-r--r--  kernel/trace/ftrace.c         53
-rw-r--r--  kernel/trace/trace.c           1
-rw-r--r--  kernel/trace/trace_events.c   69
-rw-r--r--  kernel/workqueue.c            25
17 files changed, 235 insertions(+), 85 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6594e457a25..a64e7a207d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
- module_free(NULL, hdr);
+ module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
- void *key, *value;
+ void *key, *value, *ptr;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
if (copy_from_user(key, ukey, map->key_size) != 0)
goto free_key;
- err = -ENOENT;
- rcu_read_lock();
- value = map->ops->map_lookup_elem(map, key);
+ err = -ENOMEM;
+ value = kmalloc(map->value_size, GFP_USER);
if (!value)
- goto err_unlock;
+ goto free_key;
+
+ rcu_read_lock();
+ ptr = map->ops->map_lookup_elem(map, key);
+ if (ptr)
+ memcpy(value, ptr, map->value_size);
+ rcu_read_unlock();
+
+ err = -ENOENT;
+ if (!ptr)
+ goto free_value;
err = -EFAULT;
if (copy_to_user(uvalue, value, map->value_size) != 0)
- goto err_unlock;
+ goto free_value;
err = 0;
-err_unlock:
- rcu_read_unlock();
+free_value:
+ kfree(value);
free_key:
kfree(key);
err_put:
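
Editor's note on the kernel/bpf/syscall.c hunk above: map_lookup_elem() now snapshots the element into a private kmalloc() buffer while rcu_read_lock() is held, and only calls copy_to_user() — which may fault and sleep — after rcu_read_unlock(). A minimal userspace sketch of that "copy under the read lock, publish after unlock" shape, with a pthread rwlock standing in for RCU (illustrative only, not part of the diff):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Shared table entry protected by a read lock (stand-in for RCU). */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static char table_value[64] = "hello";

/*
 * Same shape as the reworked map_lookup_elem(): copy the element into a
 * private buffer while the read-side lock is held, then do the possibly
 * blocking "copy to user" step after dropping it.
 */
static int lookup(char *out, size_t len)
{
	char snapshot[64];

	pthread_rwlock_rdlock(&table_lock);
	memcpy(snapshot, table_value, sizeof(snapshot));  /* short, non-blocking */
	pthread_rwlock_unlock(&table_lock);

	/* Anything that can block happens only after the lock is released. */
	snprintf(out, len, "%s", snapshot);
	return 0;
}

int main(void)
{
	char buf[64];

	lookup(buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}
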
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb263d0caab3..04cfe8ace520 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
*
* And don't kill the default root.
*/
- if (css_has_online_children(&root->cgrp.self) ||
+ if (!list_empty(&root->cgrp.self.children) ||
root == &cgrp_dfl_root)
cgroup_put(&root->cgrp);
else
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index f191bddf64b8..7b40c5f07dce 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2023,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
- kdb_printf("%4ld ", module_refcount(mod));
+ kdb_printf("%4d ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882f835a0d85..19efcf13375a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6776,7 +6776,6 @@ skip_type:
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
- cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
__perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
* task or CPU context:
*/
if (move_group) {
- if (group_leader->ctx->type != ctx->type)
+ /*
+ * Make sure we're both on the same task, or both
+ * per-cpu events.
+ */
+ if (group_leader->ctx->task != ctx->task)
+ goto err_context;
+
+ /*
+ * Make sure we're both events for the same CPU;
+ * grouping events for different CPUs is broken; since
+ * you can never concurrently schedule them anyhow.
+ */
+ if (group_leader->cpu != event->cpu)
goto err_context;
} else {
if (group_leader->ctx != ctx)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 06f58309fed2..ee619929cf90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
static void free_insn_page(void *page)
{
- module_free(NULL, page);
+ module_memfree(page);
}
struct kprobe_insn_cache kprobe_insn_slots = {
diff --git a/kernel/module.c b/kernel/module.c
index 3965511ae133..d856e96a3cce 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
return 0;
}
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod: the module we're checking
+ *
+ * Returns:
+ * -1 if the module is in the process of unloading
+ * otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
{
- return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+ return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
struct module_use *use;
int printed_something = 0;
- seq_printf(m, " %lu ", module_refcount(mod));
+ seq_printf(m, " %i ", module_refcount(mod));
/*
* Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
- return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+ return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}
static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
{
vfree(module_region);
}
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
{
}
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
+ module_arch_freeing_init(mod);
+ module_memfree(mod->module_init);
kfree(mod->args);
percpu_modfree(mod);
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
/* Finally, free the core (containing the module structure) */
unset_module_core_ro_nx(mod);
- module_free(mod, mod->module_core);
+ module_memfree(mod->module_core);
#ifdef CONFIG_MPU
update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
*/
kmemleak_ignore(ptr);
if (!ptr) {
- module_free(mod, mod->module_core);
+ module_memfree(mod->module_core);
return -ENOMEM;
}
memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
- module_free(mod, mod->module_init);
- module_free(mod, mod->module_core);
+ module_arch_freeing_init(mod);
+ module_memfree(mod->module_init);
+ module_memfree(mod->module_core);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
#endif
}
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+ struct rcu_head rcu;
+ void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+ struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+ module_memfree(m->module_init);
+ kfree(m);
+}
+
/* This is where the real work happens */
static int do_init_module(struct module *mod)
{
int ret = 0;
+ struct mod_initfree *freeinit;
+
+ freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+ if (!freeinit) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ freeinit->module_init = mod->module_init;
/*
* We want to find out whether @mod uses async during init. Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
if (mod->init != NULL)
ret = do_one_initcall(mod->init);
if (ret < 0) {
- /*
- * Init routine failed: abort. Try to protect us from
- * buggy refcounters.
- */
- mod->state = MODULE_STATE_GOING;
- synchronize_sched();
- module_put(mod);
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_GOING, mod);
- free_module(mod);
- wake_up_all(&module_wq);
- return ret;
+ goto fail_free_freeinit;
}
if (ret > 0) {
pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
+ module_arch_freeing_init(mod);
mod->module_init = NULL;
mod->init_size = 0;
mod->init_ro_size = 0;
mod->init_text_size = 0;
+ /*
+ * We want to free module_init, but be aware that kallsyms may be
+ * walking this with preempt disabled. In all the failure paths,
+ * we call synchronize_rcu/synchronize_sched, but we don't want
+ * to slow down the success path, so use actual RCU here.
+ */
+ call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
return 0;
+
+fail_free_freeinit:
+ kfree(freeinit);
+fail:
+ /* Try to protect us from buggy refcounters. */
+ mod->state = MODULE_STATE_GOING;
+ synchronize_sched();
+ module_put(mod);
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_GOING, mod);
+ free_module(mod);
+ wake_up_all(&module_wq);
+ return ret;
}
static int may_init_module(void)
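
Editor's note on the kernel/module.c hunks above: on success, do_init_module() now hands mod->module_init to call_rcu() instead of freeing it inline, so a kallsyms walker still inside an RCU read-side section cannot have the memory vanish underneath it. A rough userspace sketch of the callback-carrying-struct shape used by struct mod_initfree; the fake_call_rcu() stub below simply invokes the callback, whereas the real call_rcu() waits for a grace period first:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct rcu_head and call_rcu(); illustrative only. */
struct rcu_head_stub {
	void (*func)(struct rcu_head_stub *head);
};

static void fake_call_rcu(struct rcu_head_stub *head,
			  void (*func)(struct rcu_head_stub *head))
{
	head->func = func;
	head->func(head);	/* real call_rcu() defers this past a grace period */
}

/* Same shape as struct mod_initfree in the patch. */
struct initfree {
	struct rcu_head_stub rcu;
	void *init_mem;
};

static void do_free_init_stub(struct rcu_head_stub *head)
{
	struct initfree *m = (struct initfree *)((char *)head -
				offsetof(struct initfree, rcu));

	free(m->init_mem);
	free(m);
	puts("init memory freed after the grace period");
}

int main(void)
{
	struct initfree *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->init_mem = malloc(128);	/* stand-in for mod->module_init */
	fake_call_rcu(&f->rcu, do_free_init_stub);
	return 0;
}
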
diff --git a/kernel/params.c b/kernel/params.c
index 0af9b2c4e56c..728e05b167de 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
mk->mp->grp.attrs = new_attrs;
/* Tack new one on the end. */
+ memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
mk->mp->attrs[mk->mp->num].param = kp;
mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
/* Do not allow runtime DAC changes to make param writable. */
if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+ else
+ mk->mp->attrs[mk->mp->num].mattr.store = NULL;
mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
mk->mp->num++;
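
Editor's note on the kernel/params.c hunk above: the attrs array is grown with krealloc(), which does not zero the newly added element, so the patch memsets the new slot (and explicitly clears the store hook for read-only parameters) before filling it in. A small userspace sketch of the same grow-then-zero pattern; the names below are made up for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct attr {
	const char *name;
	void (*store)(void);	/* must not inherit stale data after a grow */
};

/*
 * Grow an array with realloc() and explicitly zero the new tail slot,
 * for the same reason add_sysfs_param() memsets the new attrs entry:
 * realloc() leaves the added bytes uninitialized.
 */
static struct attr *grow(struct attr *arr, size_t *n)
{
	struct attr *tmp = realloc(arr, (*n + 1) * sizeof(*arr));

	if (!tmp)
		return NULL;
	memset(&tmp[*n], 0, sizeof(tmp[*n]));	/* new slot starts clean */
	(*n)++;
	return tmp;
}

int main(void)
{
	size_t n = 0;
	struct attr *arr = grow(NULL, &n);

	if (arr)
		printf("slot 0 store is %s\n", arr[0].store ? "set" : "NULL");
	free(arr);
	return 0;
}
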
diff --git a/kernel/range.c b/kernel/range.c
index 322ea8e93e4b..82cfc285b046 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
const struct range *r2 = x2;
- s64 start1, start2;
- start1 = r1->start;
- start2 = r2->start;
-
- return start1 - start2;
+ if (r1->start < r2->start)
+ return -1;
+ if (r1->start > r2->start)
+ return 1;
+ return 0;
}
int clean_sort_range(struct range *range, int az)
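
Editor's note on the kernel/range.c hunk above: the old comparator computed the s64 difference start1 - start2 and returned it as int, so differences wider than 32 bits are truncated and the sort can mis-order large ranges; the fix returns -1/0/1 from explicit comparisons. A tiny userspace demonstration of the truncation, using hypothetical values:

#include <stdint.h>
#include <stdio.h>

/* Buggy: truncating a 64-bit difference to int can lose the ordering. */
static int cmp_by_subtraction(uint64_t a, uint64_t b)
{
	return (int64_t)a - (int64_t)b;	/* implicitly narrowed to int */
}

/* Fixed: explicit comparisons never overflow or truncate. */
static int cmp_by_comparison(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	uint64_t a = 0x200000000ULL;	/* 8 GiB */
	uint64_t b = 0x100000000ULL;	/* 4 GiB */

	/* a > b, so a correct comparator must return a positive value. */
	printf("subtraction: %d\n", cmp_by_subtraction(a, b)); /* 0 on common compilers */
	printf("comparison:  %d\n", cmp_by_comparison(a, b));  /* 1 */
	return 0;
}
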
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c0accc00566e..e628cb11b560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7292,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
* since we will exit with TASK_RUNNING make sure we enter with it,
* otherwise we will destroy state.
*/
- if (WARN_ONCE(current->state != TASK_RUNNING,
+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
"do not call blocking ops when !TASK_RUNNING; "
"state=%lx set at [<%p>] %pS\n",
current->state,
(void *)current->task_state_change,
- (void *)current->task_state_change))
- __set_current_state(TASK_RUNNING);
+ (void *)current->task_state_change);
___might_sleep(file, line, preempt_offset);
}
diff --git a/kernel/sys.c b/kernel/sys.c
index a8c9f5a7dda6..ea9c88109894 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
up_write(&me->mm->mmap_sem);
break;
case PR_MPX_ENABLE_MANAGEMENT:
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
error = MPX_ENABLE_MANAGEMENT(me);
break;
case PR_MPX_DISABLE_MANAGEMENT:
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
error = MPX_DISABLE_MANAGEMENT(me);
break;
default:
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 87a346fd6d61..28bf91c60a0b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
return -EPERM;
+ if (txc->modes & ADJ_FREQUENCY) {
+ if (LONG_MIN / PPM_SCALE > txc->freq)
+ return -EINVAL;
+ if (LONG_MAX / PPM_SCALE < txc->freq)
+ return -EINVAL;
+ }
+
return 0;
}
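
Editor's note on the kernel/time/ntp.c hunk above: txc->freq is later scaled by PPM_SCALE, so ntp_validate_timex() now rejects values outside [LONG_MIN / PPM_SCALE, LONG_MAX / PPM_SCALE] before that multiplication can overflow. A minimal sketch of the divide-before-multiply guard; the PPM_SCALE value below is a stand-in, not the kernel's real constant:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's PPM_SCALE; the real value differs. */
#define PPM_SCALE 65536L

/*
 * freq * PPM_SCALE is only safe if freq lies within
 * [LONG_MIN / PPM_SCALE, LONG_MAX / PPM_SCALE] — the same check the
 * patch adds for ADJ_FREQUENCY.
 */
static bool freq_in_range(long freq)
{
	if (LONG_MIN / PPM_SCALE > freq)
		return false;
	if (LONG_MAX / PPM_SCALE < freq)
		return false;
	return true;
}

int main(void)
{
	long small = 1000;
	long huge  = LONG_MAX / 1000;	/* would overflow when scaled */

	printf("%ld -> %s\n", small, freq_in_range(small) ? "accepted" : "rejected");
	printf("%ld -> %s\n", huge,  freq_in_range(huge)  ? "accepted" : "rejected");
	return 0;
}
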
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6390517e77d4..2c85b7724af4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
if (tv) {
if (copy_from_user(&user_tv, tv, sizeof(*tv)))
return -EFAULT;
+
+ if (!timeval_valid(&user_tv))
+ return -EINVAL;
+
new_ts.tv_sec = user_tv.tv_sec;
new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 929a733d302e..224e768bdc73 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
- struct ftrace_hash *old_hash)
+ struct ftrace_ops_hash *old_hash)
{
ops->flags |= FTRACE_OPS_FL_MODIFYING;
- ops->old_hash.filter_hash = old_hash;
+ ops->old_hash.filter_hash = old_hash->filter_hash;
+ ops->old_hash.notrace_hash = old_hash->notrace_hash;
ftrace_run_update_code(command);
ops->old_hash.filter_hash = NULL;
+ ops->old_hash.notrace_hash = NULL;
ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
static int ftrace_probe_registered;
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
{
int ret;
int i;
@@ -3637,6 +3639,7 @@ int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_probe *entry;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
if (!hash) {
count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
- __enable_ftrace_function_probe(old_hash);
+ __enable_ftrace_function_probe(&old_hash_ops);
if (!ret)
free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
}
static void ftrace_ops_update_code(struct ftrace_ops *ops,
- struct ftrace_hash *old_hash)
+ struct ftrace_ops_hash *old_hash)
{
- if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ struct ftrace_ops *op;
+
+ if (!ftrace_enabled)
+ return;
+
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+ return;
+ }
+
+ /*
+ * If this is the shared global_ops filter, then we need to
+ * check if there is another ops that shares it, is enabled.
+ * If so, we still need to run the modify code.
+ */
+ if (ops->func_hash != &global_ops.local_hash)
+ return;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->func_hash == &global_ops.local_hash &&
+ op->flags & FTRACE_OPS_FL_ENABLED) {
+ ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+ /* Only need to do this once */
+ return;
+ }
+ } while_for_each_ftrace_op(op);
}
static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+ old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+ old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
if (!ret) {
- ftrace_ops_update_code(ops, old_hash);
+ ftrace_ops_update_code(ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
int ftrace_regex_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+ old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+ old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
if (!ret) {
- ftrace_ops_update_code(iter->ops, old_hash);
+ ftrace_ops_update_code(iter->ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2e767972e99c..4a9079b9f082 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
tracepoint_printk = 0;
}
tracer_alloc_buffers();
- init_ftrace_syscalls();
trace_event_init();
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 366a78a3e61e..b03a0ea77b99 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
return 0;
}
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+ char *buf = bootup_event_buf;
+ char *token;
+ int ret;
+
+ while (true) {
+ token = strsep(&buf, ",");
+
+ if (!token)
+ break;
+ if (!*token)
+ continue;
+
+ /* Restarting syscalls requires that we stop them first */
+ if (disable_first)
+ ftrace_set_clr_event(tr, token, 0);
+
+ ret = ftrace_set_clr_event(tr, token, 1);
+ if (ret)
+ pr_warn("Failed to enable trace event: %s\n", token);
+
+ /* Put back the comma to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ',';
+ }
+}
+
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
struct ftrace_event_call **iter, *call;
- char *buf = bootup_event_buf;
- char *token;
int ret;
if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
*/
__trace_early_add_events(tr);
- while (true) {
- token = strsep(&buf, ",");
-
- if (!token)
- break;
- if (!*token)
- continue;
-
- ret = ftrace_set_clr_event(tr, token, 1);
- if (ret)
- pr_warn("Failed to enable trace event: %s\n", token);
- }
+ early_enable_events(tr, false);
trace_printk_start_comm();
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
return 0;
}
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, and this flag is never set, making
+ * the syscall tracepoint never get reached, but the event is enabled
+ * regardless (and not doing anything).
+ */
+static __init int event_trace_enable_again(void)
+{
+ struct trace_array *tr;
+
+ tr = top_trace_array();
+ if (!tr)
+ return -ENODEV;
+
+ early_enable_events(tr, true);
+
+ return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
static __init int event_trace_init(void)
{
struct trace_array *tr;
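
Editor's note on the kernel/trace/trace_events.c hunks above: early_enable_events() walks the comma-separated bootup_event_buf with strsep() and writes each comma back afterwards, so the very same buffer can be walked again from event_trace_enable_again() once pid 1 exists and TIF_SYSCALL_TRACEPOINT can actually be set. A small userspace sketch of that re-parseable strsep() loop:

#include <stdio.h>
#include <string.h>

/*
 * Walk a comma-separated list with strsep(), then restore each comma so
 * the buffer stays intact and can be walked a second time — the trick
 * early_enable_events() relies on.
 */
static void walk(char *buf)
{
	char *token;

	while ((token = strsep(&buf, ",")) != NULL) {
		if (*token)
			printf("token: %s\n", token);
		if (buf)
			*(buf - 1) = ',';	/* put the separator back */
	}
}

int main(void)
{
	char events[] = "sched:sched_switch,,syscalls:sys_enter_open";

	walk(events);	/* first pass */
	walk(events);	/* buffer was restored, so a second pass works too */
	return 0;
}
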
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6202b08f1933..beeeac9e0e3e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
- *
- * Return:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
*/
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
- if (!need_to_create_worker(pool))
- return false;
restart:
spin_unlock_irq(&pool->lock);
@@ -1877,7 +1871,6 @@ restart:
*/
if (need_to_create_worker(pool))
goto restart;
- return true;
}
/**
@@ -1897,16 +1890,14 @@ restart:
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
- * %false if the pool don't need management and the caller can safely start
- * processing works, %true indicates that the function released pool->lock
- * and reacquired it to perform some management function and that the
- * conditions that the caller verified while holding the lock before
- * calling the function might no longer be true.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- bool ret = false;
/*
* Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
* actual management, the pool may stall indefinitely.
*/
if (!mutex_trylock(&pool->manager_arb))
- return ret;
+ return false;
- ret |= maybe_create_worker(pool);
+ maybe_create_worker(pool);
mutex_unlock(&pool->manager_arb);
- return ret;
+ return true;
}
/**