Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kcov.c		26
-rw-r--r--	kernel/kthread.c	61
2 files changed, 79 insertions(+), 8 deletions(-)
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 55c5d883a93e..6afae0bcbac4 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -427,7 +427,8 @@ void kcov_task_exit(struct task_struct *t)
* WARN_ON(!kcov->remote && kcov->t != t);
*
* For KCOV_REMOTE_ENABLE devices, the exiting task is either:
- * 2. A remote task between kcov_remote_start() and kcov_remote_stop().
+ *
+ * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
* In this case we should print a warning right away, since a task
* shouldn't be exiting when it's in a kcov coverage collection
* section. Here t points to the task that is collecting remote
@@ -437,7 +438,7 @@ void kcov_task_exit(struct task_struct *t)
* WARN_ON(kcov->remote && kcov->t != t);
*
* 2. The task that created kcov exiting without calling KCOV_DISABLE,
- * and then again we can make sure that t->kcov->t == t:
+ * and then again we make sure that t->kcov->t == t:
* WARN_ON(kcov->remote && kcov->t != t);
*
* By combining all three checks into one we get:
@@ -764,7 +765,7 @@ static const struct file_operations kcov_fops = {
* Internally, kcov_remote_start() looks up the kcov device associated with the
* provided handle, allocates an area for coverage collection, and saves the
* pointers to kcov and area into the current task_struct to allow coverage to
- * be collected via __sanitizer_cov_trace_pc()
+ * be collected via __sanitizer_cov_trace_pc().
* In turn, kcov_remote_stop() clears those pointers from task_struct to stop
* collecting coverage and copies all collected coverage into the kcov area.
*/
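
For context, a minimal sketch of how a kernel thread might be annotated with this
API. kcov_remote_start(), kcov_remote_stop() and kcov_remote_handle() are the kcov
interfaces documented above; my_worker() and my_do_work() are hypothetical:

#include <linux/kcov.h>
#include <linux/kthread.h>

static void my_do_work(void);	/* hypothetical work item */

static int my_worker(void *arg)
{
	while (!kthread_should_stop()) {
		/*
		 * Coverage collected between start and stop is attributed
		 * to the userspace process that enabled this handle with
		 * KCOV_REMOTE_ENABLE.
		 */
		kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 1));
		my_do_work();
		kcov_remote_stop();
	}
	return 0;
}
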
@@ -972,16 +973,25 @@ void kcov_remote_stop(void)
local_irq_restore(flags);
return;
}
- kcov = t->kcov;
- area = t->kcov_area;
- size = t->kcov_size;
- sequence = t->kcov_sequence;
-
+ /*
+ * When in softirq, check if the corresponding kcov_remote_start()
+ * actually found the remote handle and started collecting coverage.
+ */
+ if (in_serving_softirq() && !t->kcov_softirq) {
+ local_irq_restore(flags);
+ return;
+ }
+ /* Make sure that kcov_softirq is only set when in softirq. */
if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
local_irq_restore(flags);
return;
}
+ kcov = t->kcov;
+ area = t->kcov_area;
+ size = t->kcov_size;
+ sequence = t->kcov_sequence;
+
kcov_stop(t);
if (in_serving_softirq()) {
t->kcov_softirq = 0;
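
The reordering above matters for the softirq case. A hedged sketch of an annotated
softirq path (the handler and its work function are hypothetical) shows why
kcov_remote_stop() must bail out when the matching kcov_remote_start() never engaged:

#include <linux/interrupt.h>
#include <linux/kcov.h>

/*
 * If no userspace process has enabled this handle, kcov_remote_start()
 * does not set t->kcov_softirq, so the check added above turns the
 * matching kcov_remote_stop() into a no-op instead of letting it read
 * kcov state that belongs to a task-context session.
 */
static void my_softirq_handler(struct softirq_action *h)
{
	kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 2));
	my_handle_pending_events();	/* hypothetical */
	kcov_remote_stop();
}
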
diff --git a/kernel/kthread.c b/kernel/kthread.c
index b84fc7eec035..8e3d2d7fdf5e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1,13 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
* Copyright (C) 2004 IBM Corporation, Rusty Russell.
+ * Copyright (C) 2009 Red Hat, Inc.
*
* Creation is done via kthreadd, so that we get a clean environment
* even if we're invoked from userspace (think modprobe, hotplug cpu,
* etc.).
*/
#include <uapi/linux/sched/types.h>
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
@@ -25,6 +29,7 @@
#include <linux/numa.h>
#include <trace/events/sched.h>
+
static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;
@@ -48,6 +53,7 @@ struct kthread {
unsigned int cpu;
int (*threadfn)(void *);
void *data;
+ mm_segment_t oldfs;
struct completion parked;
struct completion exited;
#ifdef CONFIG_BLK_CGROUP
@@ -1220,6 +1226,61 @@ void kthread_destroy_worker(struct kthread_worker *worker)
}
EXPORT_SYMBOL(kthread_destroy_worker);
+/**
+ * kthread_use_mm - make the calling kthread operate on an address space
+ * @mm: address space to operate on
+ */
+void kthread_use_mm(struct mm_struct *mm)
+{
+ struct mm_struct *active_mm;
+ struct task_struct *tsk = current;
+
+ WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+ WARN_ON_ONCE(tsk->mm);
+
+ task_lock(tsk);
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ mmgrab(mm);
+ tsk->active_mm = mm;
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
+ task_unlock(tsk);
+#ifdef finish_arch_post_lock_switch
+ finish_arch_post_lock_switch();
+#endif
+
+ if (active_mm != mm)
+ mmdrop(active_mm);
+
+ to_kthread(tsk)->oldfs = get_fs();
+ set_fs(USER_DS);
+}
+EXPORT_SYMBOL_GPL(kthread_use_mm);
+
+/**
+ * kthread_unuse_mm - reverse the effect of kthread_use_mm()
+ * @mm: address space to operate on
+ */
+void kthread_unuse_mm(struct mm_struct *mm)
+{
+ struct task_struct *tsk = current;
+
+ WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
+ WARN_ON_ONCE(!tsk->mm);
+
+ set_fs(to_kthread(tsk)->oldfs);
+
+ task_lock(tsk);
+ sync_mm_rss(mm);
+ tsk->mm = NULL;
+ /* active_mm is still 'mm' */
+ enter_lazy_tlb(mm, tsk);
+ task_unlock(tsk);
+}
+EXPORT_SYMBOL_GPL(kthread_unuse_mm);
+
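
A minimal usage sketch for the two helpers added above (struct my_ctx and its
fields are illustrative, not part of this patch): a kernel thread temporarily
adopts a user process's mm so that the uaccess helpers resolve in that address
space, as the vhost-style workers converted by this series do.

#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

struct my_ctx {				/* hypothetical */
	struct mm_struct *mm;		/* target mm; creator holds a reference */
	void __user *ubuf;
	void *kbuf;
	size_t len;
};

static int my_io_worker(void *arg)
{
	struct my_ctx *ctx = arg;

	kthread_use_mm(ctx->mm);
	/* copy_to_user() now operates on ctx->mm. */
	if (copy_to_user(ctx->ubuf, ctx->kbuf, ctx->len))
		pr_warn("my_io_worker: copy_to_user failed\n");
	kthread_unuse_mm(ctx->mm);
	return 0;
}
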
#ifdef CONFIG_BLK_CGROUP
/**
* kthread_associate_blkcg - associate blkcg to current kthread