author     Valentin Schneider <valentin.schneider@arm.com>  2019-09-23 15:36:12 +0100
committer  Ingo Molnar <mingo@kernel.org>                   2019-09-25 17:42:32 +0200
commit     a49b4f4012ef233143c5f7ce44f97851e54d5ef9 (patch)
tree       9d39821367916b49ada43fc72b231607173c9a36 /kernel
parent     763a9ec06c409dcde2a761aac4bb83ff3938e0b3 (diff)
sched/core: Fix preempt_schedule() interrupt return comment
preempt_schedule_irq() is the one that should be called on return from
interrupt; clean up the comment to avoid any ambiguity.
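[Editor's note: for context, the two entry points differ in how they are
reached: preempt_schedule() sits behind preempt_enable(), firing when the
preempt count drops to zero, whereas preempt_schedule_irq() is invoked by
each architecture's interrupt-return path with IRQs still disabled. A rough
sketch of the two paths follows; it is simplified, not verbatim kernel
code, and irq_return_path() is a hypothetical C stand-in for the arch
entry code, which is usually assembly.]

    /* Path 1: in-kernel preemption off of preempt_enable().
     * Simplified from include/linux/preempt.h (CONFIG_PREEMPTION). */
    #define preempt_enable()                                \
    do {                                                    \
            barrier();                                      \
            if (unlikely(preempt_count_dec_and_test()))     \
                    __preempt_schedule(); /* -> preempt_schedule() */ \
    } while (0)

    /* Path 2: return from interrupt. irq_return_path() is a
     * hypothetical stand-in for what the arch entry code does
     * before resuming preempted kernel code. */
    static void irq_return_path(void)
    {
            if (need_resched() && !preempt_count())
                    preempt_schedule_irq(); /* called with IRQs disabled */
    }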
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linux-riscv@lists.infradead.org
Cc: uclinux-h8-devel@lists.sourceforge.jp
Link: https://lkml.kernel.org/r/20190923143620.29334-2-valentin.schneider@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83ea23e9e91f..00ef44c8f7b5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4218,9 +4218,8 @@ static void __sched notrace preempt_schedule_common(void)
 
 #ifdef CONFIG_PREEMPTION
 /*
- * this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable. Kernel preemptions off return from interrupt
- * occur there and call schedule directly.
+ * This is the entry point to schedule() from in-kernel preemption
+ * off of preempt_enable.
  */
 asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
@@ -4291,7 +4290,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 #endif /* CONFIG_PREEMPTION */
 
 /*
- * this is the entry point to schedule() from kernel preemption
+ * This is the entry point to schedule() from kernel preemption
  * off of irq context.
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
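[Editor's note: for reference, the function the corrected comments point to
looks roughly like this in kernels of this era. This is a simplified sketch
of preempt_schedule_irq() from kernel/sched/core.c, omitting the context
tracking calls; it is not the verbatim source.]

    asmlinkage __visible void __sched preempt_schedule_irq(void)
    {
            /* Callers must have IRQs disabled and preemption enabled. */
            WARN_ON_ONCE(preempt_count() || !irqs_disabled());

            do {
                    preempt_disable();
                    local_irq_enable();  /* __schedule() runs with IRQs on */
                    __schedule(true);    /* true: preemption, not a voluntary switch */
                    local_irq_disable();
                    sched_preempt_enable_no_resched();
            } while (need_resched());
    }

Running with IRQs disabled on entry is what protects against the recursive
calls from interrupt context that the comment mentions.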