 arch/x86/kernel/tsc.c   |  2 +-
 include/linux/preempt.h |  5 -----
 kernel/sched/clock.c    | 53 +++++++++++++++++++++++++++++++++++++++++------------
 kernel/sched/fair.c     |  8 +-------
 4 files changed, 43 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a3acbac2ee72..19e5adb49a27 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -180,7 +180,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
 
 static void cyc2ns_data_init(struct cyc2ns_data *data)
 {
-	data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR;
+	data->cyc2ns_mul = 0;
 	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
 	data->cyc2ns_offset = 0;
 	data->__count = 0;
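Note on the hunk above: cyc2ns converts TSC cycles to nanoseconds with a fixed-point multiply-and-shift, ns = (cycles * cyc2ns_mul) >> cyc2ns_shift, plus a continuity offset. Initializing cyc2ns_mul to 0 instead of 1U << CYC2NS_SCALE_FACTOR means the clock reads 0 until TSC calibration installs the real multiplier, rather than a value derived from whatever the TSC has counted since power-on. A minimal userspace sketch of the conversion; the scale factor and sample values are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* assumed for the sketch */

struct cyc2ns_data {
	uint32_t cyc2ns_mul;	/* fixed-point multiplier */
	uint32_t cyc2ns_shift;	/* fixed-point shift */
	uint64_t cyc2ns_offset;	/* keeps the clock continuous across updates */
};

static uint64_t cycles_to_ns(const struct cyc2ns_data *d, uint64_t cycles)
{
	/* ns = (cycles * mul) >> shift, plus the continuity offset */
	return (uint64_t)(((__uint128_t)cycles * d->cyc2ns_mul) >> d->cyc2ns_shift)
		+ d->cyc2ns_offset;
}

int main(void)
{
	/* mul == 0 (the new init value): the clock reads 0 until calibration */
	struct cyc2ns_data uninit = { 0, CYC2NS_SCALE_FACTOR, 0 };
	/* a calibrated ~1 GHz TSC: 1 cycle == 1 ns, i.e. mul == 1 << shift */
	struct cyc2ns_data oneghz = { 1u << CYC2NS_SCALE_FACTOR,
				      CYC2NS_SCALE_FACTOR, 0 };

	printf("uninitialized: %llu ns\n",
	       (unsigned long long)cycles_to_ns(&uninit, 123456));
	printf("calibrated:    %llu ns\n",
	       (unsigned long long)cycles_to_ns(&oneghz, 123456));
	return 0;
}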
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 59749fc48328..de83b4eb1642 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -134,7 +134,6 @@ do { \
 #undef preempt_check_resched
 #endif
 
-#ifdef CONFIG_PREEMPT
 #define preempt_set_need_resched() \
 do { \
 	set_preempt_need_resched(); \
@@ -144,10 +143,6 @@ do { \
 	if (tif_need_resched()) \
 		set_preempt_need_resched(); \
 } while (0)
 
-#else
-#define preempt_set_need_resched() do { } while (0)
-#define preempt_fold_need_resched() do { } while (0)
-#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
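Dropping the CONFIG_PREEMPT guard above makes preempt_set_need_resched() and preempt_fold_need_resched() real operations on non-preemptible kernels too, so a pending TIF_NEED_RESCHED can always be folded into the preempt count instead of silently becoming a no-op. A simplified sketch of the folding idea; the names, the bit choice, and the non-inverted bit logic are stand-ins, not the kernel's actual implementation (x86, for instance, keeps the bit inverted):

#include <stdbool.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* illustrative bit choice */

static unsigned int preempt_count_word;		/* low bits: nesting depth */
static bool tif_need_resched_flag;		/* stand-in for TIF_NEED_RESCHED */

static void set_preempt_need_resched(void)
{
	preempt_count_word |= PREEMPT_NEED_RESCHED;
}

static void preempt_fold_need_resched(void)
{
	/* mirror the thread flag into the preempt count, if it is set */
	if (tif_need_resched_flag)
		set_preempt_need_resched();
}

static bool should_resched(void)
{
	/* one word test covers "not nested" and "reschedule pending" */
	return preempt_count_word == PREEMPT_NEED_RESCHED;
}

int main(void)
{
	tif_need_resched_flag = true;	/* scheduler marked us for resched */
	preempt_fold_need_resched();	/* done where the kernel re-checks */
	return should_resched() ? 0 : 1;
}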
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 6bd6a6731b21..43c2bcc35761 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -77,35 +77,50 @@ __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+static int __sched_clock_stable_early;
 
 int sched_clock_stable(void)
 {
-	if (static_key_false(&__sched_clock_stable))
-		return false;
-	return true;
+	return static_key_false(&__sched_clock_stable);
 }
 
-void set_sched_clock_stable(void)
+static void __set_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
-		static_key_slow_dec(&__sched_clock_stable);
+		static_key_slow_inc(&__sched_clock_stable);
+}
+
+void set_sched_clock_stable(void)
+{
+	__sched_clock_stable_early = 1;
+
+	smp_mb(); /* matches sched_clock_init() */
+
+	if (!sched_clock_running)
+		return;
+
+	__set_sched_clock_stable();
 }
 
 static void __clear_sched_clock_stable(struct work_struct *work)
 {
 	/* XXX worry about clock continuity */
 	if (sched_clock_stable())
-		static_key_slow_inc(&__sched_clock_stable);
+		static_key_slow_dec(&__sched_clock_stable);
 }
 
 static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
 
 void clear_sched_clock_stable(void)
 {
-	if (keventd_up())
-		schedule_work(&sched_clock_work);
-	else
-		__clear_sched_clock_stable(&sched_clock_work);
+	__sched_clock_stable_early = 0;
+
+	smp_mb(); /* matches sched_clock_init() */
+
+	if (!sched_clock_running)
+		return;
+
+	schedule_work(&sched_clock_work);
 }
 
 struct sched_clock_data {
@@ -140,6 +155,20 @@ void sched_clock_init(void)
 	}
 
 	sched_clock_running = 1;
+
+	/*
+	 * Ensure that it is impossible to not do a static_key update.
+	 *
+	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
+	 * and do the update, or we must see their __sched_clock_stable_early
+	 * and do the update, or both.
+	 */
+	smp_mb(); /* matches {set,clear}_sched_clock_stable() */
+
+	if (__sched_clock_stable_early)
+		__set_sched_clock_stable();
+	else
+		__clear_sched_clock_stable(NULL);
 }
 
 /*
@@ -340,7 +369,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-	if (static_key_false(&__sched_clock_stable))
+	if (!sched_clock_stable())
 		return sched_clock_cpu(cpu);
 
 	return sched_clock();
@@ -355,7 +384,7 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-	if (static_key_false(&__sched_clock_stable))
+	if (!sched_clock_stable())
 		return sched_clock_cpu(raw_smp_processor_id());
 
 	return sched_clock();
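The smp_mb() pairs introduced in this file implement the classic store-buffering handshake the sched_clock_init() comment describes: each side publishes its own flag, issues a full barrier, then reads the other side's flag, so at least one of the two (possibly both) must observe the other and perform the static-key update. A self-contained sketch of that handshake, using C11 atomics in place of smp_mb(); names are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stable_early;	 /* plays __sched_clock_stable_early */
static atomic_int clock_running; /* plays sched_clock_running */

static void static_key_update(const char *who)
{
	printf("static key updated by %s\n", who);
}

/* roughly set_sched_clock_stable(): may run before or after init */
static void set_stable(void)
{
	atomic_store_explicit(&stable_early, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load_explicit(&clock_running, memory_order_relaxed))
		static_key_update("set_stable");
	/* else: init has not run yet; it is guaranteed to see stable_early */
}

/* roughly sched_clock_init() */
static void clock_init(void)
{
	atomic_store_explicit(&clock_running, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load_explicit(&stable_early, memory_order_relaxed))
		static_key_update("clock_init");
}

int main(void)
{
	/* sequential demo; in the kernel these run concurrently */
	set_stable();	/* sees clock_running == 0, defers to init */
	clock_init();	/* sees stable_early == 1, does the update */
	return 0;
}

With both fences in place, the forbidden outcome is that each side misses the other's store; without them, both loads could read 0 and the static-key update would be lost, which is exactly the race the new code closes.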
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 867b0a4b0893..966cc2bfcb77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2360,13 +2360,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 		}
 		wakeup = 0;
 	} else {
-		/*
-		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
-		 * would have made count negative); we must be careful to avoid
-		 * double-accounting blocked time after synchronizing decays.
-		 */
-		se->avg.last_runnable_update += __synchronize_entity_decay(se)
-							<< 20;
+		__synchronize_entity_decay(se);
 	}
 
 	/* migrated tasks did not contribute to our blocked load */
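For reference on the deleted hunk: the per-entity load tracking counts decay periods of 2^20 ns (about 1.05 ms), so the removed line advanced last_runnable_update by the period count returned from __synchronize_entity_decay() shifted left by 20 to convert periods to nanoseconds. A hypothetical sketch of that period arithmetic; the timestamp and period count are made up:

#include <stdint.h>
#include <stdio.h>

#define LOAD_PERIOD_SHIFT 20	/* one decay period = 2^20 ns ~= 1.05 ms */

int main(void)
{
	uint64_t last_runnable_update = 5000000;	/* hypothetical ns stamp */
	uint64_t decays = 3;				/* periods slept through */

	/* the removed adjustment: credit the slept periods back as time */
	last_runnable_update += decays << LOAD_PERIOD_SHIFT;

	printf("advanced by %llu ns, new stamp %llu\n",
	       (unsigned long long)(decays << LOAD_PERIOD_SHIFT),
	       (unsigned long long)last_runnable_update);
	return 0;
}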