Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/alarmtimer.c              |  6
-rw-r--r--  kernel/time/clocksource.c             |  4
-rw-r--r--  kernel/time/hrtimer.c                 | 18
-rw-r--r--  kernel/time/jiffies.c                 |  2
-rw-r--r--  kernel/time/ntp.c                     |  2
-rw-r--r--  kernel/time/posix-cpu-timers.c        |  6
-rw-r--r--  kernel/time/posix-timers.c            |  4
-rw-r--r--  kernel/time/test_udelay.c             |  7
-rw-r--r--  kernel/time/tick-broadcast-hrtimer.c  |  2
-rw-r--r--  kernel/time/tick-broadcast.c          | 20
-rw-r--r--  kernel/time/tick-common.c             |  7
-rw-r--r--  kernel/time/tick-oneshot.c            |  2
-rw-r--r--  kernel/time/tick-sched.c              |  2
-rw-r--r--  kernel/time/tick-sched.h              |  2
-rw-r--r--  kernel/time/time.c                    |  2
-rw-r--r--  kernel/time/timecounter.c             |  2
-rw-r--r--  kernel/time/timekeeping.c             | 46
-rw-r--r--  kernel/time/timer.c                   |  4
-rw-r--r--  kernel/time/vsyscall.c                |  2
19 files changed, 78 insertions, 62 deletions
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4d94e2b5499d..bea9d08b1698 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -2,13 +2,13 @@
/*
* Alarmtimer interface
*
- * This interface provides a timer which is similarto hrtimers,
+ * This interface provides a timer which is similar to hrtimers,
* but triggers a RTC alarm if the box is suspend.
*
* This interface is influenced by the Android RTC Alarm timer
* interface.
*
- * Copyright (C) 2010 IBM Corperation
+ * Copyright (C) 2010 IBM Corporation
*
* Author: John Stultz <john.stultz@linaro.org>
*/
@@ -811,7 +811,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
/**
* alarm_timer_nsleep - alarmtimer nanosleep
* @which_clock: clockid
- * @flags: determins abstime or relative
+ * @flags: determines abstime or relative
* @tsreq: requested sleep time (abs or rel)
*
* Handles clock_nanosleep calls against _ALARM clockids
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index cce484a2cc7c..1d1a61371b5a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -38,7 +38,7 @@
* calculated mult and shift factors. This guarantees that no 64bit
* overflow happens when the input value of the conversion is
* multiplied with the calculated mult factor. Larger ranges may
- * reduce the conversion accuracy by chosing smaller mult and shift
+ * reduce the conversion accuracy by choosing smaller mult and shift
* factors.
*/
void
@@ -518,7 +518,7 @@ static void clocksource_suspend_select(bool fallback)
* the suspend time when resuming system.
*
* This function is called late in the suspend process from timekeeping_suspend(),
- * that means processes are freezed, non-boot cpus and interrupts are disabled
+ * that means processes are frozen, non-boot cpus and interrupts are disabled
* now. It is therefore possible to start the suspend timer without taking the
* clocksource mutex.
*/
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5c9d968187ae..4a66725b1d4a 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -683,7 +683,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
* T1 is removed, so this code is called and would reprogram
* the hardware to 5s from now. Any hrtimer_start after that
* will not reprogram the hardware due to hang_detected being
- * set. So we'd effectivly block all timers until the T2 event
+ * set. So we'd effectively block all timers until the T2 event
* fires.
*/
if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
@@ -1019,7 +1019,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
* cpu_base->next_timer. This happens when we remove the first
* timer on a remote cpu. No harm as we never dereference
* cpu_base->next_timer. So the worst thing what can happen is
- * an superflous call to hrtimer_force_reprogram() on the
+ * an superfluous call to hrtimer_force_reprogram() on the
* remote cpu later on if the same timer gets enqueued again.
*/
if (reprogram && timer == cpu_base->next_timer)
@@ -1212,7 +1212,7 @@ static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
* The counterpart to hrtimer_cancel_wait_running().
*
* If there is a waiter for cpu_base->expiry_lock, then it was waiting for
- * the timer callback to finish. Drop expiry_lock and reaquire it. That
+ * the timer callback to finish. Drop expiry_lock and reacquire it. That
* allows the waiter to acquire the lock and make progress.
*/
static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
@@ -1398,7 +1398,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
int base;
/*
- * On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+ * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
* marked for hard interrupt expiry mode are moved into soft
* interrupt context for latency reasons and because the callbacks
* can invoke functions which might sleep on RT, e.g. spin_lock().
@@ -1430,7 +1430,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
* hrtimer_init - initialize a timer to the given clock
* @timer: the timer to be initialized
* @clock_id: the clock to be used
- * @mode: The modes which are relevant for intitialization:
+ * @mode: The modes which are relevant for initialization:
* HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
* HRTIMER_MODE_REL_SOFT
*
@@ -1487,7 +1487,7 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
* insufficient for that.
*
* The sequence numbers are required because otherwise we could still observe
- * a false negative if the read side got smeared over multiple consequtive
+ * a false negative if the read side got smeared over multiple consecutive
* __run_hrtimer() invocations.
*/
@@ -1588,7 +1588,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
* minimizing wakeups, not running timers at the
* earliest interrupt after their soft expiration.
* This allows us to avoid using a Priority Search
- * Tree, which can answer a stabbing querry for
+ * Tree, which can answer a stabbing query for
* overlapping intervals and instead use the simple
* BST we already have.
* We don't add extra wakeups by delaying timers that
@@ -1822,7 +1822,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
clockid_t clock_id, enum hrtimer_mode mode)
{
/*
- * On PREEMPT_RT enabled kernels hrtimers which are not explicitely
+ * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
* marked for hard interrupt expiry mode are moved into soft
* interrupt context either for latency reasons or because the
* hrtimer callback takes regular spinlocks or invokes other
@@ -1835,7 +1835,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
* the same CPU. That causes a latency spike due to the wakeup of
* a gazillion threads.
*
- * OTOH, priviledged real-time user space applications rely on the
+ * OTOH, privileged real-time user space applications rely on the
* low latency of hard interrupt wakeups. If the current task is in
* a real-time scheduling class, mark the mode for hard interrupt
* expiry.
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a5cffe2a1770..a492e4da69ba 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -44,7 +44,7 @@ static u64 jiffies_read(struct clocksource *cs)
* the timer interrupt frequency HZ and it suffers
* inaccuracies caused by missed or lost timer
* interrupts and the inability for the timer
- * interrupt hardware to accuratly tick at the
+ * interrupt hardware to accurately tick at the
* requested HZ value. It is also not recommended
* for "tick-less" systems.
*/
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5247afd7f345..406dccb79c2b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -544,7 +544,7 @@ static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
struct timespec64 *to_set,
const struct timespec64 *now)
{
- /* Allowed error in tv_nsec, arbitarily set to 5 jiffies in ns. */
+ /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
struct timespec64 delay = {.tv_sec = -1, .tv_nsec = set_offset_nsec};
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 9abe15255bc4..3bb96a8b49c9 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -279,7 +279,7 @@ void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
* @tsk: Task for which cputime needs to be started
* @samples: Storage for time samples
*
- * The thread group cputime accouting is avoided when there are no posix
+ * The thread group cputime accounting is avoided when there are no posix
* CPU timers armed. Before starting a timer it's required to check whether
* the time accounting is active. If not, a full update of the atomic
* accounting store needs to be done and the accounting enabled.
@@ -390,7 +390,7 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
/*
* If posix timer expiry is handled in task work context then
* timer::it_lock can be taken without disabling interrupts as all
- * other locking happens in task context. This requires a seperate
+ * other locking happens in task context. This requires a separate
* lock class key otherwise regular posix timer expiry would record
* the lock class being taken in interrupt context and generate a
* false positive warning.
@@ -1216,7 +1216,7 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
check_process_timers(tsk, &firing);
/*
- * The above timer checks have updated the exipry cache and
+ * The above timer checks have updated the expiry cache and
* because nothing can have queued or modified timers after
* sighand lock was taken above it is guaranteed to be
* consistent. So the next timer interrupt fastpath check
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index bf540f5a4115..dd5697d7347b 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1191,8 +1191,8 @@ SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
err = do_clock_adjtime(which_clock, &ktx);
- if (err >= 0)
- err = put_old_timex32(utp, &ktx);
+ if (err >= 0 && put_old_timex32(utp, &ktx))
+ return -EFAULT;
return err;
}
diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index 77c63005dc4e..13b11eb62685 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -21,7 +21,6 @@
#define DEBUGFS_FILENAME "udelay_test"
static DEFINE_MUTEX(udelay_test_lock);
-static struct dentry *udelay_test_debugfs_file;
static int udelay_test_usecs;
static int udelay_test_iterations = DEFAULT_ITERATIONS;
@@ -138,8 +137,8 @@ static const struct file_operations udelay_test_debugfs_ops = {
static int __init udelay_test_init(void)
{
mutex_lock(&udelay_test_lock);
- udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME,
- S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops);
+ debugfs_create_file(DEBUGFS_FILENAME, S_IRUSR, NULL, NULL,
+ &udelay_test_debugfs_ops);
mutex_unlock(&udelay_test_lock);
return 0;
@@ -150,7 +149,7 @@ module_init(udelay_test_init);
static void __exit udelay_test_exit(void)
{
mutex_lock(&udelay_test_lock);
- debugfs_remove(udelay_test_debugfs_file);
+ debugfs_remove(debugfs_lookup(DEBUGFS_FILENAME, NULL));
mutex_unlock(&udelay_test_lock);
}
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index b5a65e212df2..797eb93103ad 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -53,7 +53,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
* reasons.
*
* Each caller tries to arm the hrtimer on its own CPU, but if the
- * hrtimer callbback function is currently running, then
+ * hrtimer callback function is currently running, then
* hrtimer_start() cannot move it and the timer stays on the CPU on
* which it is assigned at the moment.
*
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 5a23829372c7..a44055228796 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -107,6 +107,19 @@ void tick_install_broadcast_device(struct clock_event_device *dev)
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_broadcast_mask))
tick_broadcast_start_periodic(dev);
+
+ if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return;
+
+ /*
+ * If the system already runs in oneshot mode, switch the newly
+ * registered broadcast device to oneshot mode explicitly.
+ */
+ if (tick_broadcast_oneshot_active()) {
+ tick_broadcast_switch_to_oneshot();
+ return;
+ }
+
/*
* Inform all cpus about this. We might be in a situation
* where we did not switch to oneshot mode because the per cpu
@@ -115,8 +128,7 @@
* notification the systems stays stuck in periodic mode
* forever.
*/
- if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
- tick_clock_notify();
+ tick_clock_notify();
}
/*
@@ -157,7 +169,7 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
}
/*
- * Check, if the device is disfunctional and a place holder, which
+ * Check, if the device is dysfunctional and a placeholder, which
* needs to be handled by the broadcast device.
*/
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
@@ -391,7 +403,7 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
* - the broadcast device exists
* - the broadcast device is not a hrtimer based one
* - the broadcast device is in periodic mode to
- * avoid a hickup during switch to oneshot mode
+ * avoid a hiccup during switch to oneshot mode
*/
if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 9d3a22510bab..e15bc0ef1912 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -348,12 +348,7 @@ void tick_check_new_device(struct clock_event_device *newdev)
td = &per_cpu(tick_cpu_device, cpu);
curdev = td->evtdev;
- /* cpu local device ? */
- if (!tick_check_percpu(curdev, newdev, cpu))
- goto out_bc;
-
- /* Preference decision */
- if (!tick_check_preferred(curdev, newdev))
+ if (!tick_check_replacement(curdev, newdev))
goto out_bc;
if (!try_module_get(newdev->owner))
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index f9745d47425a..475ecceda768 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -45,7 +45,7 @@ int tick_program_event(ktime_t expires, int force)
}
/**
- * tick_resume_onshot - resume oneshot mode
+ * tick_resume_oneshot - resume oneshot mode
*/
void tick_resume_oneshot(void)
{
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0cc55791b2b6..d34894f3862a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -751,7 +751,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
* Aside of that check whether the local timer softirq is
* pending. If so its a bad idea to call get_next_timer_interrupt()
* because there is an already expired timer, so it will request
- * immeditate expiry, which rearms the hardware timer with a
+ * immediate expiry, which rearms the hardware timer with a
* minimal delta which brings us back to this place
* immediately. Lather, rinse and repeat...
*/
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index 4fb06527cf64..d952ae393423 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -29,7 +29,7 @@ enum tick_nohz_mode {
* @inidle: Indicator that the CPU is in the tick idle mode
* @tick_stopped: Indicator that the idle tick has been stopped
* @idle_active: Indicator that the CPU is actively in the tick idle mode;
- * it is resetted during irq handling phases.
+ * it is reset during irq handling phases.
* @do_timer_lst: CPU was the last one doing do_timer before going idle
* @got_idle_tick: Tick timer function has run with @inidle set
* @last_tick: Store the last tick expiry time when the tick
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 3985b2b32d08..29923b20e0e4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -571,7 +571,7 @@ EXPORT_SYMBOL(__usecs_to_jiffies);
/*
* The TICK_NSEC - 1 rounds up the value to the next resolution. Note
* that a remainder subtract here would not do the right thing as the
- * resolution values don't fall on second boundries. I.e. the line:
+ * resolution values don't fall on second boundaries. I.e. the line:
* nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
* Note that due to the small error in the multiplier here, this
* rounding is incorrect for sufficiently large values of tv_nsec, but
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 85b98e727306..e6285288d765 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -76,7 +76,7 @@ static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
return ns;
}
-u64 timecounter_cyc2time(struct timecounter *tc,
+u64 timecounter_cyc2time(const struct timecounter *tc,
u64 cycle_tstamp)
{
u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6aee5768c86f..81fe2a33b80c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -596,14 +596,14 @@ EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
* careful cache layout of the timekeeper because the sequence count and
* struct tk_read_base would then need two cache lines instead of one.
*
- * Access to the time keeper clock source is disabled accross the innermost
+ * Access to the time keeper clock source is disabled across the innermost
* steps of suspend/resume. The accessors still work, but the timestamps
* are frozen until time keeping is resumed which happens very early.
*
* For regular suspend/resume there is no observable difference vs. sched
* clock, but it might affect some of the nasty low level debug printks.
*
- * OTOH, access to sched clock is not guaranteed accross suspend/resume on
+ * OTOH, access to sched clock is not guaranteed across suspend/resume on
* all systems either so it depends on the hardware in use.
*
* If that turns out to be a real problem then this could be mitigated by
@@ -899,7 +899,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
/**
- * ktime_mono_to_any() - convert mononotic time to any other time
+ * ktime_mono_to_any() - convert monotonic time to any other time
* @tmono: time to convert.
* @offs: which offset to use
*/
@@ -1427,35 +1427,45 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
static int change_clocksource(void *data)
{
struct timekeeper *tk = &tk_core.timekeeper;
- struct clocksource *new, *old;
+ struct clocksource *new, *old = NULL;
unsigned long flags;
+ bool change = false;
new = (struct clocksource *) data;
- raw_spin_lock_irqsave(&timekeeper_lock, flags);
- write_seqcount_begin(&tk_core.seq);
-
- timekeeping_forward_now(tk);
/*
* If the cs is in module, get a module reference. Succeeds
* for built-in code (owner == NULL) as well.
*/
if (try_module_get(new->owner)) {
- if (!new->enable || new->enable(new) == 0) {
- old = tk->tkr_mono.clock;
- tk_setup_internals(tk, new);
- if (old->disable)
- old->disable(old);
- module_put(old->owner);
- } else {
+ if (!new->enable || new->enable(new) == 0)
+ change = true;
+ else
module_put(new->owner);
- }
}
+
+ raw_spin_lock_irqsave(&timekeeper_lock, flags);
+ write_seqcount_begin(&tk_core.seq);
+
+ timekeeping_forward_now(tk);
+
+ if (change) {
+ old = tk->tkr_mono.clock;
+ tk_setup_internals(tk, new);
+ }
+
timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
write_seqcount_end(&tk_core.seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ if (old) {
+ if (old->disable)
+ old->disable(old);
+
+ module_put(old->owner);
+ }
+
return 0;
}
@@ -1948,7 +1958,7 @@ static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
* xtime_nsec_1 = offset + xtime_nsec_2
* Which gives us:
* xtime_nsec_2 = xtime_nsec_1 - offset
- * Which simplfies to:
+ * Which simplifies to:
* xtime_nsec -= offset
*/
if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
@@ -2336,7 +2346,7 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc)
/*
* Validate if a timespec/timeval used to inject a time
- * offset is valid. Offsets can be postive or negative, so
+ * offset is valid. Offsets can be positive or negative, so
* we don't check tv_sec. The value of the timeval/timespec
* is the sum of its fields,but *NOTE*:
* The field tv_usec/tv_nsec must always be non-negative and
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index f475f1a027c8..d111adf4a0cb 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -894,7 +894,7 @@ static inline void forward_timer_base(struct timer_base *base)
/*
* No need to forward if we are close enough below jiffies.
* Also while executing timers, base->clk is 1 offset ahead
- * of jiffies to avoid endless requeuing to current jffies.
+ * of jiffies to avoid endless requeuing to current jiffies.
*/
if ((long)(jnow - base->clk) < 1)
return;
@@ -1271,7 +1271,7 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
* The counterpart to del_timer_wait_running().
*
* If there is a waiter for base->expiry_lock, then it was waiting for the
- * timer callback to finish. Drop expiry_lock and reaquire it. That allows
+ * timer callback to finish. Drop expiry_lock and reacquire it. That allows
* the waiter to acquire the lock and make progress.
*/
static void timer_sync_wait_running(struct timer_base *base)
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 88e6b8ed6ca5..f0d5062d9cbc 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -108,7 +108,7 @@ void update_vsyscall(struct timekeeper *tk)
/*
* If the current clocksource is not VDSO capable, then spare the
- * update of the high reolution parts.
+ * update of the high resolution parts.
*/
if (clock_mode != VDSO_CLOCKMODE_NONE)
update_vdso_data(vdata, tk);
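Aside from the comment-typo cleanups, the one user-visible behaviour change above is in clock_adjtime32(): a failed copy-out to user space now returns -EFAULT, while a successful adjustment keeps its own return value instead of having it replaced by the copy-out status. Below is a minimal stand-alone C sketch of the before/after error handling; do_adjtime_stub() and copy_out_stub() are hypothetical stand-ins, not kernel functions.

#include <stdio.h>

#define EFAULT_ERR (-14)	/* stand-in for -EFAULT */

/* Hypothetical stubs: an adjustment that reports a positive clock
 * state (e.g. TIME_INS == 1) and a copy-out that may fail. */
static long do_adjtime_stub(void)	{ return 1; }
static int  copy_out_stub(int fail)	{ return fail ? 1 : 0; }

/* Old pattern: the copy-out result replaces the adjustment result,
 * so a successful copy turns a positive clock state into 0. */
static long adjtime_old(int copy_fails)
{
	long err = do_adjtime_stub();

	if (err >= 0)
		err = copy_out_stub(copy_fails) ? EFAULT_ERR : 0;
	return err;
}

/* New pattern (as in the clock_adjtime32 hunk above): only a failed
 * copy-out overrides the result; a successful one leaves it untouched. */
static long adjtime_new(int copy_fails)
{
	long err = do_adjtime_stub();

	if (err >= 0 && copy_out_stub(copy_fails))
		return EFAULT_ERR;
	return err;
}

int main(void)
{
	printf("copy ok:   old=%ld new=%ld\n", adjtime_old(0), adjtime_new(0));
	printf("copy fail: old=%ld new=%ld\n", adjtime_old(1), adjtime_new(1));
	return 0;
}

With the old pattern, a copy-out that succeeded also clobbered a positive clock-state return value with 0; the new pattern preserves it and still reports -EFAULT when the copy fails.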