-rw-r--r--  arch/ia64/kernel/time.c        |  8
-rw-r--r--  arch/powerpc/kernel/time.c     |  4
-rw-r--r--  arch/s390/kernel/vtime.c       |  4
-rw-r--r--  arch/s390/kvm/kvm-s390.c       |  4
-rw-r--r--  include/linux/hardirq.h        | 15
-rw-r--r--  include/linux/kernel_stat.h    |  9
-rw-r--r--  include/linux/kvm_host.h       | 12
-rw-r--r--  include/linux/vtime.h          | 47
-rw-r--r--  kernel/sched/cputime.c         | 20
-rw-r--r--  kernel/softirq.c               |  6
10 files changed, 89 insertions(+), 40 deletions(-)
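
For orientation, the patch splits the vtime hooks into raw helpers and irq-safe wrappers and moves their declarations into the new <linux/vtime.h>. The rough map below is an illustrative summary only, not part of the diff:

/*
 * vtime entry points after this patch (declared in include/linux/vtime.h):
 *
 *   __vtime_account_system()   raw system-time accounting, caller disables irqs
 *   __vtime_account_idle()     raw idle-time accounting, caller disables irqs
 *   vtime_account_system()     irq-safe wrapper around __vtime_account_system()
 *   vtime_account()            irq-safe, picks system vs. idle accounting
 *   vtime_account_irq_enter()  hook run on hardirq/softirq entry
 *   vtime_account_irq_exit()   hook run on hardirq/softirq exit
 *   irqtime_account_irq()      CONFIG_IRQ_TIME_ACCOUNTING backend (was vtime_account)
 */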
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index f6388216080d..5e4850305d3f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -106,9 +106,9 @@ void vtime_task_switch(struct task_struct *prev)
struct thread_info *ni = task_thread_info(current);
if (idle_task(smp_processor_id()) != prev)
- vtime_account_system(prev);
+ __vtime_account_system(prev);
else
- vtime_account_idle(prev);
+ __vtime_account_idle(prev);
vtime_account_user(prev);
@@ -135,14 +135,14 @@ static cputime_t vtime_delta(struct task_struct *tsk)
return delta_stime;
}
-void vtime_account_system(struct task_struct *tsk)
+void __vtime_account_system(struct task_struct *tsk)
{
cputime_t delta = vtime_delta(tsk);
account_system_time(tsk, 0, delta, delta);
}
-void vtime_account_idle(struct task_struct *tsk)
+void __vtime_account_idle(struct task_struct *tsk)
{
account_idle_time(vtime_delta(tsk));
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ce4cb772dc78..0db456f30d45 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -336,7 +336,7 @@ static u64 vtime_delta(struct task_struct *tsk,
return delta;
}
-void vtime_account_system(struct task_struct *tsk)
+void __vtime_account_system(struct task_struct *tsk)
{
u64 delta, sys_scaled, stolen;
@@ -346,7 +346,7 @@ void vtime_account_system(struct task_struct *tsk)
account_steal_time(stolen);
}
-void vtime_account_idle(struct task_struct *tsk)
+void __vtime_account_idle(struct task_struct *tsk)
{
u64 delta, sys_scaled, stolen;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 790334427895..783e988c4e1e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -140,6 +140,10 @@ void vtime_account(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(vtime_account);
+void __vtime_account_system(struct task_struct *tsk)
+__attribute__((alias("vtime_account")));
+EXPORT_SYMBOL_GPL(__vtime_account_system);
+
void __kprobes vtime_stop_cpu(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ecced9d18986..d91a95568002 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -608,9 +608,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_s390_deliver_pending_interrupts(vcpu);
vcpu->arch.sie_block->icptcode = 0;
- local_irq_disable();
kvm_guest_enter();
- local_irq_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
@@ -629,9 +627,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
- local_irq_disable();
kvm_guest_exit();
- local_irq_enable();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;
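
The local_irq_disable()/local_irq_enable() pairs around kvm_guest_enter()/kvm_guest_exit() can go because the vtime_account_system() wrapper introduced in kernel/sched/cputime.c below saves and restores interrupt flags itself. A minimal sketch of the resulting enter path, condensed from the kvm_host.h and cputime.c hunks in this patch (not a verbatim copy of the patched sources):

void vtime_account_system(struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);		/* irq handling moved into the wrapper */
	__vtime_account_system(tsk);
	local_irq_restore(flags);
}

static inline void kvm_guest_enter(void)
{
	BUG_ON(preemptible());
	vtime_account_system(current);	/* no caller-side irq disabling needed */
	current->flags |= PF_VCPU;
}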
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index cab3da3d0949..624ef3f45c8e 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -4,6 +4,7 @@
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
+#include <linux/vtime.h>
#include <asm/hardirq.h>
/*
@@ -129,16 +130,6 @@ extern void synchronize_irq(unsigned int irq);
# define synchronize_irq(irq) barrier()
#endif
-struct task_struct;
-
-#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
-static inline void vtime_account(struct task_struct *tsk)
-{
-}
-#else
-extern void vtime_account(struct task_struct *tsk);
-#endif
-
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
static inline void rcu_nmi_enter(void)
@@ -162,7 +153,7 @@ extern void rcu_nmi_exit(void);
*/
#define __irq_enter() \
do { \
- vtime_account(current); \
+ vtime_account_irq_enter(current); \
add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \
} while (0)
@@ -178,7 +169,7 @@ extern void irq_enter(void);
#define __irq_exit() \
do { \
trace_hardirq_exit(); \
- vtime_account(current); \
+ vtime_account_irq_exit(current); \
sub_preempt_count(HARDIRQ_OFFSET); \
} while (0)
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 36d12f0884c3..1865b1f29770 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -7,6 +7,7 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/vtime.h>
#include <asm/irq.h>
#include <asm/cputime.h>
@@ -130,12 +131,4 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void vtime_task_switch(struct task_struct *prev);
-extern void vtime_account_system(struct task_struct *tsk);
-extern void vtime_account_idle(struct task_struct *tsk);
-#else
-static inline void vtime_task_switch(struct task_struct *prev) { }
-#endif
-
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 93bfc9f9815c..0e2212fe4784 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -737,7 +737,11 @@ static inline int kvm_deassign_device(struct kvm *kvm,
static inline void kvm_guest_enter(void)
{
BUG_ON(preemptible());
- vtime_account(current);
+ /*
+ * This is running in ioctl context so we can avoid
+ * the call to vtime_account() with its unnecessary idle check.
+ */
+ vtime_account_system(current);
current->flags |= PF_VCPU;
/* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
@@ -751,7 +755,11 @@ static inline void kvm_guest_enter(void)
static inline void kvm_guest_exit(void)
{
- vtime_account(current);
+ /*
+ * This is running in ioctl context so we can avoid
+ * the call to vtime_account() with its unnecessary idle check.
+ */
+ vtime_account_system(current);
current->flags &= ~PF_VCPU;
}
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
new file mode 100644
index 000000000000..0c2a2d303020
--- /dev/null
+++ b/include/linux/vtime.h
@@ -0,0 +1,47 @@
+#ifndef _LINUX_KERNEL_VTIME_H
+#define _LINUX_KERNEL_VTIME_H
+
+struct task_struct;
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_task_switch(struct task_struct *prev);
+extern void __vtime_account_system(struct task_struct *tsk);
+extern void vtime_account_system(struct task_struct *tsk);
+extern void __vtime_account_idle(struct task_struct *tsk);
+extern void vtime_account(struct task_struct *tsk);
+#else
+static inline void vtime_task_switch(struct task_struct *prev) { }
+static inline void __vtime_account_system(struct task_struct *tsk) { }
+static inline void vtime_account_system(struct task_struct *tsk) { }
+static inline void vtime_account(struct task_struct *tsk) { }
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+extern void irqtime_account_irq(struct task_struct *tsk);
+#else
+static inline void irqtime_account_irq(struct task_struct *tsk) { }
+#endif
+
+static inline void vtime_account_irq_enter(struct task_struct *tsk)
+{
+ /*
+ * A hardirq can interrupt the idle task at any time, so we need
+ * vtime_account(), which performs the idle check under CONFIG_VIRT_CPU_ACCOUNTING.
+ * A softirq can also interrupt the idle task directly if it calls
+ * local_bh_enable(). Such a case probably doesn't exist, but we never know.
+ * Ksoftirqd is not affected because idle time is flushed on context switch.
+ * Softirqs at the end of hardirqs are not a problem either, because the
+ * idle time has already been flushed on hardirq entry.
+ */
+ vtime_account(tsk);
+ irqtime_account_irq(tsk);
+}
+
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+ /* On hard|softirq exit we always account to hard|softirq cputime */
+ __vtime_account_system(tsk);
+ irqtime_account_irq(tsk);
+}
+
+#endif /* _LINUX_KERNEL_VTIME_H */
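
With this header in place, an architecture that selects CONFIG_VIRT_CPU_ACCOUNTING only needs to supply the double-underscore helpers (and may override vtime_account(), as s390 does). A sketch of a minimal backend, modelled on the ia64 hunk above; arch_vtime_delta() is a hypothetical placeholder for the architecture's per-task cputime delta:

/* Sketch of a minimal CONFIG_VIRT_CPU_ACCOUNTING backend. */
void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta = arch_vtime_delta(tsk);	/* hypothetical helper */

	account_system_time(tsk, 0, delta, delta);
}

void __vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(arch_vtime_delta(tsk));	/* hypothetical helper */
}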
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 81b763ba58a6..8d859dae5bed 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
* Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
-void vtime_account(struct task_struct *curr)
+void irqtime_account_irq(struct task_struct *curr)
{
unsigned long flags;
s64 delta;
@@ -73,7 +73,7 @@ void vtime_account(struct task_struct *curr)
irq_time_write_end();
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(irqtime_account_irq);
static int irqtime_account_hi_update(void)
{
@@ -433,10 +433,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
*st = cputime.stime;
}
+void vtime_account_system(struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __vtime_account_system(tsk);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(vtime_account_system);
+
/*
* Archs that account the whole time spent in the idle task
* (outside irq) as idle time can rely on this and just implement
- * vtime_account_system() and vtime_account_idle(). Archs that
+ * __vtime_account_system() and __vtime_account_idle(). Archs that
* define idle time differently (s390 only includes the
* time spent by the CPU when it's in low power mode) must override
* vtime_account().
@@ -449,9 +459,9 @@ void vtime_account(struct task_struct *tsk)
local_irq_save(flags);
if (in_interrupt() || !is_idle_task(tsk))
- vtime_account_system(tsk);
+ __vtime_account_system(tsk);
else
- vtime_account_idle(tsk);
+ __vtime_account_idle(tsk);
local_irq_restore(flags);
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index cc96bdc0c2c9..ed567babe789 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
- vtime_account(current);
+ vtime_account_irq_enter(current);
__local_bh_disable((unsigned long)__builtin_return_address(0),
SOFTIRQ_OFFSET);
@@ -272,7 +272,7 @@ restart:
lockdep_softirq_exit();
- vtime_account(current);
+ vtime_account_irq_exit(current);
__local_bh_enable(SOFTIRQ_OFFSET);
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
*/
void irq_exit(void)
{
- vtime_account(current);
+ vtime_account_irq_exit(current);
trace_hardirq_exit();
sub_preempt_count(IRQ_EXIT_OFFSET);
if (!in_interrupt() && local_softirq_pending())