author     Haozhong Zhang  2015-10-20 15:39:03 +0800
committer  Paolo Bonzini   2015-11-10 12:06:15 +0100
commit     35181e86df97e4223f4a28fb33e2bcf3b73de141 (patch)
tree       e7df6020aaf6649eb287aa5c75f3e6cc324d9347 /include
parent     ad721883e9c5f46cc5fa9496bc12c097c6238b4a (diff)
KVM: x86: Add a common TSC scaling function
VMX and SVM calculate the TSC scaling ratio with similar logic, so this
patch generalizes it into a common TSC scaling function.

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
[Inline the multiplication and shift steps into mul_u64_u64_shr. Remove
 BUG_ON. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
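The common scaling helper itself lands on the arch/x86 side and is not part of
this include-only diff. The sketch below only illustrates how the new
mul_u64_u64_shr() primitive is meant to be used for TSC scaling; the function
name scale_tsc_sketch and the 48-bit fraction width TSC_RATIO_FRAC_BITS are
hypothetical, not taken from the patch:

#include <linux/math64.h>

/* Hypothetical fraction width; the real value is hardware-defined. */
#define TSC_RATIO_FRAC_BITS	48

/*
 * scaled = (tsc * ratio) >> frac_bits
 * mul_u64_u64_shr() keeps the full 128-bit intermediate product, so the
 * multiplication cannot overflow before the shift is applied.
 */
static u64 scale_tsc_sketch(u64 tsc, u64 ratio)
{
	return mul_u64_u64_shr(tsc, ratio, TSC_RATIO_FRAC_BITS);
}

With a 48-bit fraction, a ratio of 1ULL << 48 means "no scaling", while for
example 3ULL << 47 would run the guest TSC at 1.5x the host rate.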
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kvm_host.h |  1 +
-rw-r--r--  include/linux/math64.h   | 51 +++++
2 files changed, 52 insertions(+), 0 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 242a6d2b53ff..5706a2108f0a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1183,4 +1183,5 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+
#endif
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c45c089bfdac..44282ec7b682 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
}
#endif /* mul_u64_u32_shr */
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
#else
#ifndef mul_u64_u32_shr
@@ -161,6 +168,50 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
}
#endif /* mul_u64_u32_shr */
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
+{
+ union {
+ u64 ll;
+ struct {
+#ifdef __BIG_ENDIAN
+ u32 high, low;
+#else
+ u32 low, high;
+#endif
+ } l;
+ } rl, rm, rn, rh, a0, b0;
+ u64 c;
+
+ a0.ll = a;
+ b0.ll = b;
+
+ rl.ll = (u64)a0.l.low * b0.l.low;
+ rm.ll = (u64)a0.l.low * b0.l.high;
+ rn.ll = (u64)a0.l.high * b0.l.low;
+ rh.ll = (u64)a0.l.high * b0.l.high;
+
+ /*
+ * Each of these lines computes a 64-bit intermediate result into "c",
+ * starting at bits 32-95. The low 32-bits go into the result of the
+ * multiplication, the high 32-bits are carried into the next step.
+ */
+ rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
+ rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
+ rh.l.high = (c >> 32) + rh.l.high;
+
+ /*
+ * The 128-bit result of the multiplication is in rl.ll and rh.ll,
+ * shift it right and throw away the high part of the result.
+ */
+ if (shift == 0)
+ return rl.ll;
+ if (shift < 64)
+ return (rl.ll >> shift) | (rh.ll << (64 - shift));
+ return rh.ll >> (shift & 63);
+}
+#endif /* mul_u64_u64_shr */
+
#endif
#endif /* _LINUX_MATH64_H */
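For reference, the non-__int128 fallback added above is schoolbook 64x64->128
multiplication in 32-bit limbs: after the two carry-propagation steps, rl holds
the low 64 bits of the product and rh the high 64 bits, and the final branches
perform the right shift across the two halves. The following user-space harness
is hypothetical and not part of the patch; it mirrors the same carry logic and
checks it against the __int128 reference on compilers that provide 128-bit
integers:

/* Hypothetical user-space check, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

/* Reference result using the compiler's 128-bit integers. */
static uint64_t mul_shr_ref(uint64_t a, uint64_t b, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * b) >> shift);
}

/* Same 32-bit-limb scheme as the kernel fallback above. */
static uint64_t mul_shr_limbs(uint64_t a, uint64_t b, unsigned int shift)
{
	uint64_t al = (uint32_t)a, ah = a >> 32;
	uint64_t bl = (uint32_t)b, bh = b >> 32;
	uint64_t rl = al * bl, rm = al * bh, rn = ah * bl, rh = ah * bh;
	uint64_t c;

	/* Fold the two middle partial products into bits 32..127. */
	c = (rl >> 32) + (uint32_t)rm + (uint32_t)rn;
	rl = (uint32_t)rl | (c << 32);
	c = (c >> 32) + (rm >> 32) + (rn >> 32) + (uint32_t)rh;
	rh = (uint32_t)c | (((c >> 32) + (rh >> 32)) << 32);

	/* Shift the 128-bit value in rh:rl right and keep the low 64 bits. */
	if (shift == 0)
		return rl;
	if (shift < 64)
		return (rl >> shift) | (rh << (64 - shift));
	return rh >> (shift & 63);
}

int main(void)
{
	uint64_t tsc = 0x123456789abcdef0ULL;
	uint64_t ratio = 3ULL << 47;	/* 1.5 in a 48-bit fixed-point fraction */

	printf("ref   = %#llx\n", (unsigned long long)mul_shr_ref(tsc, ratio, 48));
	printf("limbs = %#llx\n", (unsigned long long)mul_shr_limbs(tsc, ratio, 48));
	return 0;
}

Both functions should print the same value, i.e. tsc scaled by 1.5 for this
choice of ratio and shift.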