From ae2e15eb3b6c2a011bee615470bf52d2beb99a4b Mon Sep 17 00:00:00 2001
From: Glauber de Oliveira Costa
Date: Wed, 30 Jan 2008 13:31:40 +0100
Subject: x86: unify prefetch operations

This patch moves the prefetch[w]? functions to processor.h

Signed-off-by: Glauber de Oliveira Costa
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---
 include/asm-x86/processor.h | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

(limited to 'include/asm-x86/processor.h')

diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index c6b749a018a7..bfac9739f57e 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;
 #define ARCH_HAS_PREFETCHW
 #define ARCH_HAS_SPINLOCK_PREFETCH
 
+#ifdef CONFIG_X86_32
+#define BASE_PREFETCH	ASM_NOP4
+#define ARCH_HAS_PREFETCH
+#else
+#define BASE_PREFETCH	"prefetcht0 (%1)"
+#endif
+
+/* Prefetch instructions for Pentium III and AMD Athlon */
+/* It's not worth caring about 3dnow! prefetches for the K6,
+   because they are microcoded there and very slow.
+   However, we currently don't do prefetches for pre-XP Athlons;
+   that should be fixed. */
+static inline void prefetch(const void *x)
+{
+	alternative_input(BASE_PREFETCH,
+			  "prefetchnta (%1)",
+			  X86_FEATURE_XMM,
+			  "r" (x));
+}
+
+/* 3dnow! prefetch to get an exclusive cache line. Useful for
+   spinlocks to avoid one state transition in the cache coherency protocol. */
+static inline void prefetchw(const void *x)
+{
+	alternative_input(BASE_PREFETCH,
+			  "prefetchw (%1)",
+			  X86_FEATURE_3DNOW,
+			  "r" (x));
+}
+
 #define spin_lock_prefetch(x)	prefetchw(x)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
--
cgit v1.2.3
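
[Editor's note] For context on how these helpers are consumed, below is a
minimal usage sketch, not part of the patch. The struct item type and
sum_items() function are hypothetical; prefetch() is the inline function
added above, which alternative_input() patches at boot into prefetchnta
on CPUs advertising SSE (X86_FEATURE_XMM) and leaves as the NOP/prefetcht0
baseline otherwise. The classic caller pattern is to prefetch the next
node of a linked list while the current node is still being processed, so
the cache-line fill overlaps with the computation:

/*
 * Hypothetical usage sketch (not part of the patch): hint the next
 * node's cache line while summing the current node's payload.
 */
struct item {
	struct item	*next;
	unsigned long	payload;
};

static unsigned long sum_items(const struct item *head)
{
	unsigned long sum = 0;
	const struct item *p;

	for (p = head; p; p = p->next) {
		if (p->next)
			prefetch(p->next);	/* hint only; a prefetch never faults */
		sum += p->payload;
	}
	return sum;
}

prefetchw() is the write-intent variant: spin_lock_prefetch() maps to it
so that, per the comment in the patch, the lock's cache line arrives
already in an exclusive state, saving one coherency-protocol transition
when the subsequent locked write occurs.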