author | Daniel Schwierzeck | 2016-01-12 21:48:25 +0100
committer | Daniel Schwierzeck | 2016-01-16 21:06:46 +0100
commit | 23ff8633fd8ca75d2ffd4595b9c72bb1a5fdbd20
tree | f40ffd62bb772fc2760bc36d90488817c0afde35
parent | df50b3b41437e59f86730f25a696cf94b6ad2577
MIPS: sync I/O related header files with linux-4.4
Mainly sync asm/io.h to get a working ioremap() implementation
as well as the full set of I/O accessors. Pull in additional
header files to make this work.
Furthermore, port over the directory 'arch/mips/include/asm/mach-generic/',
which contains default definitions for I/O and memory spaces and default
implementations for mapping those spaces. All files in that directory
can be overridden by a SoC/machine.
Signed-off-by: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
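The override mechanism works through the include path: <spaces.h>, <ioremap.h>
and friends resolve to asm/mach-generic/ unless a machine directory is searched
first. A minimal sketch of such an override (the mach-mysoc directory name and
its -I ordering are assumptions for illustration, not part of this patch):

/* hypothetical arch/mips/include/asm/mach-mysoc/spaces.h */
#ifndef _ASM_MACH_MYSOC_SPACES_H
#define _ASM_MACH_MYSOC_SPACES_H

/* this made-up SoC has RAM starting at a non-zero physical offset */
#define PHYS_OFFSET _AC(0x20000000, UL)

/* everything else keeps the generic layout */
#include <asm/mach-generic/spaces.h>

#endif /* _ASM_MACH_MYSOC_SPACES_H */

With a 'cflags-y += -I$(srctree)/arch/mips/include/asm/mach-mysoc' placed ahead
of the generic include path added by the Makefile hunk below, #include <spaces.h>
then picks up the override.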
-rw-r--r-- | arch/mips/Makefile | 5
-rw-r--r-- | arch/mips/include/asm/addrspace.h | 4
-rw-r--r-- | arch/mips/include/asm/const.h | 27
-rw-r--r-- | arch/mips/include/asm/cpu-features.h | 30
-rw-r--r-- | arch/mips/include/asm/io.h | 761
-rw-r--r-- | arch/mips/include/asm/mach-generic/cpu-feature-overrides.h | 11
-rw-r--r-- | arch/mips/include/asm/mach-generic/ioremap.h | 32
-rw-r--r-- | arch/mips/include/asm/mach-generic/mangle-port.h | 50
-rw-r--r-- | arch/mips/include/asm/mach-generic/spaces.h | 102
-rw-r--r-- | arch/mips/include/asm/pgtable-bits.h | 283
10 files changed, 941 insertions(+), 364 deletions(-)
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 90cd5906c6a..2133e7e065d 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -25,4 +25,7 @@ tune-$(CONFIG_MIPS_TUNE_4KC) += -mtune=4kc
 tune-$(CONFIG_MIPS_TUNE_14KC) += -mtune=14kc
 tune-$(CONFIG_MIPS_TUNE_24KC) += -mtune=24kc
 
-PLATFORM_CPPFLAGS += $(arch-y) $(tune-y)
+# Include default header files
+cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
+
+PLATFORM_CPPFLAGS += $(arch-y) $(tune-y) $(cflags-y)
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index b768bb5081c..e7fe6ed7e4b 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -10,6 +10,8 @@
 #ifndef _ASM_ADDRSPACE_H
 #define _ASM_ADDRSPACE_H
 
+#include <spaces.h>
+
 /*
  * Configure language
  */
@@ -50,7 +52,7 @@
  */
 #define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
 #define XPHYSADDR(a) ((_ACAST64_(a)) & \
- _CONST64_(0x000000ffffffffff))
+ _CONST64_(0x0000ffffffffffff))
 
 #ifdef CONFIG_64BIT
diff --git a/arch/mips/include/asm/const.h b/arch/mips/include/asm/const.h
new file mode 100644
index 00000000000..c872bfd25e1
--- /dev/null
+++ b/arch/mips/include/asm/const.h
@@ -0,0 +1,27 @@
+/* const.h: Macros for dealing with constants. */
+
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
+
+/* Some constant macros are used in both assembler and
+ * C code. Therefore we cannot annotate them always with
+ * 'UL' and other type specifiers unilaterally. We
+ * use the following macros to deal with this.
+ *
+ * Similarly, _AT() will cast an expression with a type in C, but
+ * leave it unchanged in asm.
+ */
+
+#ifdef __ASSEMBLY__
+#define _AC(X,Y) X
+#define _AT(T,X) X
+#else
+#define __AC(X,Y) (X##Y)
+#define _AC(X,Y) __AC(X,Y)
+#define _AT(T,X) ((T)(X))
+#endif
+
+#define _BITUL(x) (_AC(1,UL) << (x))
+#define _BITULL(x) (_AC(1,ULL) << (x))
+
+#endif /* !(_LINUX_CONST_H) */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
new file mode 100644
index 00000000000..a6e9d940190
--- /dev/null
+++ b/arch/mips/include/asm/cpu-features.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ * Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef __ASM_CPU_FEATURES_H
+#define __ASM_CPU_FEATURES_H
+
+#include <cpu-feature-overrides.h>
+
+#ifdef CONFIG_32BIT
+# ifndef cpu_has_64bits
+# define cpu_has_64bits 0
+# endif
+# ifndef cpu_has_64bit_addresses
+# define cpu_has_64bit_addresses 0
+# endif
+#endif
+
+#ifdef CONFIG_64BIT
+# ifndef cpu_has_64bits
+# define cpu_has_64bits 1
+# endif
+# ifndef cpu_has_64bit_addresses
+# define cpu_has_64bit_addresses 1
+# endif
+#endif
+
+#endif /* __ASM_CPU_FEATURES_H */
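The point of the _AC()/_AT() helpers in const.h above is that one constant can
serve both C and assembler sources. A small sketch (the function is invented
for illustration):

#include <asm/const.h>

#define EXAMPLE_BASE _AC(0x80000000, UL) /* hypothetical constant */

/* In C this expands to 0x80000000UL; in a .S file the UL suffix,
 * which the assembler would reject, is dropped automatically. */
static inline int in_example_window(unsigned long addr)
{
	return addr >= EXAMPLE_BASE && addr < EXAMPLE_BASE + 0x20000000;
}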
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index a7ab087c0d4..eb06afafaed 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -1,21 +1,28 @@
 /*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
  * Copyright (C) 1994, 1995 Waldorf GmbH
- * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1994 - 2000, 06 Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2000 FSMLabs, Inc.
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ *	Author: Maciej W. Rozycki <macro@mips.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0
  */
 #ifndef _ASM_IO_H
 #define _ASM_IO_H
 
-#if 0
-#include <linux/pagemap.h>
-#endif
+#include <linux/compiler.h>
+#include <linux/types.h>
+
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <asm/cpu-features.h>
+#include <asm/pgtable-bits.h>
+#include <asm/processor.h>
+#include <asm/string.h>
+
+#include <ioremap.h>
+#include <mangle-port.h>
+#include <spaces.h>
 
 /*
  * Slowdown I/O port space accesses for antique hardware.
  */
@@ -23,44 +30,20 @@
 #undef CONF_SLOWDOWN_IO
 
 /*
- * Sane hardware offers swapping of I/O space accesses in hardware; less
- * sane hardware forces software to fiddle with this ...
+ * Raw operations are never swapped in software. OTOH values that raw
+ * operations are working on may or may not have been swapped by the bus
+ * hardware. An example use would be for flash memory that's used for
+ * execute in place.
  */
-#if defined(CONFIG_SWAP_IO_SPACE) && defined(__MIPSEB__)
+# define __raw_ioswabb(a, x) (x)
+# define __raw_ioswabw(a, x) (x)
+# define __raw_ioswabl(a, x) (x)
+# define __raw_ioswabq(a, x) (x)
+# define ____raw_ioswabq(a, x) (x)
 
-#define __ioswab8(x) (x)
-#define __ioswab16(x) swab16(x)
-#define __ioswab32(x) swab32(x)
-
-#else
-
-#define __ioswab8(x) (x)
-#define __ioswab16(x) (x)
-#define __ioswab32(x) (x)
-
-#endif
+/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
 
-/*
- * This file contains the definitions for the MIPS counterpart of the
- * x86 in/out instructions. This heap of macros and C results in much
- * better code than the approach of doing it in plain C. The macros
- * result in code that is to fast for certain hardware. On the other
- * side the performance of the string functions should be improved for
- * sake of certain devices like EIDE disks that do highspeed polled I/O.
- *
- * Ralf
- *
- * This file contains the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl and the "string versions" of the same
- * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
- * versions of the single-IO instructions (inb_p/inw_p/..).
- *
- * This file is not meant to be obfuscating: it's just complicated
- * to (a) handle it all in a way that makes gcc able to optimize it
- * as well as possible and (b) trying to avoid writing the same thing
- * over and over again with slight variations and possibly making a
- * mistake somewhere.
- */
+#define IO_SPACE_LIMIT 0xffff
 
 /*
  * On MIPS I/O ports are memory mapped, so we access them using normal
@@ -84,6 +67,7 @@ extern const unsigned long mips_io_port_base;
 static inline void set_io_port_base(unsigned long base)
 {
 	*(unsigned long *) &mips_io_port_base = base;
+	barrier();
 }
 
 /*
@@ -114,378 +98,429 @@ static inline void set_io_port_base(unsigned long base)
 #endif
 
 /*
- * Change virtual addresses to physical addresses and vv.
- * These are trivial on the 1:1 Linux/MIPS mapping
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
  */
-static inline phys_addr_t virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile const void *address)
 {
-#ifndef CONFIG_64BIT
-	return CPHYSADDR(address);
+	unsigned long addr = (unsigned long)address;
+
+	/* this corresponds to kernel implementation of __pa() */
+#ifdef CONFIG_64BIT
+	if (addr < CKSEG0)
+		return XPHYSADDR(addr);
+
+	return CPHYSADDR(addr);
 #else
-	return XPHYSADDR(address);
+	return addr - PAGE_OFFSET + PHYS_OFFSET;
 #endif
 }
 
-static inline void * phys_to_virt(unsigned long address)
+/*
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+static inline void *phys_to_virt(unsigned long address)
 {
-#ifndef CONFIG_64BIT
-	return (void *)KSEG0ADDR(address);
-#else
-	return (void *)CKSEG0ADDR(address);
-#endif
+	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
 }
 
 /*
- * IO bus memory addresses are also 1:1 with the physical address
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
-static inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
 {
-#ifndef CONFIG_64BIT
-	return CPHYSADDR(address);
-#else
-	return XPHYSADDR(address);
-#endif
+	return (unsigned long)address - PAGE_OFFSET;
 }
 
-static inline void * bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
 {
-#ifndef CONFIG_64BIT
-	return (void *)KSEG0ADDR(address);
-#else
-	return (void *)CKSEG0ADDR(address);
-#endif
+	return (void *)(address + PAGE_OFFSET);
 }
 
+#define isa_page_to_bus page_to_phys
+
 /*
- * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped
- * for the processor.
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them for x86 for legacy drivers, though.
  */
-extern unsigned long isa_slot_offset;
-
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
 
-#if 0
-static inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
+					   unsigned long flags)
 {
-	return __ioremap(offset, size, _CACHE_UNCACHED);
-}
+	void __iomem *addr;
+	phys_addr_t phys_addr;
 
-static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
-{
-	return __ioremap(offset, size, _CACHE_UNCACHED);
-}
+	addr = plat_ioremap(offset, size, flags);
+	if (addr)
+		return addr;
 
-extern void iounmap(void *addr);
-#endif
+	phys_addr = fixup_bigphys_addr(offset, size);
+	return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr);
+}
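On a generic machine plat_ioremap() returns NULL (see the new
mach-generic/ioremap.h further down), so __ioremap_mode() always lands in the
CKSEG1 fallback. A worked sketch of what that computes, assuming a classic
MIPS32 memory map and no platform hooks:

/* device registers at physical 0x1fd00000 */
void __iomem *regs = ioremap(0x1fd00000, 0x1000);
/* CKSEG1ADDR(0x1fd00000) = 0xa0000000 | 0x1fd00000 = 0xbfd00000,
 * i.e. an uncached KSEG1 pointer, no page tables involved */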
 /*
- * XXX We need system specific versions of these to handle EISA address bits
- * 24-31 on SNI.
- * XXX more SNI hacks.
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
  */
-#define __raw_readb(addr) (*(volatile unsigned char *)(addr))
-#define __raw_readw(addr) (*(volatile unsigned short *)(addr))
-#define __raw_readl(addr) (*(volatile unsigned int *)(addr))
-#define readb(addr) __raw_readb((addr))
-#define readw(addr) __ioswab16(__raw_readw((addr)))
-#define readl(addr) __ioswab32(__raw_readl((addr)))
-
-#define __raw_writeb(b, addr) (*(volatile unsigned char *)(addr)) = (b)
-#define __raw_writew(b, addr) (*(volatile unsigned short *)(addr)) = (b)
-#define __raw_writel(b, addr) (*(volatile unsigned int *)(addr)) = (b)
-#define writeb(b, addr) __raw_writeb((b), (addr))
-#define writew(b, addr) __raw_writew(__ioswab16(b), (addr))
-#define writel(b, addr) __raw_writel(__ioswab32(b), (addr))
-
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
-
-/* END SNI HACKS ... */
+#define ioremap(offset, size) \
+	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
 
 /*
- * ISA space is 'always mapped' on currently supported MIPS systems, no need
- * to explicitly ioremap() it. The fact that the ISA IO space is mapped
- * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
- * are physical addresses. The following constant pointer can be
- * used as the IO-area pointer (it can be iounmapped as well, so the
- * analogy with PCI is quite large):
+ * ioremap_nocache - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked uncachable
+ * on the CPU as well as honouring existing caching rules from things like
+ * the PCI bus. Note that there are other caches and buffers on many
+ * busses. In particular driver authors should read up on PCI writes
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
  */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
+#define ioremap_nocache(offset, size) \
+	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap_nocache
 
-#define isa_readb(a) readb(a)
-#define isa_readw(a) readw(a)
-#define isa_readl(a) readl(a)
-#define isa_writeb(b,a) writeb(b,a)
-#define isa_writew(w,a) writew(w,a)
-#define isa_writel(l,a) writel(l,a)
-
-#define isa_memset_io(a,b,c) memset_io((a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),(b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio((a),(b),(c))
+/*
+ * ioremap_cachable - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap_nocache performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked cachable by
+ * the CPU. Also enables full write-combining. Useful for some
+ * memory-like regions on I/O busses.
+ */
+#define ioremap_cachable(offset, size) \
+	__ioremap_mode((offset), (size), _page_cachable_default)
 
 /*
- * We don't have csum_partial_copy_fromio() yet, so we cheat here and
- * just copy it. The net code will then do the checksum later.
+ * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
+ * requests a cachable mapping, ioremap_uncached_accelerated requests a
+ * mapping using the uncached accelerated mode which isn't supported on
+ * all processors.
  */
-#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))
+#define ioremap_cacheable_cow(offset, size) \
+	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
+#define ioremap_uncached_accelerated(offset, size) \
+	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
 
-static inline int check_signature(unsigned long io_addr,
-	const unsigned char *signature, int length)
+static inline void iounmap(const volatile void __iomem *addr)
 {
-	int retval = 0;
-	do {
-		if (readb(io_addr) != *signature)
-			goto out;
-		io_addr++;
-		signature++;
-		length--;
-	} while (length);
-	retval = 1;
-out:
-	return retval;
+	plat_iounmap(addr);
 }
-#define isa_check_signature(io, s, l) check_signature(i,s,l)
 
-/*
- * Talk about misusing macros..
- */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define war_octeon_io_reorder_wmb() wmb()
+#else
+#define war_octeon_io_reorder_wmb() do { } while (0)
+#endif
 
-#define __OUT1(s) \
-static inline void __out##s(unsigned int value, unsigned int port) {
+#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
+ \
+static inline void pfx##write##bwlq(type val, \
+				    volatile void __iomem *mem) \
+{ \
+	volatile type *__mem; \
+	type __val; \
+ \
+	war_octeon_io_reorder_wmb(); \
+ \
+	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
+ \
+	__val = pfx##ioswab##bwlq(__mem, val); \
+ \
+	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+		*__mem = __val; \
+	else if (cpu_has_64bits) { \
+		type __tmp; \
+ \
+		__asm__ __volatile__( \
+			".set arch=r4000" "\t\t# __writeq""\n\t" \
+			"dsll32 %L0, %L0, 0" "\n\t" \
+			"dsrl32 %L0, %L0, 0" "\n\t" \
+			"dsll32 %M0, %M0, 0" "\n\t" \
+			"or %L0, %L0, %M0" "\n\t" \
+			"sd %L0, %2" "\n\t" \
+			".set mips0" "\n" \
+			: "=r" (__tmp) \
+			: "0" (__val), "m" (*__mem)); \
+	} else \
+		BUG(); \
+} \
+ \
+static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
+{ \
+	volatile type *__mem; \
+	type __val; \
+ \
+	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
+ \
+	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+		__val = *__mem; \
+	else if (cpu_has_64bits) { \
+		__asm__ __volatile__( \
+			".set arch=r4000" "\t\t# __readq" "\n\t" \
+			"ld %L0, %1" "\n\t" \
+			"dsra32 %M0, %L0, 0" "\n\t" \
+			"sll %L0, %L0, 0" "\n\t" \
+			".set mips0" "\n" \
+			: "=r" (__val) \
+			: "m" (*__mem)); \
+	} else { \
+		__val = 0; \
+		BUG(); \
+	} \
+ \
+	return pfx##ioswab##bwlq(__mem, __val); \
+}
 
-#define __OUT2(m) \
-__asm__ __volatile__ ("s" #m "\t%0,%1(%2)"
+#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
+ \
+static inline void pfx##out##bwlq##p(type val, unsigned long port) \
+{ \
+	volatile type *__addr; \
+	type __val; \
+ \
+	war_octeon_io_reorder_wmb(); \
+ \
+	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
+ \
+	__val = pfx##ioswab##bwlq(__addr, val); \
+ \
+	/* Really, we want this to be atomic */ \
+	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+	*__addr = __val; \
+	slow; \
+} \
+ \
+static inline type pfx##in##bwlq##p(unsigned long port) \
+{ \
+	volatile type *__addr; \
+	type __val; \
+ \
+	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
+ \
+	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
+ \
+	__val = *__addr; \
+	slow; \
+ \
+	return pfx##ioswab##bwlq(__addr, __val); \
+}
 
-#define __OUT(m,s,w) \
-__OUT1(s) __OUT2(m) : : "r" (__ioswab##w(value)), "i" (0), "r" (mips_io_port_base+port)); } \
-__OUT1(s##c) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io_port_base)); } \
-__OUT1(s##_p) __OUT2(m) : : "r" (__ioswab##w(value)), "i" (0), "r" (mips_io_port_base+port)); \
-	SLOW_DOWN_IO; } \
-__OUT1(s##c_p) __OUT2(m) : : "r" (__ioswab##w(value)), "ir" (port), "r" (mips_io_port_base)); \
-	SLOW_DOWN_IO; }
+#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
+ \
+__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)
+
+#define BUILDIO_MEM(bwlq, type) \
+ \
+__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
+__BUILD_MEMORY_PFX(, bwlq, type) \
+__BUILD_MEMORY_PFX(__mem_, bwlq, type) \
+
+BUILDIO_MEM(b, u8)
+BUILDIO_MEM(w, u16)
+BUILDIO_MEM(l, u32)
+BUILDIO_MEM(q, u64)
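With the memory-side builders instantiated above, drivers get readb()..writeq()
plus the __raw_ and __mem_ variants. A minimal usage sketch (device, base
address and register offsets are invented):

#include <asm/io.h>

#define DEV_BASE 0x1f000000 /* hypothetical peripheral */
#define REG_STAT 0x04
#define REG_DATA 0x08
#define STAT_READY 0x1

static u32 dev_read_word(void)
{
	void __iomem *base = ioremap(DEV_BASE, 0x10);

	/* readl()/writel() pass through ioswabl(), so they respect
	 * CONFIG_SWAP_IO_SPACE; __raw_readl() would not */
	while (!(readl(base + REG_STAT) & STAT_READY))
		;
	return readl(base + REG_DATA);
}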
+#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
+	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
+	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
+
+#define BUILDIO_IOPORT(bwlq, type) \
+	__BUILD_IOPORT_PFX(, bwlq, type) \
+	__BUILD_IOPORT_PFX(__mem_, bwlq, type)
+
+BUILDIO_IOPORT(b, u8)
+BUILDIO_IOPORT(w, u16)
+BUILDIO_IOPORT(l, u32)
+#ifdef CONFIG_64BIT
+BUILDIO_IOPORT(q, u64)
+#endif
 
-#define __IN1(t,s) \
-static inline t __in##s(unsigned int port) { t _v;
+#define __BUILDIO(bwlq, type) \
+ \
+__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)
+
+__BUILDIO(q, u64)
+
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+#define readq_relaxed readq
+
+#define writeb_relaxed writeb
+#define writew_relaxed writew
+#define writel_relaxed writel
+#define writeq_relaxed writeq
+
+#define readb_be(addr) \
+	__raw_readb((__force unsigned *)(addr))
+#define readw_be(addr) \
+	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
+#define readl_be(addr) \
+	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
+#define readq_be(addr) \
+	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
+
+#define writeb_be(val, addr) \
+	__raw_writeb((val), (__force unsigned *)(addr))
+#define writew_be(val, addr) \
+	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
+#define writel_be(val, addr) \
+	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
+#define writeq_be(val, addr) \
+	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
 
 /*
- * Required nops will be inserted by the assembler
+ * Some code tests for these symbols
  */
-#define __IN2(m) \
-__asm__ __volatile__ ("l" #m "\t%0,%1(%2)"
-
-#define __IN(t,m,s,w) \
-__IN1(t,s) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); return __ioswab##w(_v); } \
-__IN1(t,s##c) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); return __ioswab##w(_v); } \
-__IN1(t,s##_p) __IN2(m) : "=r" (_v) : "i" (0), "r" (mips_io_port_base+port)); SLOW_DOWN_IO; return __ioswab##w(_v); } \
-__IN1(t,s##c_p) __IN2(m) : "=r" (_v) : "ir" (port), "r" (mips_io_port_base)); SLOW_DOWN_IO; return __ioswab##w(_v); }
-
-#define __INS1(s) \
-static inline void __ins##s(unsigned int port, void * addr, unsigned long count) {
-
-#define __INS2(m) \
-if (count) \
-__asm__ __volatile__ ( \
-	".set\tnoreorder\n\t" \
-	".set\tnoat\n" \
-	"1:\tl" #m "\t$1,%4(%5)\n\t" \
-	"subu\t%1,1\n\t" \
-	"s" #m "\t$1,(%0)\n\t" \
-	"bne\t$0,%1,1b\n\t" \
-	"addiu\t%0,%6\n\t" \
-	".set\tat\n\t" \
-	".set\treorder"
-
-#define __INS(m,s,i) \
-__INS1(s) __INS2(m) \
-	: "=r" (addr), "=r" (count) \
-	: "0" (addr), "1" (count), "i" (0), \
-	  "r" (mips_io_port_base+port), "I" (i) \
-	: "$1");} \
-__INS1(s##c) __INS2(m) \
-	: "=r" (addr), "=r" (count) \
-	: "0" (addr), "1" (count), "ir" (port), \
-	  "r" (mips_io_port_base), "I" (i) \
-	: "$1");}
-
-#define __OUTS1(s) \
-static inline void __outs##s(unsigned int port, const void * addr, unsigned long count) {
-
-#define __OUTS2(m) \
-if (count) \
-__asm__ __volatile__ ( \
-	".set\tnoreorder\n\t" \
-	".set\tnoat\n" \
-	"1:\tl" #m "\t$1,(%0)\n\t" \
-	"subu\t%1,1\n\t" \
-	"s" #m "\t$1,%4(%5)\n\t" \
-	"bne\t$0,%1,1b\n\t" \
-	"addiu\t%0,%6\n\t" \
-	".set\tat\n\t" \
-	".set\treorder"
-
-#define __OUTS(m,s,i) \
-__OUTS1(s) __OUTS2(m) \
-	: "=r" (addr), "=r" (count) \
-	: "0" (addr), "1" (count), "i" (0), "r" (mips_io_port_base+port), "I" (i) \
-	: "$1");} \
-__OUTS1(s##c) __OUTS2(m) \
-	: "=r" (addr), "=r" (count) \
-	: "0" (addr), "1" (count), "ir" (port), "r" (mips_io_port_base), "I" (i) \
-	: "$1");}
-
-__IN(unsigned char,b,b,8)
-__IN(unsigned short,h,w,16)
-__IN(unsigned int,w,l,32)
-
-__OUT(b,b,8)
-__OUT(h,w,16)
-__OUT(w,l,32)
-
-__INS(b,b,1)
-__INS(h,w,2)
-__INS(w,l,4)
-
-__OUTS(b,b,1)
-__OUTS(h,w,2)
-__OUTS(w,l,4)
+#define readq readq
+#define writeq writeq
+
+#define __BUILD_MEMORY_STRING(bwlq, type) \
+ \
+static inline void writes##bwlq(volatile void __iomem *mem, \
+				const void *addr, unsigned int count) \
+{ \
+	const volatile type *__addr = addr; \
+ \
+	while (count--) { \
+		__mem_write##bwlq(*__addr, mem); \
+		__addr++; \
+	} \
+} \
+ \
+static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
+			       unsigned int count) \
+{ \
+	volatile type *__addr = addr; \
+ \
+	while (count--) { \
+		*__addr = __mem_read##bwlq(mem); \
+		__addr++; \
+	} \
+}
+
+#define __BUILD_IOPORT_STRING(bwlq, type) \
+ \
+static inline void outs##bwlq(unsigned long port, const void *addr, \
+			      unsigned int count) \
+{ \
+	const volatile type *__addr = addr; \
+ \
+	while (count--) { \
+		__mem_out##bwlq(*__addr, port); \
+		__addr++; \
+	} \
+} \
+ \
+static inline void ins##bwlq(unsigned long port, void *addr, \
+			     unsigned int count) \
+{ \
+	volatile type *__addr = addr; \
+ \
+	while (count--) { \
+		*__addr = __mem_in##bwlq(port); \
+		__addr++; \
+	} \
+}
 
-/*
- * Note that due to the way __builtin_constant_p() works, you
- *  - can't use it inside an inline function (it will never be true)
- *  - you don't have to worry about side effects within the __builtin..
- */
-#define outb(val,port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outbc((val),(port)) : \
-	__outb((val),(port)))
-
-#define inb(port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inbc(port) : \
-	__inb(port))
-
-#define outb_p(val,port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outbc_p((val),(port)) : \
-	__outb_p((val),(port)))
-
-#define inb_p(port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inbc_p(port) : \
-	__inb_p(port))
-
-#define outw(val,port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outwc((val),(port)) : \
-	__outw((val),(port)))
-
-#define inw(port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inwc(port) : \
-	__inw(port))
-
-#define outw_p(val,port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outwc_p((val),(port)) : \
-	__outw_p((val),(port)))
-
-#define inw_p(port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inwc_p(port) : \
-	__inw_p(port))
-
-#define outl(val,port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outlc((val),(port)) : \
-	__outl((val),(port)))
-
-#define inl(port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inlc(port) : \
-	__inl(port))
-
-#define outl_p(val,port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outlc_p((val),(port)) : \
-	__outl_p((val),(port)))
-
-#define inl_p(port) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inlc_p(port) : \
-	__inl_p(port))
-
-#define outsb(port,addr,count) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outsbc((port),(addr),(count)) : \
-	__outsb ((port),(addr),(count)))
-
-#define insb(port,addr,count) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__insbc((port),(addr),(count)) : \
-	__insb((port),(addr),(count)))
-
-#define outsw(port,addr,count) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outswc((port),(addr),(count)) : \
-	__outsw ((port),(addr),(count)))
-
-#define insw(port,addr,count) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inswc((port),(addr),(count)) : \
-	__insw((port),(addr),(count)))
-
-#define outsl(port,addr,count) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__outslc((port),(addr),(count)) : \
-	__outsl ((port),(addr),(count)))
-
-#define insl(port,addr,count) \
-((__builtin_constant_p((port)) && (port) < 32768) ? \
-	__inslc((port),(addr),(count)) : \
-	__insl((port),(addr),(count)))
+#define BUILDSTRING(bwlq, type) \
+ \
+__BUILD_MEMORY_STRING(bwlq, type) \
+__BUILD_IOPORT_STRING(bwlq, type)
 
-#define IO_SPACE_LIMIT 0xffff
+BUILDSTRING(b, u8)
+BUILDSTRING(w, u16)
+BUILDSTRING(l, u32)
+#ifdef CONFIG_64BIT
+BUILDSTRING(q, u64)
+#endif
 
-/*
- * The caches on some architectures aren't dma-coherent and have need to
- * handle this in software. There are three types of operations that
- * can be applied to dma buffers.
- *
- *  - dma_cache_wback_inv(start, size) makes caches and coherent by
- *    writing the content of the caches back to memory, if necessary.
- *    The function also invalidates the affected part of the caches as
- *    necessary before DMA transfers from outside to memory.
- *  - dma_cache_wback(start, size) makes caches and coherent by
- *    writing the content of the caches back to memory, if necessary.
- *    The function also invalidates the affected part of the caches as
- *    necessary before DMA transfers from outside to memory.
- *  - dma_cache_inv(start, size) invalidates the affected parts of the
- *    caches. Dirty lines of the caches may be written back or simply
- *    be discarded. This operation is necessary before dma operations
- *    to the memory.
- */
-extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
-extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
-extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
-
-#define dma_cache_wback_inv(start,size) _dma_cache_wback_inv(start,size)
-#define dma_cache_wback(start,size) _dma_cache_wback(start,size)
-#define dma_cache_inv(start,size) _dma_cache_inv(start,size)
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define mmiowb() wmb()
+#else
+/* Depends on MIPS II instruction set */
+#define mmiowb() asm volatile ("sync" ::: "memory")
+#endif
 
-static inline void sync(void)
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+	memset((void __force *)addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+	memcpy(dst, (void __force *)src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
 {
+	memcpy((void __force *)dst, src, count);
 }
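The string forms just built mirror their x86 namesakes: repeated transfers
against a single port or MMIO location, using the byte-address-preserving
__mem_ accessors internally. A short sketch (the data-port number is made up):

#define FIFO_PORT 0x1f0 /* hypothetical 16-bit data FIFO */

static void fifo_read_block(u16 *buf)
{
	/* 256 halfword reads from the same port into ascending memory */
	insw(FIFO_PORT, buf, 256);
}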
 
 /*
- * Given a physical address and a length, return a virtual address
- * that can be used to access the memory range with the caching
- * properties specified by "flags".
+ * Read a 32-bit register that requires a 64-bit read cycle on the bus.
+ * Avoid interrupt mucking, just adjust the address for 4-byte access.
+ * Assume the addresses are 8-byte aligned.
  */
-#define MAP_NOCACHE (0)
+#ifdef __MIPSEB__
+#define __CSR_32_ADJUST 4
+#else
+#define __CSR_32_ADJUST 0
+#endif
+
+#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
+#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
+
+/*
+ * U-Boot specific
+ */
+#define sync() mmiowb()
+
+#define MAP_NOCACHE (1)
 #define MAP_WRCOMBINE (0)
 #define MAP_WRBACK (0)
 #define MAP_WRTHROUGH (0)
@@ -493,6 +528,9 @@ static inline void sync(void)
 static inline void *
 map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
 {
+	if (flags == MAP_NOCACHE)
+		return ioremap(paddr, len);
+
 	return (void *)paddr;
 }
 
@@ -501,7 +539,6 @@ map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
  */
 static inline void unmap_physmem(void *vaddr, unsigned long flags)
 {
-
 }
 
 #endif /* _ASM_IO_H */
diff --git a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
new file mode 100644
index 00000000000..613f844f08c
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
@@ -0,0 +1,11 @@
+/*
+ * Copyright (C) 2003 Ralf Baechle
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
+
+/* Intentionally empty file ... */
+
+#endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-generic/ioremap.h b/arch/mips/include/asm/mach-generic/ioremap.h
new file mode 100644
index 00000000000..6b191d54a69
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/ioremap.h
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef __ASM_MACH_GENERIC_IOREMAP_H
+#define __ASM_MACH_GENERIC_IOREMAP_H
+
+#include <linux/types.h>
+
+/*
+ * Allow physical addresses to be fixed up to help peripherals located
+ * outside the low 32-bit range -- generic pass-through version.
+ */
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr,
+					     phys_addr_t size)
+{
+	return phys_addr;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+					 unsigned long flags)
+{
+	return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+	return 0;
+}
+
+#define _page_cachable_default _CACHE_CACHABLE_NONCOHERENT
+
+#endif /* __ASM_MACH_GENERIC_IOREMAP_H */
diff --git a/arch/mips/include/asm/mach-generic/mangle-port.h b/arch/mips/include/asm/mach-generic/mangle-port.h
new file mode 100644
index 00000000000..f18e53f7b83
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/mangle-port.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
+#define __ASM_MACH_GENERIC_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) (port)
+#define __swizzle_addr_w(port) (port)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+/*
+ * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
+ * less sane hardware forces software to fiddle with this...
+ *
+ * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
+ * you can't have the numerical value of data and byte addresses within
+ * multibyte quantities both preserved at the same time. Hence two
+ * variations of functions: non-prefixed ones that preserve the value
+ * and prefixed ones that preserve byte addresses. The latters are
+ * typically used for moving raw data between a peripheral and memory (cf.
+ * string I/O functions), hence the "__mem_" prefix.
+ */
+#if defined(CONFIG_SWAP_IO_SPACE)
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) le16_to_cpu(x)
+# define __mem_ioswabw(a, x) (x)
+# define ioswabl(a, x) le32_to_cpu(x)
+# define __mem_ioswabl(a, x) (x)
+# define ioswabq(a, x) le64_to_cpu(x)
+# define __mem_ioswabq(a, x) (x)
+
+#else
+
+# define ioswabb(a, x) (x)
+# define __mem_ioswabb(a, x) (x)
+# define ioswabw(a, x) (x)
+# define __mem_ioswabw(a, x) cpu_to_le16(x)
+# define ioswabl(a, x) (x)
+# define __mem_ioswabl(a, x) cpu_to_le32(x)
+# define ioswabq(a, x) (x)
+# define __mem_ioswabq(a, x) cpu_to_le32(x)
+
+#endif
+
+#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
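The practical consequence of the two macro families above: the plain accessors
preserve the numerical value of a register across a byte-swapped bus, while
the __mem_ ones preserve byte order for bulk data. A sketch, assuming a
big-endian CPU with CONFIG_SWAP_IO_SPACE set and 'regs' obtained from ioremap():

static u32 probe_id(void __iomem *regs)
{
	u32 id = readl(regs);        /* ioswabl(): swapped, value preserved */
	u32 raw = __raw_readl(regs); /* untouched, bus byte order kept */

	/* readsl()/writesl() use __mem_ioswabl(), which is (x) in this
	 * configuration, so FIFO payloads are not value-swapped */
	return id ? id : raw;
}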
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
new file mode 100644
index 00000000000..ab06674b144
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _ASM_MACH_GENERIC_SPACES_H
+#define _ASM_MACH_GENERIC_SPACES_H
+
+#include <asm/const.h>
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET _AC(0, UL)
+#endif
+
+#ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE _AC(0x40000000, UL)
+#else
+#define CAC_BASE _AC(0x80000000, UL)
+#endif
+#ifndef IO_BASE
+#define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
+#define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
+
+#ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE _AC(0x60000000, UL)
+#else
+#define MAP_BASE _AC(0xc0000000, UL)
+#endif
+#endif
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START _AC(0x20000000, UL)
+#endif
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#ifndef CAC_BASE
+#ifdef CONFIG_DMA_NONCOHERENT
+#define CAC_BASE _AC(0x9800000000000000, UL)
+#else
+#define CAC_BASE _AC(0xa800000000000000, UL)
+#endif
+#endif
+
+#ifndef IO_BASE
+#define IO_BASE _AC(0x9000000000000000, UL)
+#endif
+
+#ifndef UNCAC_BASE
+#define UNCAC_BASE _AC(0x9000000000000000, UL)
+#endif
+
+#ifndef MAP_BASE
+#define MAP_BASE _AC(0xc000000000000000, UL)
+#endif
+
+/*
+ * Memory above this physical address will be considered highmem.
+ * Fixme: 59 bits is a fictive number and makes assumptions about processors
+ * in the distant future. Nobody will care for a few years :-)
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START (_AC(1, UL) << _AC(59, UL))
+#endif
+
+#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
+#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
+#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * This handles the memory map.
+ */
+#ifndef PAGE_OFFSET
+#define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET)
+#endif
+
+#ifndef FIXADDR_TOP
+#ifdef CONFIG_KVM_GUEST
+#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
+#else
+#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
+#endif
+#endif
+
+#endif /* __ASM_MACH_GENERIC_SPACES_H */
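A worked example of the generic 32-bit numbers above, assuming no platform
overrides: CAC_BASE = 0x80000000 and PHYS_OFFSET = 0 give PAGE_OFFSET =
0x80000000, so the virt_to_phys()/phys_to_virt() pair from the io.h hunk
earlier reduces to adding or subtracting 0x80000000:

static void example_mappings(void)
{
	void *cached = phys_to_virt(0x01000000);            /* 0x81000000, KSEG0 */
	void __iomem *regs = ioremap(0x01000000, 0x100000); /* 0xa1000000, KSEG1 */

	(void)cached;
	(void)regs;
}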
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
new file mode 100644
index 00000000000..9e5fa98d1c8
--- /dev/null
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 1994 - 2002 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Maciej W. Rozycki
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _ASM_PGTABLE_BITS_H
+#define _ASM_PGTABLE_BITS_H
+
+
+/*
+ * Note that we shift the lower 32bits of each EntryLo[01] entry
+ * 6 bits to the left. That way we can convert the PFN into the
+ * physical address by a single 'and' operation and gain 6 additional
+ * bits for storing information which isn't present in a normal
+ * MIPS page table.
+ *
+ * Similar to the Alpha port, we need to keep track of the ref
+ * and mod bits in software. We have a software "yeah you can read
+ * from this page" bit, and a hardware one which actually lets the
+ * process read from the page. On the same token we have a software
+ * writable bit and the real hardware one which actually lets the
+ * process write to the page, this keeps a mod bit via the hardware
+ * dirty bit.
+ *
+ * Certain revisions of the R4000 and R5000 have a bug where if a
+ * certain sequence occurs in the last 3 instructions of an executable
+ * page, and the following page is not mapped, the cpu can do
+ * unpredictable things. The code (when it is written) to deal with
+ * this problem will be in the update_mmu_cache() code for the r4k.
+ */
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/*
+ * The following bits are implemented by the TLB hardware
+ */
+#define _PAGE_NO_EXEC_SHIFT 0
+#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
+#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_MASK (7 << _CACHE_SHIFT)
+
+/*
+ * The following bits are implemented in software
+ */
+#define _PAGE_PRESENT_SHIFT (24)
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+
+#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+
+/*
+ * Bits for extended EntryLo0/EntryLo1 registers
+ */
+#define _PFNX_MASK 0xffffff
+
+#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+
+/*
+ * The following bits are implemented in software
+ */
+#define _PAGE_PRESENT_SHIFT (0)
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+
+/*
+ * The following bits are implemented by the TLB hardware
+ */
+#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
+#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK _CACHE_UNCACHED
+
+#define _PFN_SHIFT PAGE_SHIFT
+
+#else
+/*
+ * Below are the "Normal" R4K cases
+ */
+
+/*
+ * The following bits are implemented in software
+ */
+#define _PAGE_PRESENT_SHIFT 0
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#else
+#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#endif
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+/* Huge TLB page */
+#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
+#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
+#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+/* XI - page cannot be executed */
+#ifdef _PAGE_SPLITTING_SHIFT
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
+#else
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#endif
+#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
+
+/* RI - page cannot be read */
+#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
+#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT
+#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
+#endif /* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
+
+#if defined(_PAGE_NO_READ_SHIFT)
+#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
+#elif defined(_PAGE_SPLITTING_SHIFT)
+#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
+#else
+#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#endif
+#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
+
+#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
+#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_MASK (7 << _CACHE_SHIFT)
+
+#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
+
+#ifndef _PAGE_NO_EXEC
+#define _PAGE_NO_EXEC 0
+#endif
+#ifndef _PAGE_NO_READ
+#define _PAGE_NO_READ 0
+#endif
+
+#define _PAGE_SILENT_READ _PAGE_VALID
+#define _PAGE_SILENT_WRITE _PAGE_DIRTY
+
+#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
+
+/*
+ * The final layouts of the PTE bits are:
+ *
+ * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P
+ * 32-bit, R1 or earler: CCC D V G M A W R P
+ * 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P
+ * 32-bit, R2 or later: CCC D V G RI/R XI M A W P
+ */
+
+
+#ifndef __ASSEMBLY__
+/*
+ * pte_to_entrylo converts a page table entry (PTE) into a Mips
+ * entrylo0/1 value.
+ */
+static inline uint64_t pte_to_entrylo(unsigned long pte_val)
+{
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+	if (cpu_has_rixi) {
+		int sa;
+#ifdef CONFIG_32BIT
+		sa = 31 - _PAGE_NO_READ_SHIFT;
+#else
+		sa = 63 - _PAGE_NO_READ_SHIFT;
+#endif
+		/*
+		 * C has no way to express that this is a DSRL
+		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2. Luckily
+		 * in the fast path this is done in assembly
+		 */
+		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
+			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
+	}
+#endif
+
+	return pte_val >> _PAGE_GLOBAL_SHIFT;
+}
+#endif
+
+/*
+ * Cache attributes
+ */
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+
+#define _CACHE_CACHABLE_NONCOHERENT 0
+#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED
+
+#elif defined(CONFIG_CPU_SB1)
+
+/* No penalty for being coherent on the SB1, so just
+   use it for "noncoherent" spaces, too. Shouldn't hurt. */
+
+#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
+
+#elif defined(CONFIG_CPU_LOONGSON3)
+
+/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
+
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
+#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */
+
+#elif defined(CONFIG_MACH_INGENIC)
+
+/* Ingenic uses the WA bit to achieve write-combine memory writes */
+#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)
+
+#endif
+
+#ifndef _CACHE_CACHABLE_NO_WA
+#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_WA
+#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_UNCACHED
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_NONCOHERENT
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_CE
+#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_COW
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_CACHABLE_CUW
+#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT)
+#endif
+#ifndef _CACHE_UNCACHED_ACCELERATED
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
+#endif
+
+#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
+#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
+
+#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
+			_PFN_MASK | _CACHE_MASK)
+
+#endif /* _ASM_PGTABLE_BITS_H */
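To see how an R4K-style PTE packs together, a small sketch (a hypothetical
4 KiB page at physical 0x01000000, marked present, global, valid, dirty and
uncached; assumes the "Normal R4K" branch on a pre-R2 core):

static uint64_t example_entrylo(void)
{
	unsigned long pte = (0x01000000UL >> 12) << _PFN_SHIFT; /* PFN field */

	pte |= _PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_VALID |
	       _PAGE_DIRTY | _CACHE_UNCACHED;

	/* everything from _PAGE_GLOBAL upward ends up in EntryLo:
	 * G, V, D plus the 3-bit cache attribute */
	return pte_to_entrylo(pte);
}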